1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/mm/ARM3/pool.c
5  * PURPOSE:         ARM Memory Manager Pool Allocator
6  * PROGRAMMERS:     ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 /* GLOBALS ********************************************************************/
19 
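/*
 * Free nonpaged pool is kept on MI_MAX_FREE_PAGE_LISTS lists, bucketed by run
 * length: list i holds free runs of exactly i + 1 pages, and the last list
 * collects all larger runs. In addition, single freed pages are cached on
 * lock-free S-LISTs (one per pool type), capped by the maximums below.
 */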
20 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
21 PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
22 PVOID MmNonPagedPoolEnd0;
23 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
24 KGUARDED_MUTEX MmPagedPoolMutex;
25 MM_PAGED_POOL_INFO MmPagedPoolInfo;
26 SIZE_T MmAllocatedNonPagedPool;
27 ULONG MmSpecialPoolTag;
28 ULONG MmConsumedPoolPercentage;
29 BOOLEAN MmProtectFreedNonPagedPool;
30 SLIST_HEADER MiNonPagedPoolSListHead;
31 ULONG MiNonPagedPoolSListMaximum = 4;
32 SLIST_HEADER MiPagedPoolSListHead;
33 ULONG MiPagedPoolSListMaximum = 8;
34 
35 /* PRIVATE FUNCTIONS **********************************************************/
36 
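/*
 * Free-pool protection: when MmProtectFreedNonPagedPool is set, pages sitting
 * on the free lists have their PTEs invalidated so that any stale pointer
 * into freed pool faults immediately instead of silently corrupting a future
 * allocation. The Prototype bit is reused as a marker that distinguishes
 * these deliberately-protected PTEs from ordinary invalid ones.
 */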
37 VOID
38 NTAPI
39 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
40                           IN ULONG PageCount)
41 {
42     PMMPTE PointerPte, LastPte;
43     MMPTE TempPte;
44 
45     /* If pool is physical, can't protect PTEs */
46     if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;
47 
48     /* Get PTE pointers and loop */
49     PointerPte = MiAddressToPte(VirtualAddress);
50     LastPte = PointerPte + PageCount;
51     do
52     {
53         /* Capture the PTE for safety */
54         TempPte = *PointerPte;
55 
56         /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
57         TempPte.u.Hard.Valid = 0;
58         TempPte.u.Soft.Prototype = 1;
59         MI_WRITE_INVALID_PTE(PointerPte, TempPte);
60     } while (++PointerPte < LastPte);
61 
62     /* Flush the TLB */
63     KeFlushEntireTb(TRUE, TRUE);
64 }
65 
66 BOOLEAN
67 NTAPI
68 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
69                             IN ULONG PageCount)
70 {
71     PMMPTE PointerPte;
72     MMPTE TempPte;
73     PFN_NUMBER UnprotectedPages = 0;
74 
75     /* If pool is physical, there are no PTEs to unprotect */
76     if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
77 
78     /* Get, and capture the PTE */
79     PointerPte = MiAddressToPte(VirtualAddress);
80     TempPte = *PointerPte;
81 
82     /* Loop protected PTEs */
83     while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
84     {
85         /* Unprotect the PTE */
86         TempPte.u.Hard.Valid = 1;
87         TempPte.u.Soft.Prototype = 0;
88         MI_WRITE_VALID_PTE(PointerPte, TempPte);
89 
90         /* One more page */
91         if (++UnprotectedPages == PageCount) break;
92 
93         /* Capture next PTE */
94         TempPte = *(++PointerPte);
95     }
96 
97     /* Return if any pages were unprotected */
98     return UnprotectedPages ? TRUE : FALSE;
99 }
100 
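/*
 * Helpers for list manipulation on protected pool: the neighbors of an insert
 * or remove live in pool pages that may themselves be protected, so those
 * pages are unprotected first, the LIST_ENTRY operation is performed, and the
 * touched pages are reprotected afterwards.
 */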
101 FORCEINLINE
102 VOID
103 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
104                               OUT PVOID* PoolFlink,
105                               OUT PVOID* PoolBlink)
106 {
107     BOOLEAN Safe;
108     PVOID PoolVa;
109 
110     /* Initialize variables */
111     *PoolFlink = *PoolBlink = NULL;
112 
113     /* Check if the list has entries */
114     if (IsListEmpty(Links) == FALSE)
115     {
116         /* We are going to need the forward link to do an insert */
117         PoolVa = Links->Flink;
118 
119         /* So make it safe to access */
120         Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
121         if (Safe) *PoolFlink = PoolVa;
122     }
123 
124     /* Are we going to need a backward link too? */
125     if (Links != Links->Blink)
126     {
127         /* Get the head's backward link for the insert */
128         PoolVa = Links->Blink;
129 
130         /* Make it safe to access */
131         Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
132         if (Safe) *PoolBlink = PoolVa;
133     }
134 }
135 
136 FORCEINLINE
137 VOID
138 MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
139                             IN PVOID PoolBlink)
140 {
141     /* Reprotect the pages, if they got unprotected earlier */
142     if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
143     if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
144 }
145 
146 VOID
147 NTAPI
148 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
149                           IN PLIST_ENTRY Entry,
150                           IN BOOLEAN Critical)
151 {
152     PVOID PoolFlink, PoolBlink;
153 
154     /* Make the list accessible */
155     MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
156 
157     /* Now insert in the right position */
158     Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
159 
160     /* And reprotect the pages containing the free links */
161     MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
162 }
163 
164 VOID
165 NTAPI
166 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
167 {
168     PVOID PoolFlink, PoolBlink;
169 
170     /* Make the list accessible */
171     MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
172 
173     /* Now remove */
174     RemoveEntryList(Entry);
175 
176     /* And reprotect the pages containing the free links */
177     if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
178     if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
179 }
180 
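/*
 * Worked example with illustrative numbers: for a 64MB maximum nonpaged pool,
 * the low threshold is min(8MB, 64MB / 3) = 8MB and the high threshold is
 * min(20MB, 64MB / 2) = 20MB, both stored as page counts.
 */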
181 VOID
182 NTAPI
183 INIT_FUNCTION
184 MiInitializeNonPagedPoolThresholds(VOID)
185 {
186     PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
187 
188     /* Default low threshold of 8MB or one third of nonpaged pool */
189     MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
190     MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
191 
192     /* Default high threshold of 20MB or 50% */
193     MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
194     MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
195     ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
196 }
197 
198 VOID
199 NTAPI
200 INIT_FUNCTION
201 MiInitializePoolEvents(VOID)
202 {
203     KIRQL OldIrql;
204     PFN_NUMBER FreePoolInPages;
205 
206     /* Lock paged pool */
207     KeAcquireGuardedMutex(&MmPagedPoolMutex);
208 
209     /* The total size of paged pool minus what's allocated is free */
210     FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
211 
212     /* Check the initial high state */
213     if (FreePoolInPages >= MiHighPagedPoolThreshold)
214     {
215         /* We have plenty of pool */
216         KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
217     }
218     else
219     {
220         /* We don't */
221         KeClearEvent(MiHighPagedPoolEvent);
222     }
223 
224     /* Check the initial low state */
225     if (FreePoolInPages <= MiLowPagedPoolThreshold)
226     {
227         /* We're very low in free pool memory */
228         KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
229     }
230     else
231     {
232         /* We're not */
233         KeClearEvent(MiLowPagedPoolEvent);
234     }
235 
236     /* Release the paged pool lock */
237     KeReleaseGuardedMutex(&MmPagedPoolMutex);
238 
239     /* Now it's time for the nonpaged pool lock */
240     OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
241 
242     /* Free pages are the maximum minus what's been allocated */
243     FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
244 
245     /* Check if we have plenty */
246     if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
247     {
248         /* We do, set the event */
249         KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
250     }
251     else
252     {
253         /* We don't, clear the event */
254         KeClearEvent(MiHighNonPagedPoolEvent);
255     }
256 
257     /* Check if we have very little */
258     if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
259     {
260         /* We do, set the event */
261         KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
262     }
263     else
264     {
265         /* We don't, clear it */
266         KeClearEvent(MiLowNonPagedPoolEvent);
267     }
268 
269     /* We're done, release the nonpaged pool lock */
270     KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
271 }
272 
273 VOID
274 NTAPI
275 INIT_FUNCTION
276 MiInitializeNonPagedPool(VOID)
277 {
278     ULONG i;
279     PFN_COUNT PoolPages;
280     PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
281     PMMPTE PointerPte;
282     PAGED_CODE();
283 
284     //
285     // Initialize the pool S-LISTs as well as their maximum count. In general,
286     // we'll allow 8 times the default on a 2GB system, and two times the default
287     // on a 1GB system.
288     //
289     InitializeSListHead(&MiPagedPoolSListHead);
290     InitializeSListHead(&MiNonPagedPoolSListHead);
291     if (MmNumberOfPhysicalPages >= ((2 * _1GB) / PAGE_SIZE))
292     {
293         MiNonPagedPoolSListMaximum *= 8;
294         MiPagedPoolSListMaximum *= 8;
295     }
296     else if (MmNumberOfPhysicalPages >= (_1GB / PAGE_SIZE))
297     {
298         MiNonPagedPoolSListMaximum *= 2;
299         MiPagedPoolSListMaximum *= 2;
300     }
301 
302     //
303     // However, if debugging options for the pool are enabled, turn off the S-LIST
304     // to reduce the risk of messing things up even more
305     //
306     if (MmProtectFreedNonPagedPool)
307     {
308         MiNonPagedPoolSListMaximum = 0;
309         MiPagedPoolSListMaximum = 0;
310     }
311 
312     //
313     // We keep 4 lists of free pages (4 lists help avoid contention)
314     //
315     for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
316     {
317         //
318         // Initialize each of them
319         //
320         InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
321     }
322 
323     //
324     // Calculate how many pages the initial nonpaged pool has
325     //
326     PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
327     MmNumberOfFreeNonPagedPool = PoolPages;
328 
329     //
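    //
    // Each free page carries an MMFREE_POOL_ENTRY header (see miarm.h;
    // roughly a LIST_ENTRY, the run size in pages, a signature, and an Owner
    // pointer back to the entry heading the free run). Only the owning entry
    // is linked into a free list; trailing pages just point back to it.
    //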
330     // Initialize the first free entry
331     //
332     FreeEntry = MmNonPagedPoolStart;
333     FirstEntry = FreeEntry;
334     FreeEntry->Size = PoolPages;
335     FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
336     FreeEntry->Owner = FirstEntry;
337 
338     //
339     // Insert it into the last list
340     //
341     InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
342                    &FreeEntry->List);
343 
344     //
345     // Now create free entries for every single other page
346     //
347     while (PoolPages-- > 1)
348     {
349         //
350         // Link them all back to the original entry
351         //
352         FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
353         FreeEntry->Owner = FirstEntry;
354         FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
355     }
356 
357     //
358     // Validate and remember first allocated pool page
359     //
360     PointerPte = MiAddressToPte(MmNonPagedPoolStart);
361     ASSERT(PointerPte->u.Hard.Valid == 1);
362     MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
363 
364     //
365     // Keep track of where initial nonpaged pool ends
366     //
367     MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
368                                  MmSizeOfNonPagedPoolInBytes);
369 
370     //
371     // Validate and remember last allocated pool page
372     //
373     PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
374     ASSERT(PointerPte->u.Hard.Valid == 1);
375     MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
376 
377     //
378     // Validate the first nonpaged pool expansion page (which is a guard page)
379     //
380     PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
381     ASSERT(PointerPte->u.Hard.Valid == 0);
382 
383     //
384     // Calculate the size of the expansion region alone
385     //
386     MiExpansionPoolPagesInitialCharge = (PFN_COUNT)
387         BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
388 
389     //
390     // Remove 2 pages, since there's a guard page on top and on the bottom
391     //
392     MiExpansionPoolPagesInitialCharge -= 2;
393 
394     //
395     // Now initialize the nonpaged pool expansion PTE space. Remember there's a
396     // guard page on top so make sure to skip it. The bottom guard page will be
397     // guaranteed by the fact our size is off by one.
398     //
399     MiInitializeSystemPtes(PointerPte + 1,
400                            MiExpansionPoolPagesInitialCharge,
401                            NonPagedPoolExpansion);
402 }
403 
404 POOL_TYPE
405 NTAPI
406 MmDeterminePoolType(IN PVOID PoolAddress)
407 {
408     //
409     // Use a simple bounds check
410     //
411     if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd)
412         return PagedPool;
413     else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd)
414         return NonPagedPool;
415     KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0);
416 }
417 
418 PVOID
419 NTAPI
420 MiAllocatePoolPages(IN POOL_TYPE PoolType,
421                     IN SIZE_T SizeInBytes)
422 {
423     PFN_NUMBER PageFrameNumber;
424     PFN_COUNT SizeInPages, PageTableCount;
425     ULONG i;
426     KIRQL OldIrql;
427     PLIST_ENTRY NextEntry, NextHead, LastHead;
428     PMMPTE PointerPte, StartPte;
429     PMMPDE PointerPde;
430     ULONG EndAllocation;
431     MMPTE TempPte;
432     MMPDE TempPde;
433     PMMPFN Pfn1;
434     PVOID BaseVa, BaseVaStart;
435     PMMFREE_POOL_ENTRY FreeEntry;
436 
437     //
438     // Figure out how big the allocation is in pages
439     //
440     SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);
441 
442     //
443     // Check for overflow
444     //
445     if (SizeInPages == 0)
446     {
447         //
448         // Fail
449         //
450         return NULL;
451     }
452 
453     //
454     // Handle paged pool
455     //
456     if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
457     {
458         //
459         // If only one page is being requested, try to grab it from the S-LIST
460         //
461         if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
462         {
463             BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
464             if (BaseVa) return BaseVa;
465         }
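        //
        // Note that the depth check is only a hint: another processor can
        // drain the S-LIST between the check and the pop, which is why the
        // result of InterlockedPopEntrySList must still be tested for NULL.
        //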
466 
467         //
468         // Lock the paged pool mutex
469         //
470         KeAcquireGuardedMutex(&MmPagedPoolMutex);
471 
472         //
473         // Find some empty allocation space
474         //
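        // RtlFindClearBitsAndSet returns the starting bit index of a run of
        // SizeInPages clear bits (and sets them), or 0xFFFFFFFF if no such
        // run exists; the guarded mutex held here serializes bitmap access.
        //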
475         i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
476                                    SizeInPages,
477                                    MmPagedPoolInfo.PagedPoolHint);
478         if (i == 0xFFFFFFFF)
479         {
480             //
481             // Get the page bit count
482             // Compute how many page tables are needed to map the request
483             i = ((SizeInPages - 1) / PTE_COUNT) + 1;
484             DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);
485 
486             //
487             // Check if there is enough paged pool expansion space left
488             //
489             if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
490                 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
491             {
492                 //
493                 // Out of memory!
494                 //
495                 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
496                 KeReleaseGuardedMutex(&MmPagedPoolMutex);
497                 return NULL;
498             }
499 
500             //
501             // Check if we'll have to expand past the last PTE we have available
502             //
503             if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
504                  (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
505             {
506                 //
507                 // We can only support this much then
508                 //
509                 PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool);
510                 PageTableCount = (PFN_COUNT)(PointerPde + 1 -
511                                  MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
512                 ASSERT(PageTableCount < i);
513                 i = PageTableCount;
514             }
515             else
516             {
517                 //
518                 // Otherwise, there is plenty of space left for this expansion
519                 //
520                 PageTableCount = i;
521             }
522 
523             //
524             // Get the template PDE we'll use to expand
525             //
526             TempPde = ValidKernelPde;
527 
528             //
529             // Get the first PTE in expansion space
530             // Get the first PDE for the expansion, and the VA of its page table
531             PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
532             BaseVa = MiPdeToPte(PointerPde);
533             BaseVaStart = BaseVa;
534 
535             //
536             // Lock the PFN database and loop pages
537             //
538             OldIrql = MiAcquirePfnLock();
539             do
540             {
541                 //
542                 // It should not already be valid
543                 //
544                 ASSERT(PointerPde->u.Hard.Valid == 0);
545 
546                 /* Request a page */
547                 MI_SET_USAGE(MI_USAGE_PAGED_POOL);
548                 MI_SET_PROCESS2("Kernel");
549                 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
550                 TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
551 #if (_MI_PAGING_LEVELS >= 3)
552                 /* On PAE/x64 systems, there's no double-buffering */
553                 ASSERT(FALSE);
554 #else
555                 //
556                 // Save it into our double-buffered system page directory
557                 //
558                 MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
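                //
                // On x86, kernel PDEs are not written eagerly into every
                // process's page directory: the MmSystemPagePtes copy is the
                // master, and individual page directories pick it up lazily
                // through the page-fault path, which appears to be why the
                // explicit PDE write below is commented out in this revision.
                //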
559 
560                 /* Initialize the PFN */
561                 MiInitializePfnForOtherProcess(PageFrameNumber,
562                                                (PMMPTE)PointerPde,
563                                                MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);
564 
565                 /* Write the actual PDE now */
566 //                MI_WRITE_VALID_PDE(PointerPde, TempPde);
567 #endif
568                 //
569                 // Move on to the next expansion address
570                 //
571                 PointerPde++;
572                 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
573                 i--;
574             } while (i > 0);
575 
576             //
577             // Release the PFN database lock
578             //
579             MiReleasePfnLock(OldIrql);
580 
581             //
582             // These pages are now available, clear their availablity bits
583             // These pages are now available, clear their availability bits
584             EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
585                              (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
586                              PTE_COUNT;
587             RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
588                          EndAllocation,
589                          PageTableCount * PTE_COUNT);
590 
591             //
592             // Update the next expansion location
593             //
594             MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;
595 
596             //
597             // Zero out the newly available memory
598             //
599             RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);
600 
601             //
602             // Now try consuming the pages again
603             //
604             i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
605                                        SizeInPages,
606                                        0);
607             if (i == 0xFFFFFFFF)
608             {
609                 //
610                 // Out of memory!
611                 //
612                 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
613                 KeReleaseGuardedMutex(&MmPagedPoolMutex);
614                 return NULL;
615             }
616         }
617 
618         //
619         // Update the pool hint if the request was just one page
620         //
621         if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
622 
623         //
624         // Update the end bitmap so we know the bounds of this allocation when
625         // the time comes to free it
626         //
627         EndAllocation = i + SizeInPages - 1;
628         RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
629 
630         //
631         // Now we can release the lock (it mainly protects the bitmap)
632         //
633         KeReleaseGuardedMutex(&MmPagedPoolMutex);
634 
635         //
636         // Now figure out where this allocation starts
637         //
638         BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
639 
640         //
641         // Flush the TLB
642         //
643         KeFlushEntireTb(TRUE, TRUE);
644 
645         /* Setup a demand-zero writable PTE */
646         MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
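        // (No physical pages are allocated here: the first touch of each page
        // faults, and the fault handler materializes a zeroed page to back
        // the demand-zero PTE written below.)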
647 
648         //
649         // Find the first PTE and the one past the last, then loop through them all
650         //
651         PointerPte = MiAddressToPte(BaseVa);
652         StartPte = PointerPte + SizeInPages;
653         do
654         {
655             //
656             // Write the demand zero PTE and keep going
657             //
658             MI_WRITE_INVALID_PTE(PointerPte, TempPte);
659         } while (++PointerPte < StartPte);
660 
661         //
662         // Return the allocation address to the caller
663         //
664         return BaseVa;
665     }
666 
667     //
668     // If only one page is being requested, try to grab it from the S-LIST
669     //
670     if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
671     {
672         BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
673         if (BaseVa) return BaseVa;
674     }
675 
676     //
677     // Allocations of less than 4 pages go into their individual buckets
678     //
679     i = SizeInPages - 1;
680     if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
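    //
    // For example, a 3-page request starts at list index 2 (runs of exactly
    // 3 pages); if that list is empty, the loop below falls through to the
    // last list, which holds all the larger runs.
    //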
681 
682     //
683     // Loop through all the free page lists based on the page index
684     //
685     NextHead = &MmNonPagedPoolFreeListHead[i];
686     LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
687 
688     //
689     // Acquire the nonpaged pool lock
690     //
691     OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
692     do
693     {
694         //
695         // Now loop through all the free page entries in this given list
696         //
697         NextEntry = NextHead->Flink;
698         while (NextEntry != NextHead)
699         {
700             /* Is freed nonpaged pool protection enabled? */
701             if (MmProtectFreedNonPagedPool)
702             {
703                 /* We need to be able to touch this page, unprotect it */
704                 MiUnProtectFreeNonPagedPool(NextEntry, 0);
705             }
706 
707             //
708             // Grab the entry and see if it can handle our allocation
709             //
710             FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
711             ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
712             if (FreeEntry->Size >= SizeInPages)
713             {
714                 //
715                 // It does, so consume the pages from here
716                 //
717                 FreeEntry->Size -= SizeInPages;
718 
719                 //
720                 // The allocation will begin in this free page area
721                 //
722                 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
723                                  (FreeEntry->Size << PAGE_SHIFT));
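                //
                // (Pages are carved from the tail of the free run, so the
                // run's header page and its list linkage stay in place and
                // only its Size has to shrink.)
                //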
724 
725                 /* Remove the item from the list, depending if pool is protected */
726                 if (MmProtectFreedNonPagedPool)
727                     MiProtectedPoolRemoveEntryList(&FreeEntry->List);
728                 else
729                     RemoveEntryList(&FreeEntry->List);
730 
731                 //
732                 // However, check if it's still got space left
733                 //
734                 if (FreeEntry->Size != 0)
735                 {
736                     /* Check which list to insert this entry into */
737                     i = FreeEntry->Size - 1;
738                     if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
739 
740                     /* Insert the entry into the free list head, check for prot. pool */
741                     if (MmProtectFreedNonPagedPool)
742                         MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
743                     else
744                         InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
745 
746                     /* Is freed non paged pool protected? */
747                     if (MmProtectFreedNonPagedPool)
748                     {
749                         /* Protect the freed pool! */
750                         MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
751                     }
752                 }
753 
754                 //
755                 // Grab the PTE for this allocation
756                 //
757                 PointerPte = MiAddressToPte(BaseVa);
758                 ASSERT(PointerPte->u.Hard.Valid == 1);
759 
760                 //
761                 // Grab the PFN entry for the first page of the allocation
762                 //
763                 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
764 
765                 //
766                 // Now mark it as the beginning of an allocation
767                 //
768                 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
769                 Pfn1->u3.e1.StartOfAllocation = 1;
770 
771                 /* Mark it as a verifier allocation if needed */
772                 ASSERT(Pfn1->u4.VerifierAllocation == 0);
773                 if (PoolType & VERIFIER_POOL_MASK)
774                 {
775                     Pfn1->u4.VerifierAllocation = 1;
776                 }
777 
778                 //
779                 // Check if the allocation is larger than one page
780                 //
781                 if (SizeInPages != 1)
782                 {
783                     //
784                     // Navigate to the last PFN entry and PTE
785                     //
786                     PointerPte += SizeInPages - 1;
787                     ASSERT(PointerPte->u.Hard.Valid == 1);
788                     Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
789                 }
790 
791                 //
792                 // Mark this PFN as the last (might be the same as the first)
793                 //
794                 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
795                 Pfn1->u3.e1.EndOfAllocation = 1;
796 
797                 //
798                 // Release the nonpaged pool lock, and return the allocation
799                 //
800                 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
801                 return BaseVa;
802             }
803 
804             //
805             // Try the next free page entry
806             //
807             NextEntry = FreeEntry->List.Flink;
808 
809             /* Is freed non paged pool protected? */
810             if (MmProtectFreedNonPagedPool)
811             {
812                 /* Protect the freed pool! */
813                 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
814             }
815         }
816     } while (++NextHead < LastHead);
817 
818     //
819     // If we got here, we're out of space.
820     // Start by releasing the lock
821     //
822     KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
823 
824     //
825     // Allocate some system PTEs
826     //
827     StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
828     PointerPte = StartPte;
829     if (StartPte == NULL)
830     {
831         //
832         // Ran out of memory
833         //
834         DPRINT1("Out of NP Expansion Pool\n");
835         return NULL;
836     }
837 
838     //
839     // Acquire the pool lock now
840     //
841     OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
842 
843     //
844     // Lock the PFN database too
845     //
846     MiAcquirePfnLockAtDpcLevel();
847 
848     //
849     // Loop the pages
850     //
851     TempPte = ValidKernelPte;
852     do
853     {
854         /* Allocate a page */
855         MI_SET_USAGE(MI_USAGE_NONPAGED_POOL); /* this is the nonpaged expansion path */
856         MI_SET_PROCESS2("Kernel");
857         PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
858 
859         /* Get the PFN entry for it and fill it out */
860         Pfn1 = MiGetPfnEntry(PageFrameNumber);
861         Pfn1->u3.e2.ReferenceCount = 1;
862         Pfn1->u2.ShareCount = 1;
863         Pfn1->PteAddress = PointerPte;
864         Pfn1->u3.e1.PageLocation = ActiveAndValid;
865         Pfn1->u4.VerifierAllocation = 0;
866 
867         /* Write the PTE for it */
868         TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
869         MI_WRITE_VALID_PTE(PointerPte++, TempPte);
870     } while (--SizeInPages > 0);
871 
872     //
873     // This is the last page
874     //
875     Pfn1->u3.e1.EndOfAllocation = 1;
876 
877     //
878     // Get the first page and mark it as such
879     //
880     Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
881     Pfn1->u3.e1.StartOfAllocation = 1;
882 
883     /* Mark it as a verifier allocation if needed */
884     ASSERT(Pfn1->u4.VerifierAllocation == 0);
885     if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;
886 
887     //
888     // Release the PFN and nonpaged pool lock
889     //
890     MiReleasePfnLockFromDpcLevel();
891     KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
892 
893     //
894     // Return the address
895     //
896     return MiPteToAddress(StartPte);
897 }
898 
899 ULONG
900 NTAPI
901 MiFreePoolPages(IN PVOID StartingVa)
902 {
903     PMMPTE PointerPte, StartPte;
904     PMMPFN Pfn1, StartPfn;
905     PFN_COUNT FreePages, NumberOfPages;
906     KIRQL OldIrql;
907     PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
908     ULONG i, End;
909     ULONG_PTR Offset;
910 
911     //
912     // Handle paged pool
913     //
914     if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
915     {
916         //
917         // Calculate the offset from the beginning of paged pool, and convert it
918         // into pages
919         //
920         Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
921         i = (ULONG)(Offset >> PAGE_SHIFT);
922         End = i;
923 
924         //
925         // Now use the end bitmap to scan until we find a set bit, meaning that
926         // this allocation finishes here
927         //
928         while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
929 
930         //
931         // Now calculate the total number of pages this allocation spans. If it's
932         // only one page, add it to the S-LIST instead of freeing it
933         //
934         NumberOfPages = End - i + 1;
935         if ((NumberOfPages == 1) &&
936             (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
937         {
938             InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
939             return 1;
940         }
941 
942         /* Delete the actual pages */
943         PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
944         FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
945         ASSERT(FreePages == NumberOfPages);
946 
947         //
948         // Acquire the paged pool lock
949         //
950         KeAcquireGuardedMutex(&MmPagedPoolMutex);
951 
952         //
953         // Clear the allocation and free bits
954         //
955         RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
956         RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
957 
958         //
959         // Update the hint if we need to
960         //
961         if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
962 
963         //
964         // Release the lock protecting the bitmaps
965         //
966         KeReleaseGuardedMutex(&MmPagedPoolMutex);
967 
968         //
969         // And finally return the number of pages freed
970         //
971         return NumberOfPages;
972     }
973 
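    //
    // Nonpaged case: the allocation's extent is recovered from the PFN
    // StartOfAllocation/EndOfAllocation bits, and the freed run is then
    // coalesced with a free neighbor on either side before being inserted
    // into the appropriate free list.
    //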
974     //
975     // Get the first PTE and its corresponding PFN entry. If this is also the
976     // last PTE, meaning that this allocation was only for one page, push it into
977     // the S-LIST instead of freeing it
978     //
979     StartPte = PointerPte = MiAddressToPte(StartingVa);
980     StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
981     if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
982         (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
983     {
984         InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
985         return 1;
986     }
987 
988     //
989     // Loop until we find the last PTE
990     //
991     while (Pfn1->u3.e1.EndOfAllocation == 0)
992     {
993         //
994         // Keep going
995         //
996         PointerPte++;
997         Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
998     }
999 
1000     //
1001     // Now we know how many pages we have
1002     //
1003     NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1);
1004 
1005     //
1006     // Acquire the nonpaged pool lock
1007     //
1008     OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
1009 
1010     //
1011     // Mark the first and last PTEs as not part of an allocation anymore
1012     //
1013     StartPfn->u3.e1.StartOfAllocation = 0;
1014     Pfn1->u3.e1.EndOfAllocation = 0;
1015 
1016     //
1017     // Assume we will free as many pages as the allocation was
1018     //
1019     FreePages = NumberOfPages;
1020 
1021     //
1022     // Peek one page past the end of the allocation
1023     //
1024     PointerPte++;
1025 
1026     //
1027     // Guard against going past initial nonpaged pool
1028     //
1029     if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
1030     {
1031         //
1032         // This page is on the outskirts of initial nonpaged pool, so ignore it
1033         //
1034         Pfn1 = NULL;
1035     }
1036     else
1037     {
1038         /* Sanity check */
1039         ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
1040 
1041         /* Check if protected pool is enabled */
1042         if (MmProtectFreedNonPagedPool)
1043         {
1044             /* The freed block will be merged, it must be made accessible */
1045             MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
1046         }
1047 
1048         //
1049         // Otherwise, our entire allocation must've fit within the initial non
1050         // paged pool, or the expansion nonpaged pool, so get the PFN entry of
1051         // the next allocation
1052         //
1053         if (PointerPte->u.Hard.Valid == 1)
1054         {
1055             //
1056             // It's either expansion or initial: get the PFN entry
1057             //
1058             Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1059         }
1060         else
1061         {
1062             //
1063             // This means we've reached the guard page that protects the end of
1064             // the expansion nonpaged pool
1065             //
1066             Pfn1 = NULL;
1067         }
1068 
1069     }
1070 
1071     //
1072     // Check whether the page after our allocation starts another allocation
1073     //
1074     if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
1075     {
1076         //
1077         // It doesn't, so those pages must be free; locate their entry descriptor
1078         //
1079         FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
1080                                          (NumberOfPages << PAGE_SHIFT));
1081         ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
1082         ASSERT(FreeEntry->Owner == FreeEntry);
1083 
1084         /* Consume this entry's pages */
1085         FreePages += FreeEntry->Size;
1086 
1087         /* Remove the item from the list, depending if pool is protected */
1088         if (MmProtectFreedNonPagedPool)
1089             MiProtectedPoolRemoveEntryList(&FreeEntry->List);
1090         else
1091             RemoveEntryList(&FreeEntry->List);
1092     }
1093 
1094     //
1095     // Now get the official free entry we'll create for the caller's allocation
1096     //
1097     FreeEntry = StartingVa;
1098 
1099     //
1100     // Check if our allocation is the very first page
1101     //
1102     if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
1103     {
1104         //
1105         // Then we can't do anything or we'll risk underflowing
1106         //
1107         Pfn1 = NULL;
1108     }
1109     else
1110     {
1111         //
1112         // Otherwise, get the PTE for the page right before our allocation
1113         //
1114         PointerPte -= NumberOfPages + 1;
1115 
1116         /* Check if protected pool is enabled */
1117         if (MmProtectFreedNonPagedPool)
1118         {
1119             /* The freed block will be merged, it must be made accessible */
1120             MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
1121         }
1122 
1123         /* Check if this is valid pool, or a guard page */
1124         if (PointerPte->u.Hard.Valid == 1)
1125         {
1126             //
1127             // It's either expansion or initial nonpaged pool, get the PFN entry
1128             //
1129             Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1130         }
1131         else
1132         {
1133             //
1134             // We must've reached the guard page, so don't risk touching it
1135             //
1136             Pfn1 = NULL;
1137         }
1138     }
1139 
1140     //
1141     // Check if there is a valid PFN entry for the page before the allocation
1142     // and then check if this page was actually the end of an allocation.
1143     // If it wasn't, then we know for sure it's a free page
1144     //
1145     if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
1146     {
1147         //
1148         // Get the free entry descriptor for that given page range
1149         //
1150         FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
1151         ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
1152         FreeEntry = FreeEntry->Owner;
1153 
1154         /* Check if protected pool is enabled */
1155         if (MmProtectFreedNonPagedPool)
1156         {
1157             /* The freed block will be merged, it must be made accessible */
1158             MiUnProtectFreeNonPagedPool(FreeEntry, 0);
1159         }
1160 
1161         //
1162         // Check if the entry is small enough to be indexed on a free list
1163         // If it is, we'll want to re-insert it, since we're about to
1164         // collapse our pages on top of it, which will change its count
1165         //
1166         if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
1167         {
1168             /* Remove the item from the list, depending if pool is protected */
1169             if (MmProtectFreedNonPagedPool)
1170                 MiProtectedPoolRemoveEntryList(&FreeEntry->List);
1171             else
1172                 RemoveEntryList(&FreeEntry->List);
1173 
1174             //
1175             // Update its size
1176             //
1177             FreeEntry->Size += FreePages;
1178 
1179             //
1180             // And now find the new appropriate list to place it in
1181             //
1182             i = (ULONG)(FreeEntry->Size - 1);
1183             if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
1184 
1185             /* Insert the entry into the free list head, check for prot. pool */
1186             if (MmProtectFreedNonPagedPool)
1187                 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
1188             else
1189                 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1190         }
1191         else
1192         {
1193             //
1194             // Otherwise, just combine our free pages into this entry
1195             //
1196             FreeEntry->Size += FreePages;
1197         }
1198     }
1199 
1200     //
1201     // If no backward merge happened, our freed block becomes a free entry itself
1202     //
1203     if (FreeEntry == StartingVa)
1204     {
1205         //
1206     // Well, now we are a free entry. At worst we just have our newly freed
1207         // pages, at best we have our pages plus whatever entry came after us
1208         //
1209         FreeEntry->Size = FreePages;
1210 
1211         //
1212         // Find the appropriate list we should be on
1213         //
1214         i = FreeEntry->Size - 1;
1215         if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
1216 
1217         /* Insert the entry into the free list head, check for prot. pool */
1218         if (MmProtectFreedNonPagedPool)
1219             MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
1220         else
1221             InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1222     }
1223 
1224     //
1225     // Just a sanity check
1226     //
1227     ASSERT(FreePages != 0);
1228 
1229     //
1230     // Get all the pages between our allocation and its end. These will all now
1231     // become free page chunks.
1232     //
1233     NextEntry = StartingVa;
1234     LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
1235     do
1236     {
1237         //
1238         // Link back to the parent free entry, and keep going
1239         //
1240         NextEntry->Owner = FreeEntry;
1241         NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
1242         NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
1243     } while (NextEntry != LastEntry);
1244 
1245     /* Is freed non paged pool protected? */
1246     if (MmProtectFreedNonPagedPool)
1247     {
1248         /* Protect the freed pool! */
1249         MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
1250     }
1251 
1252     //
1253     // We're done, release the lock and let the caller know how much we freed
1254     //
1255     KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
1256     return NumberOfPages;
1257 }
1258 
1259 
1260 BOOLEAN
1261 NTAPI
1262 MiRaisePoolQuota(IN POOL_TYPE PoolType,
1263                  IN ULONG CurrentMaxQuota,
1264                  OUT PULONG NewMaxQuota)
1265 {
1266     //
1267     // Not implemented
1268     //
1269     UNIMPLEMENTED;
1270     *NewMaxQuota = CurrentMaxQuota + 65536;
1271     return TRUE;
1272 }
1273 
1274 NTSTATUS
1275 NTAPI
1276 MiInitializeSessionPool(VOID)
1277 {
1278     PMMPTE PointerPte, LastPte;
1279     PMMPDE PointerPde, LastPde;
1280     PFN_NUMBER PageFrameIndex, PdeCount;
1281     PPOOL_DESCRIPTOR PoolDescriptor;
1282     PMM_SESSION_SPACE SessionGlobal;
1283     PMM_PAGED_POOL_INFO PagedPoolInfo;
1284     NTSTATUS Status;
1285     ULONG Index, PoolSize, BitmapSize;
1286     PAGED_CODE();
1287 
1288     /* Initialize the session pool lock */
1289     SessionGlobal = MmSessionSpace->GlobalVirtualAddress;
1290     KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex);
1291 
1292     /* Setup a valid pool descriptor */
1293     PoolDescriptor = &MmSessionSpace->PagedPool;
1294     ExInitializePoolDescriptor(PoolDescriptor,
1295                                PagedPoolSession,
1296                                0,
1297                                0,
1298                                &SessionGlobal->PagedPoolMutex);
1299 
1300     /* Setup the pool addresses */
1301     MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
1302     MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1);
1303     DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
1304             MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd);
1305 
1306     /* Reset all the counters */
1307     PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
1308     PagedPoolInfo->PagedPoolCommit = 0;
1309     PagedPoolInfo->PagedPoolHint = 0;
1310     PagedPoolInfo->AllocatedPagedPool = 0;
1311 
1312     /* Compute PDE and PTE addresses */
1313     PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart);
1314     PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart);
1315     LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd);
1316     LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd);
1317 
1318     /* Write them down */
1319     MmSessionSpace->PagedPoolBasePde = PointerPde;
1320     PagedPoolInfo->FirstPteForPagedPool = PointerPte;
1321     PagedPoolInfo->LastPteForPagedPool = LastPte;
1322     PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;
1323 
1324     /* Zero the PDEs */
1325     PdeCount = LastPde - PointerPde;
1326     RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE));
1327 
1328     /* Initialize the PFN for the PDE */
1329     Status = MiInitializeAndChargePfn(&PageFrameIndex,
1330                                       PointerPde,
1331                                       MmSessionSpace->SessionPageDirectoryIndex,
1332                                       TRUE);
1333     ASSERT(NT_SUCCESS(Status) == TRUE);
1334 
1335     /* Initialize the first page table */
1336     Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase;
1337     Index >>= 22;
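    // (The shift by 22 is hard-coded for x86 without PAE, where one PDE maps
    // 4MB; hence the FIXME below for other architectures.)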
1338 #ifndef _M_AMD64 // FIXME
1339     ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0);
1340     MmSessionSpace->PageTables[Index] = *PointerPde;
1341 #endif
1342 
1343     /* Bump up counters */
1344     InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages);
1345     InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages);
1346 
1347     /* Compute the size of the pool in pages, and of the bitmap for it */
1348     PoolSize = MmSessionPoolSize >> PAGE_SHIFT;
1349     BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG);
1350 
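    /*
     * Worked example with illustrative numbers: a 16MB session pool is 4096
     * pages, needing 4096 / 32 = 128 ULONGs of bitmap storage, so BitmapSize
     * comes to sizeof(RTL_BITMAP) + 512 bytes.
     */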
1351     /* Allocate and initialize the bitmap to track allocations */
1352     PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
1353                                                                   BitmapSize,
1354                                                                   TAG_MM);
1355     ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL);
1356     RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap,
1357                         (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1),
1358                         PoolSize);
1359 
1360     /* Set all bits, but clear the first page table's worth */
1361     RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap);
1362     RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);
1363 
1364     /* Allocate and initialize the bitmap that marks each allocation's last page */
1365     PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
1366                                                                 BitmapSize,
1367                                                                 TAG_MM);
1368     ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL);
1369     RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap,
1370                         (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1),
1371                         PoolSize);
1372 
1373     /* Clear all the bits and return success */
1374     RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap);
1375     return STATUS_SUCCESS;
1376 }
1377 
1378 /* PUBLIC FUNCTIONS ***********************************************************/
1379 
1380 /*
1381  * @unimplemented
1382  */
1383 PVOID
1384 NTAPI
1385 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
1386                          IN ULONG PoolTag)
1387 {
1388     UNIMPLEMENTED;
1389     return NULL;
1390 }
1391 
1392 /*
1393  * @unimplemented
1394  */
1395 VOID
1396 NTAPI
1397 MmFreeMappingAddress(IN PVOID BaseAddress,
1398                      IN ULONG PoolTag)
1399 {
1400     UNIMPLEMENTED;
1401 }
1402 
1403 /* EOF */
1404