/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;
SLIST_HEADER MiNonPagedPoolSListHead;
ULONG MiNonPagedPoolSListMaximum = 4;
SLIST_HEADER MiPagedPoolSListHead;
ULONG MiPagedPoolSListMaximum = 8;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                          IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    /* Get PTE pointers and loop */
    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Capture the PTE for safety */
        TempPte = *PointerPte;

        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
        TempPte.u.Hard.Valid = 0;
        TempPte.u.Soft.Prototype = 1;
        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
    } while (++PointerPte < LastPte);

    /* Flush the TLB */
    KeFlushEntireTb(TRUE, TRUE);
}

BOOLEAN
NTAPI
MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                            IN ULONG PageCount)
{
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER UnprotectedPages = 0;

    /* If pool is physical, can't unprotect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;

    /* Get and capture the PTE */
    PointerPte = MiAddressToPte(VirtualAddress);
    TempPte = *PointerPte;

    /* Loop protected PTEs */
    while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
    {
        /* Unprotect the PTE */
        TempPte.u.Hard.Valid = 1;
        TempPte.u.Soft.Prototype = 0;
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* One more page */
        if (++UnprotectedPages == PageCount) break;

        /* Capture the next PTE */
        TempPte = *(++PointerPte);
    }

    /* Return whether any pages were unprotected */
    return UnprotectedPages ? TRUE : FALSE;
}

FORCEINLINE
VOID
MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
                              OUT PVOID* PoolFlink,
                              OUT PVOID* PoolBlink)
{
    BOOLEAN Safe;
    PVOID PoolVa;

    /* Initialize variables */
    *PoolFlink = *PoolBlink = NULL;

    /* Check if the list has entries */
    if (IsListEmpty(Links) == FALSE)
    {
        /* We are going to need the forward link to do an insert */
        PoolVa = Links->Flink;

        /* So make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolFlink = PoolVa;
    }

    /* Are we going to need a backward link too? */
    if (Links != Links->Blink)
    {
        /* Get the head's backward link for the insert */
        PoolVa = Links->Blink;

        /* Make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolBlink = PoolVa;
    }
}

FORCEINLINE
VOID
MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
                            IN PVOID PoolBlink)
{
    /* Reprotect the pages, if they got unprotected earlier */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

VOID
NTAPI
MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
                          IN PLIST_ENTRY Entry,
                          IN BOOLEAN Critical)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);

    /* Now insert in the right position */
    Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);

    /* And reprotect the pages containing the free links */
    MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
}

VOID
NTAPI
MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);

    /* Now remove */
    RemoveEntryList(Entry);

    /* And reprotect the pages containing the free links */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold: 8MB, or one third of nonpaged pool, whichever is smaller */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold: 20MB, or half of nonpaged pool, whichever is smaller */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
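
    /*
     * Worked example (illustrative, assuming 4 KB pages): with a 96 MB
     * nonpaged pool, Size is 24576 pages, so the low threshold becomes
     * min(2048, 8192) = 2048 pages (8 MB) and the high threshold
     * min(5120, 12288) = 5120 pages (20 MB).
     */
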
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}

CODE_SEG("INIT")
VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The free size is the total paged pool size minus the allocated size */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low in free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_COUNT PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // Initialize the pool S-LISTs as well as their maximum count. In general,
    // we'll allow 8 times the default on a 2GB system, and two times the default
    // on a 1GB system.
    //
    InitializeSListHead(&MiPagedPoolSListHead);
    InitializeSListHead(&MiNonPagedPoolSListHead);
    if (MmNumberOfPhysicalPages >= ((2 * _1GB) / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 8;
        MiPagedPoolSListMaximum *= 8;
    }
    else if (MmNumberOfPhysicalPages >= (_1GB / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 2;
        MiPagedPoolSListMaximum *= 2;
    }
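
    //
    // For example (illustrative, assuming 4 KB pages): a machine with 2 GB
    // or more of RAM raises the S-LIST caps to 32 nonpaged and 64 paged
    // entries, a 1 GB machine to 8 and 16, and anything smaller keeps the
    // defaults of 4 and 8 set at the top of this file.
    //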

    //
    // However if debugging options for the pool are enabled, turn off the S-LIST
    // to reduce the risk of messing things up even more
    //
    if (MmProtectFreedNonPagedPool)
    {
        MiNonPagedPoolSListMaximum = 0;
        MiPagedPoolSListMaximum = 0;
    }

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }
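
    //
    // Note (illustrative): the lists are indexed by run size as
    // i = min(size, MI_MAX_FREE_PAGE_LISTS) - 1, so free runs of 1, 2 and
    // 3 pages land on lists 0, 1 and 2, while every larger run shares the
    // last list.
    //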

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge = (PFN_COUNT)
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;
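
    //
    // For example (illustrative, assuming 4 KB pages): with a 128 MB
    // maximum and a 32 MB initial pool, the expansion region spans
    // 24576 pages, charged as 24574 once the two guard pages are excluded.
    //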

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

POOL_TYPE
NTAPI
MmDeterminePoolType(IN PVOID PoolAddress)
{
    //
    // Use a simple bounds check
    //
    if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd)
        return PagedPool;
    else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd)
        return NonPagedPool;
    KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the number of page tables needed for this expansion
            //
            i = ((SizeInPages - 1) / PTE_PER_PAGE) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);
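
            //
            // Illustrative example, assuming x86 without PAE where
            // PTE_PER_PAGE is 1024: a 1500-page request needs
            // ((1500 - 1) / 1024) + 1 = 2 page tables of expansion.
            //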

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                 MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = MiAcquirePfnLock();
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                /* Initialize the PFN entry for it */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               PFN_FROM_PTE(MiAddressToPte(PointerPde)));

                /* Write the actual PDE now */
                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
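
                //
                // (Sketch of the mechanism: on x86 non-PAE each process has
                // its own page directory, so the new PDE is staged here and
                // propagated to individual processes by the page fault
                // handler when the expansion area is first touched)
                //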

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_PER_PAGE]);
#endif

                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            MiReleasePfnLock(OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_PER_PAGE;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_PER_PAGE);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
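
        //
        // For example (illustrative): a 3-page allocation granted at bitmap
        // index 10 sets allocation bits 10-12 above and end bit 12 here,
        // which is what MiFreePoolPages later uses to rediscover its extent.
        //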

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first PTE and the end of the PTE range, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = min(SizeInPages, MI_MAX_FREE_PAGE_LISTS) - 1;
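
    //
    // For example (illustrative): a 2-page request starts scanning at list
    // index 1, and any request of 4 or more pages starts directly at the
    // last list, falling through to the following lists when no suitably
    // sized entry is found.
    //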

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation is carved from the tail of this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed nonpaged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed nonpaged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    MiAcquirePfnLockAtDpcLevel();

    /* Check that we have enough available pages for this request */
    if (MmAvailablePages < SizeInPages)
    {
        MiReleasePfnLockFromDpcLevel();
        KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

        MiReleaseSystemPtes(StartPte, SizeInPages, NonPagedPoolExpansion);

        DPRINT1("OUT OF AVAILABLE PAGES! Required %lu, Available %lu\n", SizeInPages, MmAvailablePages);

        return NULL;
    }

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    MiReleasePfnLockFromDpcLevel();
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_COUNT FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;
    ULONG_PTR Offset;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
        i = (ULONG)(Offset >> PAGE_SHIFT);
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
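
        //
        // For example (illustrative): if the allocation begins at page
        // index 10 and end bits 10 and 11 are clear but bit 12 is set, the
        // scan stops with End == 12, yielding a 3-page allocation below.
        //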

        //
        // Now calculate the total number of pages this allocation spans. If it's
        // only one page, add it to the S-LIST instead of freeing it
        //
        NumberOfPages = End - i + 1;
        if ((NumberOfPages == 1) &&
            (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
        {
            InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
            return 1;
        }

        /* Delete the actual pages */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry. If this is also the
    // last PTE, meaning that this allocation was only for one page, push it into
    // the S-LIST instead of freeing it
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
        (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
    {
        InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
        return 1;
    }

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1);

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        /* Sanity check */
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool, or the expansion nonpaged pool, so get the PFN entry
        // of the next allocation
        //
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if the page after us starts an allocation of its own
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so it must belong to a free entry; locate its descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        ASSERT(FreeEntry->Owner == FreeEntry);

        /* Consume this entry's pages */
        FreePages += FreeEntry->Size;

        /* Remove the item from the list, depending if pool is protected */
        if (MmProtectFreedNonPagedPool)
            MiProtectedPoolRemoveEntryList(&FreeEntry->List);
        else
            RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        /* Check if this is valid pool, or a guard page */
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        FreeEntry = FreeEntry->Owner;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(FreeEntry, 0);
        }

        //
        // Check if the entry is small enough (1-3 pages) to be indexed on a free list.
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < MI_MAX_FREE_PAGE_LISTS)
        {
            /* Remove the item from the list, depending if pool is protected */
            if (MmProtectFreedNonPagedPool)
                MiProtectedPoolRemoveEntryList(&FreeEntry->List);
            else
                RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1;

            /* Insert the entry into the free list head, check for prot. pool */
            if (MmProtectFreedNonPagedPool)
                MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
            else
                InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we couldn't merge into a preceding entry, in which case we
    // remain the free entry ourselves
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1;

        /* Insert the entry into the free list head, check for prot. pool */
        if (MmProtectFreedNonPagedPool)
            MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
        else
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }
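
    //
    // To summarize with an illustrative example: freeing 2 pages that sit
    // between a 3-page free run below and a 1-page free run above collapses
    // everything into a single 6-page entry owned by the lowest run; this is
    // why the surrounding descriptors had to be located and, where needed,
    // unlinked above.
    //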

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    /* Is freed nonpaged pool protected? */
    if (MmProtectFreedNonPagedPool)
    {
        /* Protect the freed pool! */
        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
    }

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}

BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

NTSTATUS
NTAPI
MiInitializeSessionPool(VOID)
{
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde, LastPde;
    PFN_NUMBER PageFrameIndex, PdeCount;
    PPOOL_DESCRIPTOR PoolDescriptor;
    PMM_SESSION_SPACE SessionGlobal;
    PMM_PAGED_POOL_INFO PagedPoolInfo;
    NTSTATUS Status;
    ULONG Index, PoolSize, BitmapSize;
    PAGED_CODE();

    /* Lock session pool */
    SessionGlobal = MmSessionSpace->GlobalVirtualAddress;
    KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex);

    /* Setup a valid pool descriptor */
    PoolDescriptor = &MmSessionSpace->PagedPool;
    ExInitializePoolDescriptor(PoolDescriptor,
                               PagedPoolSession,
                               0,
                               0,
                               &SessionGlobal->PagedPoolMutex);

    /* Setup the pool addresses */
    MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
    MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1);
    DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
            MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd);

    /* Reset all the counters */
    PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
    PagedPoolInfo->PagedPoolCommit = 0;
    PagedPoolInfo->PagedPoolHint = 0;
    PagedPoolInfo->AllocatedPagedPool = 0;

    /* Compute PDE and PTE addresses */
    PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart);
    PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart);
    LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd);
    LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd);

    /* Write them down */
    MmSessionSpace->PagedPoolBasePde = PointerPde;
    PagedPoolInfo->FirstPteForPagedPool = PointerPte;
    PagedPoolInfo->LastPteForPagedPool = LastPte;
    PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;

    /* Zero the PDEs */
    PdeCount = LastPde - PointerPde;
    RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE));

    /* Initialize the PFN for the PDE */
    Status = MiInitializeAndChargePfn(&PageFrameIndex,
                                      PointerPde,
                                      MmSessionSpace->SessionPageDirectoryIndex,
                                      TRUE);
    ASSERT(NT_SUCCESS(Status) == TRUE);

    /* Initialize the first page table */
    Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase;
    Index >>= 22;
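
    /*
     * (Sketch: the hardcoded shift by 22 assumes x86 non-PAE, where one PDE
     * maps 4 MB of address space, so Index becomes the page-table index of
     * the pool start within session space)
     */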
#ifndef _M_AMD64 // FIXME
    ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0);
    MmSessionSpace->PageTables[Index] = *PointerPde;
#endif

    /* Bump up counters */
    InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages);
    InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages);

    /* Compute the size of the pool in pages, and of the bitmap for it */
    PoolSize = MmSessionPoolSize >> PAGE_SHIFT;
    BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG);
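
    /*
     * For example (illustrative, assuming 4 KB pages): a 16 MB session pool
     * is 4096 pages, so each bitmap needs sizeof(RTL_BITMAP) plus 128
     * ULONGs (512 bytes) of backing storage.
     */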

    /* Allocate and initialize the bitmap to track allocations */
    PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                  BitmapSize,
                                                                  TAG_MM);
    ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL);
    RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap,
                        (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1),
                        PoolSize);

    /* Set all bits, but clear the first page table's worth */
    RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap);
    RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);

    /* Allocate and initialize the bitmap that tracks the end of each allocation */
    PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                BitmapSize,
                                                                TAG_MM);
    ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL);
    RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap,
                        (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1),
                        PoolSize);

    /* Clear all the bits and return success */
    RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap);
    return STATUS_SUCCESS;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */