1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/mm/ARM3/mminit.c
5  * PURPOSE:         ARM Memory Manager Initialization
6  * PROGRAMMERS:     ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17 #undef MmSystemRangeStart
18 
19 /* GLOBALS ********************************************************************/
20 
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 SIZE_T MmSizeOfNonPagedPoolInBytes;
27 SIZE_T MmMaximumNonPagedPoolInBytes;
28 
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31 
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
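
//
// Illustrative only (an assumption based on the KB article above, not part of the
// original source): the sizing algorithm roughly computes the initial nonpaged pool
// as MmMinimumNonPagedPoolSize plus MmMinAdditionNonPagedPoolPerMb for each MB of
// RAM beyond the first few, and the maximum as MmDefaultMaximumNonPagedPool plus
// MmMaxAdditionNonPagedPoolPerMb per MB, subject to the caps discussed below. For a
// 1GB machine this works out to roughly 256KB + 1020 * 32KB ~= 32MB initial and
// 1MB + 1020 * 400KB ~= 400MB maximum, matching the figures quoted in the layout
// description that follows.
//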
43 
44 //
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the
47 // nonpaged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
54 // The initial nonpaged pool is physically contiguous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the subject of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
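// A rough map of the nonpaged region described above (illustrative, x86 non-PAE):
//
//   0xB0000000     PFN database, immediately followed by the initial nonpaged pool
//   >= 0xEB000000  MmNonPagedSystemStart -- start of system PTE space
//   ...            MmNonPagedPoolExpansionStart -- expansion nonpaged pool
//   0xFFBE0000     MmNonPagedPoolEnd
//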
96 PVOID MmNonPagedSystemStart;
97 PVOID MmNonPagedPoolStart;
98 PVOID MmNonPagedPoolExpansionStart;
99 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
100 
101 //
102 // This is where paged pool starts by default
103 //
104 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
105 PVOID MmPagedPoolEnd;
106 
107 //
108 // And this is its default size
109 //
110 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
111 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
112 
113 //
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
119 //
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
122 //
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't
125 // support that yet.
126 //
127 PVOID MiSessionSpaceEnd;    // 0xC0000000
128 PVOID MiSessionImageEnd;    // 0xC0000000
129 PVOID MiSessionImageStart;  // 0xBF800000
130 PVOID MiSessionSpaceWs;
131 PVOID MiSessionViewStart;   // 0xBE000000
132 PVOID MiSessionPoolEnd;     // 0xBE000000
133 PVOID MiSessionPoolStart;   // 0xBD000000
134 PVOID MmSessionBase;        // 0xBD000000
135 SIZE_T MmSessionSize;
136 SIZE_T MmSessionViewSize;
137 SIZE_T MmSessionPoolSize;
138 SIZE_T MmSessionImageSize;
139 
140 /*
141  * These are the PTE addresses of the boundaries carved out above
142  */
143 PMMPTE MiSessionImagePteStart;
144 PMMPTE MiSessionImagePteEnd;
145 PMMPTE MiSessionBasePte;
146 PMMPTE MiSessionLastPte;
147 
148 //
149 // The system view space, on the other hand, is where sections that are memory
150 // mapped into "system space" end up.
151 //
152 // By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
153 //
154 PVOID MiSystemViewStart;
155 SIZE_T MmSystemViewSize;
156 
157 #if (_MI_PAGING_LEVELS == 2)
158 //
159 // A copy of the system page directory (the page directory associated with the
160 // System process) is kept (double-mapped) by the manager in order to lazily
161 // map paged pool PDEs into external processes when they fault on a paged pool
162 // address.
163 //
164 PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
165 PMMPDE MmSystemPagePtes;
166 #endif
167 
168 //
169 // The system cache starts right after hyperspace. The first few pages are for
170 // keeping track of the system working set list.
171 //
172 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
173 //
174 PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
175 
176 //
177 // Windows NT seems to choose between 7000, 11000 and 50000
178 // On systems with more than 32MB, this number is then doubled, and further
179 // aligned up to a PDE boundary (4MB).
180 //
181 PFN_COUNT MmNumberOfSystemPtes;
182 
183 //
184 // This is how many pages the PFN database will take up
185 // In Windows, this includes the Quark Color Table, but not in ARM³
186 //
187 PFN_NUMBER MxPfnAllocation;
188 
189 //
190 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
191 // of pages that are not actually valid physical memory, such as ACPI reserved
192 // regions, BIOS address ranges, or holes in physical memory address space which
193 // could indicate device-mapped I/O memory.
194 //
195 // In fact, the lack of a PFN entry for a page usually indicates that this is
196 // I/O space instead.
197 //
198 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
199 // a bit to each. If the bit is set, then the page is valid physical RAM.
200 //
201 RTL_BITMAP MiPfnBitMap;
202 
203 //
204 // This structure describes the different pieces of RAM-backed address space
205 //
206 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
207 
208 //
209 // This is where we keep track of the most basic physical layout markers
210 //
211 PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
212 PFN_COUNT MmNumberOfPhysicalPages;
213 
214 //
215 // The total number of pages mapped by the boot loader, which includes the kernel,
216 // HAL, boot drivers, registry, NLS files and other loader data structures, is
217 // tracked here. This depends on "LoaderPagesSpanned" being correct when
218 // coming from the loader.
219 //
220 // This number is later aligned up to a PDE boundary.
221 //
222 SIZE_T MmBootImageSize;
223 
224 //
225 // These three variables keep track of the core separation of address space that
226 // exists between kernel mode and user mode.
227 //
228 ULONG_PTR MmUserProbeAddress;
229 PVOID MmHighestUserAddress;
230 PVOID MmSystemRangeStart;
231 
232 /* And these store the respective highest PTE/PDE address */
233 PMMPTE MiHighestUserPte;
234 PMMPDE MiHighestUserPde;
235 #if (_MI_PAGING_LEVELS >= 3)
236 PMMPTE MiHighestUserPpe;
237 #if (_MI_PAGING_LEVELS >= 4)
238 PMMPTE MiHighestUserPxe;
239 #endif
240 #endif
241 
242 /* These variables define the system cache address space */
243 PVOID MmSystemCacheStart = (PVOID)MI_SYSTEM_CACHE_START;
244 PVOID MmSystemCacheEnd;
245 ULONG MmSizeOfSystemCacheInPages;
246 MMSUPPORT MmSystemCacheWs;
247 
248 //
249 // This is where hyperspace ends (followed by the system cache working set)
250 //
251 PVOID MmHyperSpaceEnd;
252 
253 //
254 // Page coloring algorithm data
255 //
256 ULONG MmSecondaryColors;
257 ULONG MmSecondaryColorMask;
258 
259 //
260 // Actual (registry-configurable) size of a GUI thread's stack
261 //
262 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
263 
264 //
265 // Before we have a PFN database, memory comes straight from our physical memory
266 // blocks, which is nice because it's guaranteed contiguous and also because once
267 // we take a page from here, the system doesn't see it anymore.
268 // However, once the fun is over, those pages must be re-integrated into PFN
269 // society, and that requires keeping a copy of the original layout
270 // so that we can parse it later.
271 //
272 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
273 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
274 
275 /*
276  * For each page-sized chunk of L2 cache in a given set/way line, the zero and
277  * free lists are organized into what is called a "color".
278  *
279  * This array points to the two lists, so it can be thought of as a multi-dimensional
280  * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
281  * we describe the array in pointer form instead.
282  *
283  * On a final note, the color tables themselves are right after the PFN database.
284  */
285 C_ASSERT(FreePageList == 1);
286 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
287 
288 /* An event used in Phase 0 before the rest of the system is ready to go */
289 KEVENT MiTempEvent;
290 
291 /* All the events used for memory threshold notifications */
292 PKEVENT MiLowMemoryEvent;
293 PKEVENT MiHighMemoryEvent;
294 PKEVENT MiLowPagedPoolEvent;
295 PKEVENT MiHighPagedPoolEvent;
296 PKEVENT MiLowNonPagedPoolEvent;
297 PKEVENT MiHighNonPagedPoolEvent;
298 
299 /* The actual thresholds themselves, in page numbers */
300 PFN_NUMBER MmLowMemoryThreshold;
301 PFN_NUMBER MmHighMemoryThreshold;
302 PFN_NUMBER MiLowPagedPoolThreshold;
303 PFN_NUMBER MiHighPagedPoolThreshold;
304 PFN_NUMBER MiLowNonPagedPoolThreshold;
305 PFN_NUMBER MiHighNonPagedPoolThreshold;
306 
307 /*
308  * This number determines how many free pages must exist, at minimum, before we
309  * start trimming working sets and flushing modified pages to obtain more free
310  * pages.
311  *
312  * This number changes if the system detects that this is a server product
313  */
314 PFN_NUMBER MmMinimumFreePages = 26;
315 
316 /*
317  * This number indicates how many pages we consider to be the lower limit for having
318  * "plenty" of free memory.
319  *
320  * It is doubled on systems that have more than 63MB of memory
321  */
322 PFN_NUMBER MmPlentyFreePages = 400;
323 
324 /* These values store the type of system this is (small, med, large) and whether it is a server */
325 ULONG MmProductType;
326 MM_SYSTEMSIZE MmSystemSize;
327 
328 /*
329  * These values store the cache working set minimums and maximums, in pages
330  *
331  * The minimum value is boosted on systems with more than 24MB of RAM, and cut
332  * down to only 32 pages on embedded (<24MB RAM) systems.
333  *
334  * An extra boost of 2MB is given on systems with more than 33MB of RAM.
335  */
336 PFN_NUMBER MmSystemCacheWsMinimum = 288;
337 PFN_NUMBER MmSystemCacheWsMaximum = 350;
338 
339 /* FIXME: Move to cache/working set code later */
340 BOOLEAN MmLargeSystemCache;
341 
342 /*
343  * This value determines the size of the fragments/chunks in which the subsection
344  * prototype PTEs are allocated when mapping a section object. It is configurable in
345  * the registry through the MapAllocationFragment parameter.
346  *
347  * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
348  * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
349  *
350  * The maximum it can be set to is 2MB, and the minimum is 4KB.
351  */
352 SIZE_T MmAllocationFragment;
353 
354 /*
355  * These two values track how much virtual memory can be committed, and when
356  * expansion should happen.
357  */
358  // FIXME: They should be moved elsewhere since it's not an "init" setting?
359 SIZE_T MmTotalCommitLimit;
360 SIZE_T MmTotalCommitLimitMaximum;
361 
362 /*
363  * These values tune certain user parameters. They have default values set here,
364  * as well as in the code, and can be overwritten by registry settings.
365  */
366 SIZE_T MmHeapSegmentReserve = 1 * _1MB;
367 SIZE_T MmHeapSegmentCommit = 2 * PAGE_SIZE;
368 SIZE_T MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
369 SIZE_T MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
370 SIZE_T MmMinimumStackCommitInBytes = 0;
371 
372 /* Internal setting used for debugging memory descriptors */
373 BOOLEAN MiDbgEnableMdDump =
374 #ifdef _ARM_
375 TRUE;
376 #else
377 FALSE;
378 #endif
379 
380 /* Number of memory descriptors in the loader block */
381 ULONG MiNumberDescriptors = 0;
382 
383 /* Number of free pages in the loader block */
384 PFN_NUMBER MiNumberOfFreePages = 0;
385 
386 /* Timeout value for critical sections (2.5 minutes) */
387 ULONG MmCritsectTimeoutSeconds = 150; // NT value: 720 * 60 * 60; (30 days)
388 LARGE_INTEGER MmCriticalSectionTimeout;
389 
390 //
391 // Throttling limits for Cc (in pages)
392 // Above top, we don't throttle
393 // Above bottom, we throttle depending on the number of modified pages
394 // Otherwise, we throttle!
395 //
396 ULONG MmThrottleTop;
397 ULONG MmThrottleBottom;
398 
399 /* PRIVATE FUNCTIONS **********************************************************/
400 
401 VOID
402 NTAPI
403 MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
404 {
405     PLIST_ENTRY ListEntry;
406     PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
407     PFN_NUMBER PageFrameIndex, FreePages = 0;
408 
409     /* Loop the memory descriptors */
410     for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
411          ListEntry != &LoaderBlock->MemoryDescriptorListHead;
412          ListEntry = ListEntry->Flink)
413     {
414         /* Get the descriptor */
415         Descriptor = CONTAINING_RECORD(ListEntry,
416                                        MEMORY_ALLOCATION_DESCRIPTOR,
417                                        ListEntry);
418         DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
419             Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);
420 
421         /* Count this descriptor */
422         MiNumberDescriptors++;
423 
424         /* Check if this is invisible memory */
425         if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
426             (Descriptor->MemoryType == LoaderSpecialMemory) ||
427             (Descriptor->MemoryType == LoaderHALCachedMemory) ||
428             (Descriptor->MemoryType == LoaderBBTMemory))
429         {
430             /* Skip this descriptor */
431             continue;
432         }
433 
434         /* Check if this is bad memory */
435         if (Descriptor->MemoryType != LoaderBad)
436         {
437             /* Count these pages in the total */
438             MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
439         }
440 
441         /* Check if this is the new lowest page */
442         if (Descriptor->BasePage < MmLowestPhysicalPage)
443         {
444             /* Update the lowest page */
445             MmLowestPhysicalPage = Descriptor->BasePage;
446         }
447 
448         /* Check if this is the new highest page */
449         PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
450         if (PageFrameIndex > MmHighestPhysicalPage)
451         {
452             /* Update the highest page */
453             MmHighestPhysicalPage = PageFrameIndex - 1;
454         }
455 
456         /* Check if this is free memory */
457         if ((Descriptor->MemoryType == LoaderFree) ||
458             (Descriptor->MemoryType == LoaderLoadedProgram) ||
459             (Descriptor->MemoryType == LoaderFirmwareTemporary) ||
460             (Descriptor->MemoryType == LoaderOsloaderStack))
461         {
462             /* Count it towards the free pages */
463             MiNumberOfFreePages += Descriptor->PageCount;
464 
465             /* Check if this is the largest memory descriptor */
466             if (Descriptor->PageCount > FreePages)
467             {
468                 /* Remember it */
469                 MxFreeDescriptor = Descriptor;
470                 FreePages = Descriptor->PageCount;
471             }
472         }
473     }
474 
475     /* Save original values of the free descriptor, since it'll be
476      * altered by early allocations */
477     MxOldFreeDescriptor = *MxFreeDescriptor;
478 }
479 
480 PFN_NUMBER
481 NTAPI
482 INIT_FUNCTION
483 MxGetNextPage(IN PFN_NUMBER PageCount)
484 {
485     PFN_NUMBER Pfn;
486 
487     /* Make sure we have enough pages */
488     if (PageCount > MxFreeDescriptor->PageCount)
489     {
490         /* Crash the system */
491         KeBugCheckEx(INSTALL_MORE_MEMORY,
492                      MmNumberOfPhysicalPages,
493                      MxFreeDescriptor->PageCount,
494                      MxOldFreeDescriptor.PageCount,
495                      PageCount);
496     }
497 
498     /* Use our lowest usable free pages */
499     Pfn = MxFreeDescriptor->BasePage;
500     MxFreeDescriptor->BasePage += PageCount;
501     MxFreeDescriptor->PageCount -= PageCount;
502     return Pfn;
503 }
504 
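/*
 * Illustrative example (not from the original source): with a 512KB, 8-way
 * second-level cache reported by the KPCR, each way covers 512KB / 8 = 64KB,
 * i.e. 16 pages of 4KB. MiComputeColorInformation below would therefore pick
 * 16 secondary colors and a color mask of 0xF, provided that value falls within
 * the MI_MIN_SECONDARY_COLORS / MI_MAX_SECONDARY_COLORS bounds it enforces.
 */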
505 VOID
506 NTAPI
507 INIT_FUNCTION
508 MiComputeColorInformation(VOID)
509 {
510     ULONG L2Associativity;
511 
512     /* Check if no setting was provided already */
513     if (!MmSecondaryColors)
514     {
515         /* Get L2 cache information */
516         L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
517 
518         /* The number of colors is the number of cache bytes per set/way line */
519         MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
520         if (L2Associativity) MmSecondaryColors /= L2Associativity;
521     }
522 
523     /* Now convert cache bytes into pages */
524     MmSecondaryColors >>= PAGE_SHIFT;
525     if (!MmSecondaryColors)
526     {
527         /* If there was no cache data from the KPCR, use the default colors */
528         MmSecondaryColors = MI_SECONDARY_COLORS;
529     }
530     else
531     {
532         /* Otherwise, make sure there aren't too many colors */
533         if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
534         {
535             /* Set the maximum */
536             MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
537         }
538 
539         /* Make sure there aren't too few colors */
540         if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
541         {
542             /* Set the default */
543             MmSecondaryColors = MI_SECONDARY_COLORS;
544         }
545 
546         /* Finally make sure the colors are a power of two */
547         if (MmSecondaryColors & (MmSecondaryColors - 1))
548         {
549             /* Set the default */
550             MmSecondaryColors = MI_SECONDARY_COLORS;
551         }
552     }
553 
554     /* Compute the mask and store it */
555     MmSecondaryColorMask = MmSecondaryColors - 1;
556     KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
557 }
558 
559 VOID
560 NTAPI
561 INIT_FUNCTION
562 MiInitializeColorTables(VOID)
563 {
564     ULONG i;
565     PMMPTE PointerPte, LastPte;
566     MMPTE TempPte = ValidKernelPte;
567 
568     /* The color table starts after the ARM3 PFN database */
569     MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
570 
571     /* Loop the PTEs. There are two color tables, each with one entry per secondary color */
572     PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
573     LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
574                              (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
575                              - 1);
576     while (PointerPte <= LastPte)
577     {
578         /* Check for valid PTE */
579         if (PointerPte->u.Hard.Valid == 0)
580         {
581             /* Get a page and map it */
582             TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
583             MI_WRITE_VALID_PTE(PointerPte, TempPte);
584 
585             /* Zero out the page */
586             RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
587         }
588 
589         /* Next */
590         PointerPte++;
591     }
592 
593     /* Now set the address of the next list, right after this one */
594     MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
595 
596     /* Now loop the lists to set them up */
597     for (i = 0; i < MmSecondaryColors; i++)
598     {
599         /* Set both free and zero lists for each color */
600         MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
601         MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
602         MmFreePagesByColor[ZeroedPageList][i].Count = 0;
603         MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
604         MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
605         MmFreePagesByColor[FreePageList][i].Count = 0;
606     }
607 }
608 
609 #ifndef _M_AMD64
610 BOOLEAN
611 NTAPI
612 INIT_FUNCTION
613 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
614                   IN PFN_NUMBER Pfn)
615 {
616     PLIST_ENTRY NextEntry;
617     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
618 
619     /* Loop the memory descriptors */
620     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
621     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
622     {
623         /* Get the memory descriptor */
624         MdBlock = CONTAINING_RECORD(NextEntry,
625                                     MEMORY_ALLOCATION_DESCRIPTOR,
626                                     ListEntry);
627 
628         /* Check if this PFN could be part of the block */
629         if (Pfn >= (MdBlock->BasePage))
630         {
631             /* Check if it really is part of the block */
632             if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
633             {
634                 /* Check if the block is actually memory we don't map */
635                 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
636                     (MdBlock->MemoryType == LoaderBBTMemory) ||
637                     (MdBlock->MemoryType == LoaderSpecialMemory))
638                 {
639                     /* We don't need PFN database entries for this memory */
640                     break;
641                 }
642 
643                 /* This is memory we want to map */
644                 return TRUE;
645             }
646         }
647         else
648         {
649             /* Blocks are ordered, so if it's not here, it doesn't exist */
650             break;
651         }
652 
653         /* Get to the next descriptor */
654         NextEntry = MdBlock->ListEntry.Flink;
655     }
656 
657     /* Check if this PFN is actually from our free memory descriptor */
658     if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
659         (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
660     {
661         /* We use these pages for initial mappings, so we do want to count them */
662         return TRUE;
663     }
664 
665     /* Otherwise this isn't memory that we describe or care about */
666     return FALSE;
667 }
668 
669 VOID
670 NTAPI
671 INIT_FUNCTION
672 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
673 {
674     PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
675     PLIST_ENTRY NextEntry;
676     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
677     PMMPTE PointerPte, LastPte;
678     MMPTE TempPte = ValidKernelPte;
679 
680     /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
681     FreePage = MxFreeDescriptor->BasePage;
682     FreePageCount = MxFreeDescriptor->PageCount;
683     PagesLeft = 0;
684 
685     /* Loop the memory descriptors */
686     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
687     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
688     {
689         /* Get the descriptor */
690         MdBlock = CONTAINING_RECORD(NextEntry,
691                                     MEMORY_ALLOCATION_DESCRIPTOR,
692                                     ListEntry);
693         if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
694             (MdBlock->MemoryType == LoaderBBTMemory) ||
695             (MdBlock->MemoryType == LoaderSpecialMemory))
696         {
697             /* These pages are not part of the PFN database */
698             NextEntry = MdBlock->ListEntry.Flink;
699             continue;
700         }
701 
702         /* Next, check if this is our special free descriptor we've found */
703         if (MdBlock == MxFreeDescriptor)
704         {
705             /* Use the real numbers instead */
706             BasePage = MxOldFreeDescriptor.BasePage;
707             PageCount = MxOldFreeDescriptor.PageCount;
708         }
709         else
710         {
711             /* Use the descriptor's numbers */
712             BasePage = MdBlock->BasePage;
713             PageCount = MdBlock->PageCount;
714         }
715 
716         /* Get the PTEs for this range */
717         PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
718         LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
719         DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
720 
721         /* Loop them */
722         while (PointerPte <= LastPte)
723         {
724             /* We'll only touch PTEs that aren't already valid */
725             if (PointerPte->u.Hard.Valid == 0)
726             {
727                 /* Use the next free page */
728                 TempPte.u.Hard.PageFrameNumber = FreePage;
729                 ASSERT(FreePageCount != 0);
730 
731                 /* Consume free pages */
732                 FreePage++;
733                 FreePageCount--;
734                 if (!FreePageCount)
735                 {
736                     /* Out of memory */
737                     KeBugCheckEx(INSTALL_MORE_MEMORY,
738                                  MmNumberOfPhysicalPages,
739                                  FreePageCount,
740                                  MxOldFreeDescriptor.PageCount,
741                                  1);
742                 }
743 
744                 /* Write out this PTE */
745                 PagesLeft++;
746                 MI_WRITE_VALID_PTE(PointerPte, TempPte);
747 
748                 /* Zero this page */
749                 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
750             }
751 
752             /* Next! */
753             PointerPte++;
754         }
755 
756         /* Do the next address range */
757         NextEntry = MdBlock->ListEntry.Flink;
758     }
759 
760     /* Now update the free descriptor to consume the pages we used up during the PFN allocation loop */
761     MxFreeDescriptor->BasePage = FreePage;
762     MxFreeDescriptor->PageCount = FreePageCount;
763 }
764 
765 VOID
766 NTAPI
767 INIT_FUNCTION
768 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
769 {
770     PMMPDE PointerPde;
771     PMMPTE PointerPte;
772     ULONG i, Count, j;
773     PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
774     PMMPFN Pfn1, Pfn2;
775     ULONG_PTR BaseAddress = 0;
776 
777     /* PFN of the startup page directory */
778     StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
779 
780     /* Start with the first PDE and scan them all */
781     PointerPde = MiAddressToPde(NULL);
782     Count = PD_COUNT * PDE_COUNT;
783     for (i = 0; i < Count; i++)
784     {
785         /* Check for valid PDE */
786         if (PointerPde->u.Hard.Valid == 1)
787         {
788             /* Get the PFN from it */
789             PageFrameIndex = PFN_FROM_PTE(PointerPde);
790 
791             /* Do we want a PFN entry for this page? */
792             if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
793             {
794                 /* Yes we do, set it up */
795                 Pfn1 = MiGetPfnEntry(PageFrameIndex);
796                 Pfn1->u4.PteFrame = StartupPdIndex;
797                 Pfn1->PteAddress = (PMMPTE)PointerPde;
798                 Pfn1->u2.ShareCount++;
799                 Pfn1->u3.e2.ReferenceCount = 1;
800                 Pfn1->u3.e1.PageLocation = ActiveAndValid;
801                 Pfn1->u3.e1.CacheAttribute = MiNonCached;
802 #if MI_TRACE_PFNS
803                 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
804                 memcpy(Pfn1->ProcessName, "Initial PDE", 16);
805 #endif
806             }
807             else
808             {
809                 /* No PFN entry */
810                 Pfn1 = NULL;
811             }
812 
813             /* Now get the PTE and scan the pages */
814             PointerPte = MiAddressToPte(BaseAddress);
815             for (j = 0; j < PTE_COUNT; j++)
816             {
817                 /* Check for a valid PTE */
818                 if (PointerPte->u.Hard.Valid == 1)
819                 {
820                     /* Increase the shared count of the PFN entry for the PDE */
821                     ASSERT(Pfn1 != NULL);
822                     Pfn1->u2.ShareCount++;
823 
824                     /* Now check if the PTE is valid memory too */
825                     PtePageIndex = PFN_FROM_PTE(PointerPte);
826                     if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
827                     {
828                         /*
829                          * Only add pages above the end of system code or pages
830                          * that are part of nonpaged pool
831                          */
832                         if ((BaseAddress >= 0xA0000000) ||
833                             ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
834                              (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
835                                             MmSizeOfNonPagedPoolInBytes)))
836                         {
837                             /* Get the PFN entry and make sure it too is valid */
838                             Pfn2 = MiGetPfnEntry(PtePageIndex);
839                             if ((MmIsAddressValid(Pfn2)) &&
840                                 (MmIsAddressValid(Pfn2 + 1)))
841                             {
842                                 /* Setup the PFN entry */
843                                 Pfn2->u4.PteFrame = PageFrameIndex;
844                                 Pfn2->PteAddress = PointerPte;
845                                 Pfn2->u2.ShareCount++;
846                                 Pfn2->u3.e2.ReferenceCount = 1;
847                                 Pfn2->u3.e1.PageLocation = ActiveAndValid;
848                                 Pfn2->u3.e1.CacheAttribute = MiNonCached;
849 #if MI_TRACE_PFNS
850                                 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
851                                 memcpy(Pfn2->ProcessName, "Initial PTE", 16);
852 #endif
853                             }
854                         }
855                     }
856                 }
857 
858                 /* Next PTE */
859                 PointerPte++;
860                 BaseAddress += PAGE_SIZE;
861             }
862         }
863         else
864         {
865             /* Next PDE mapped address */
866             BaseAddress += PDE_MAPPED_VA;
867         }
868 
869         /* Next PDE */
870         PointerPde++;
871     }
872 }
873 
874 VOID
875 NTAPI
876 INIT_FUNCTION
877 MiBuildPfnDatabaseZeroPage(VOID)
878 {
879     PMMPFN Pfn1;
880     PMMPDE PointerPde;
881 
882     /* Grab the lowest page and check if it has no real references */
883     Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
884     if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
885     {
886         /* Make it a bogus page to catch errors */
887         PointerPde = MiAddressToPde(0xFFFFFFFF);
888         Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
889         Pfn1->PteAddress = (PMMPTE)PointerPde;
890         Pfn1->u2.ShareCount++;
891         Pfn1->u3.e2.ReferenceCount = 0xFFF0;
892         Pfn1->u3.e1.PageLocation = ActiveAndValid;
893         Pfn1->u3.e1.CacheAttribute = MiNonCached;
894     }
895 }
896 
897 VOID
898 NTAPI
899 INIT_FUNCTION
900 MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
901 {
902     PLIST_ENTRY NextEntry;
903     PFN_NUMBER PageCount = 0;
904     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
905     PFN_NUMBER PageFrameIndex;
906     PMMPFN Pfn1;
907     PMMPTE PointerPte;
908     PMMPDE PointerPde;
909     KIRQL OldIrql;
910 
911     /* Now loop through the descriptors */
912     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
913     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
914     {
915         /* Get the current descriptor */
916         MdBlock = CONTAINING_RECORD(NextEntry,
917                                     MEMORY_ALLOCATION_DESCRIPTOR,
918                                     ListEntry);
919 
920         /* Read its data */
921         PageCount = MdBlock->PageCount;
922         PageFrameIndex = MdBlock->BasePage;
923 
924         /* Don't allow memory above what the PFN database is mapping */
925         if (PageFrameIndex > MmHighestPhysicalPage)
926         {
927             /* Since they are ordered, everything past here will be larger */
928             break;
929         }
930 
931         /* On the other hand, the end page might be higher up... */
932         if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
933         {
934             /* In which case we'll trim the descriptor to go as high as we can */
935             PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
936             MdBlock->PageCount = PageCount;
937 
938             /* But if there's nothing left to trim, we got too high, so quit */
939             if (!PageCount) break;
940         }
941 
942         /* Now check the descriptor type */
943         switch (MdBlock->MemoryType)
944         {
945             /* Check for bad RAM */
946             case LoaderBad:
947 
948                 DPRINT1("Either you have specified /BURNMEMORY or you have damaged RAM modules.\n");
949                 break;
950 
951             /* Check for free RAM */
952             case LoaderFree:
953             case LoaderLoadedProgram:
954             case LoaderFirmwareTemporary:
955             case LoaderOsloaderStack:
956 
957                 /* Get the last page of this descriptor. Note we loop backwards */
958                 PageFrameIndex += PageCount - 1;
959                 Pfn1 = MiGetPfnEntry(PageFrameIndex);
960 
961                 /* Lock the PFN Database */
962                 OldIrql = MiAcquirePfnLock();
963                 while (PageCount--)
964                 {
965                     /* If the page really has no references, mark it as free */
966                     if (!Pfn1->u3.e2.ReferenceCount)
967                     {
968                         /* Add it to the free list */
969                         Pfn1->u3.e1.CacheAttribute = MiNonCached;
970                         MiInsertPageInFreeList(PageFrameIndex);
971                     }
972 
973                     /* Go to the next page */
974                     Pfn1--;
975                     PageFrameIndex--;
976                 }
977 
978                 /* Release PFN database */
979                 MiReleasePfnLock(OldIrql);
980 
981                 /* Done with this block */
982                 break;
983 
984             /* Check for pages that are invisible to us */
985             case LoaderFirmwarePermanent:
986             case LoaderSpecialMemory:
987             case LoaderBBTMemory:
988 
989                 /* And skip them */
990                 break;
991 
992             default:
993 
994                 /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
995                 PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
996                 Pfn1 = MiGetPfnEntry(PageFrameIndex);
997                 while (PageCount--)
998                 {
999                     /* Check if the page is really unused */
1000                     PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
1001                     if (!Pfn1->u3.e2.ReferenceCount)
1002                     {
1003                         /* Mark it as being in-use */
1004                         Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
1005                         Pfn1->PteAddress = PointerPte;
1006                         Pfn1->u2.ShareCount++;
1007                         Pfn1->u3.e2.ReferenceCount = 1;
1008                         Pfn1->u3.e1.PageLocation = ActiveAndValid;
1009                         Pfn1->u3.e1.CacheAttribute = MiNonCached;
1010 #if MI_TRACE_PFNS
1011                         Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
1012 #endif
1013 
1014                         /* Check for RAM disk page */
1015                         if (MdBlock->MemoryType == LoaderXIPRom)
1016                         {
1017                             /* Make it a pseudo-I/O ROM mapping */
1018                             Pfn1->u1.Flink = 0;
1019                             Pfn1->u2.ShareCount = 0;
1020                             Pfn1->u3.e2.ReferenceCount = 0;
1021                             Pfn1->u3.e1.PageLocation = 0;
1022                             Pfn1->u3.e1.Rom = 1;
1023                             Pfn1->u4.InPageError = 0;
1024                             Pfn1->u3.e1.PrototypePte = 1;
1025                         }
1026                     }
1027 
1028                     /* Advance page structures */
1029                     Pfn1++;
1030                     PageFrameIndex++;
1031                     PointerPte++;
1032                 }
1033                 break;
1034         }
1035 
1036         /* Next descriptor entry */
1037         NextEntry = MdBlock->ListEntry.Flink;
1038     }
1039 }
1040 
1041 VOID
1042 NTAPI
1043 INIT_FUNCTION
1044 MiBuildPfnDatabaseSelf(VOID)
1045 {
1046     PMMPTE PointerPte, LastPte;
1047     PMMPFN Pfn1;
1048 
1049     /* Loop the PFN database pages */
1050     PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
1051     LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
1052     while (PointerPte <= LastPte)
1053     {
1054         /* Make sure the page is valid */
1055         if (PointerPte->u.Hard.Valid == 1)
1056         {
1057             /* Get the PFN entry and just mark it referenced */
1058             Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1059             Pfn1->u2.ShareCount = 1;
1060             Pfn1->u3.e2.ReferenceCount = 1;
1061 #if MI_TRACE_PFNS
1062             Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
1063 #endif
1064         }
1065 
1066         /* Next */
1067         PointerPte++;
1068     }
1069 }
1070 
1071 VOID
1072 NTAPI
1073 INIT_FUNCTION
1074 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1075 {
1076     /* Scan memory and start setting up PFN entries */
1077     MiBuildPfnDatabaseFromPages(LoaderBlock);
1078 
1079     /* Add the zero page */
1080     MiBuildPfnDatabaseZeroPage();
1081 
1082     /* Scan the loader block and build the rest of the PFN database */
1083     MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
1084 
1085     /* Finally add the pages for the PFN database itself */
1086     MiBuildPfnDatabaseSelf();
1087 }
1088 #endif /* !_M_AMD64 */
1089 
1090 VOID
1091 NTAPI
1092 INIT_FUNCTION
1093 MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1094 {
1095     PLIST_ENTRY NextMd;
1096     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1097     ULONG_PTR i;
1098     PFN_NUMBER BasePage, LoaderPages;
1099     PMMPFN Pfn1;
1100     KIRQL OldIrql;
1101     PPHYSICAL_MEMORY_RUN Buffer, Entry;
1102 
1103     /* Loop the descriptors in order to count them */
1104     i = 0;
1105     NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1106     while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1107     {
1108         MdBlock = CONTAINING_RECORD(NextMd,
1109                                     MEMORY_ALLOCATION_DESCRIPTOR,
1110                                     ListEntry);
1111         i++;
1112         NextMd = MdBlock->ListEntry.Flink;
1113     }
1114 
1115     /* Allocate a structure to hold the physical runs */
1116     Buffer = ExAllocatePoolWithTag(NonPagedPool,
1117                                    i * sizeof(PHYSICAL_MEMORY_RUN),
1118                                    'lMmM');
1119     ASSERT(Buffer != NULL);
1120     Entry = Buffer;
1121 
1122     /* Loop the descriptors again */
1123     NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1124     while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1125     {
1126         /* Check what kind this was */
1127         MdBlock = CONTAINING_RECORD(NextMd,
1128                                     MEMORY_ALLOCATION_DESCRIPTOR,
1129                                     ListEntry);
1130         switch (MdBlock->MemoryType)
1131         {
1132             /* Registry, NLS, and heap data */
1133             case LoaderRegistryData:
1134             case LoaderOsloaderHeap:
1135             case LoaderNlsData:
1136                 /* Are all candidates for deletion */
1137                 Entry->BasePage = MdBlock->BasePage;
1138                 Entry->PageCount = MdBlock->PageCount;
1139                 Entry++;
1140 
1141             /* We keep the rest */
1142             default:
1143                 break;
1144         }
1145 
1146         /* Move to the next descriptor */
1147         NextMd = MdBlock->ListEntry.Flink;
1148     }
1149 
1150     /* Acquire the PFN lock */
1151     OldIrql = MiAcquirePfnLock();
1152 
1153     /* Loop the runs */
1154     LoaderPages = 0;
1155     while (--Entry >= Buffer)
1156     {
1157         /* See how many pages are in this run */
1158         i = Entry->PageCount;
1159         BasePage = Entry->BasePage;
1160 
1161         /* Loop each page */
1162         Pfn1 = MiGetPfnEntry(BasePage);
1163         while (i--)
1164         {
1165             /* Check if it has references or is in any kind of list */
1166             if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
1167             {
1168                 /* Set the new PTE address and put this page into the free list */
1169                 Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
1170                 MiInsertPageInFreeList(BasePage);
1171                 LoaderPages++;
1172             }
1173             else if (BasePage)
1174             {
1175                 /* It has a reference, so simply drop it */
1176                 ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
1177 
1178                 /* Drop a dereference on this page, which should delete it */
1179                 Pfn1->PteAddress->u.Long = 0;
1180                 MI_SET_PFN_DELETED(Pfn1);
1181                 MiDecrementShareCount(Pfn1, BasePage);
1182                 LoaderPages++;
1183             }
1184 
1185             /* Move to the next page */
1186             Pfn1++;
1187             BasePage++;
1188         }
1189     }
1190 
1191     /* Release the PFN lock and flush the TLB */
1192     DPRINT("Loader pages freed: %lx\n", LoaderPages);
1193     MiReleasePfnLock(OldIrql);
1194     KeFlushCurrentTb();
1195 
1196     /* Free our run structure */
1197     ExFreePoolWithTag(Buffer, 'lMmM');
1198 }
1199 
1200 VOID
1201 NTAPI
1202 INIT_FUNCTION
1203 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
1204 {
1205     /* This function needs to do more work; for now, we only tune the page minimums */
1206 
1207     /* Check for a system with around 64MB RAM or more */
1208     if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
1209     {
1210         /* Double the minimum number of pages we consider for a "plenty free" scenario */
1211         MmPlentyFreePages *= 2;
1212     }
1213 }
1214 
1215 VOID
1216 NTAPI
1217 INIT_FUNCTION
1218 MiNotifyMemoryEvents(VOID)
1219 {
1220     /* Are we in a low-memory situation? */
1221     if (MmAvailablePages < MmLowMemoryThreshold)
1222     {
1223         /* Clear high, set low  */
1224         if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1225         if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1226     }
1227     else if (MmAvailablePages < MmHighMemoryThreshold)
1228     {
1229         /* We are in between, clear both */
1230         if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1231         if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1232     }
1233     else
1234     {
1235         /* Clear low, set high  */
1236         if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1237         if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1238     }
1239 }
1240 
1241 NTSTATUS
1242 NTAPI
1243 INIT_FUNCTION
1244 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
1245                     OUT PKEVENT *Event)
1246 {
1247     PACL Dacl;
1248     HANDLE EventHandle;
1249     ULONG DaclLength;
1250     NTSTATUS Status;
1251     OBJECT_ATTRIBUTES ObjectAttributes;
1252     SECURITY_DESCRIPTOR SecurityDescriptor;
1253 
1254     /* Create the SD */
1255     Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1256                                          SECURITY_DESCRIPTOR_REVISION);
1257     if (!NT_SUCCESS(Status)) return Status;
1258 
1259     /* One ACL with 3 ACEs, each containing one SID */
1260     DaclLength = sizeof(ACL) +
1261                  3 * sizeof(ACCESS_ALLOWED_ACE) +
1262                  RtlLengthSid(SeLocalSystemSid) +
1263                  RtlLengthSid(SeAliasAdminsSid) +
1264                  RtlLengthSid(SeWorldSid);
1265 
1266     /* Allocate space for the DACL */
1267     Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
1268     if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1269 
1270     /* Setup the ACL inside it */
1271     Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1272     if (!NT_SUCCESS(Status)) goto CleanUp;
1273 
1274     /* Add query rights for everyone */
1275     Status = RtlAddAccessAllowedAce(Dacl,
1276                                     ACL_REVISION,
1277                                     SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1278                                     SeWorldSid);
1279     if (!NT_SUCCESS(Status)) goto CleanUp;
1280 
1281     /* Full rights for the admin */
1282     Status = RtlAddAccessAllowedAce(Dacl,
1283                                     ACL_REVISION,
1284                                     EVENT_ALL_ACCESS,
1285                                     SeAliasAdminsSid);
1286     if (!NT_SUCCESS(Status)) goto CleanUp;
1287 
1288     /* As well as full rights for the system */
1289     Status = RtlAddAccessAllowedAce(Dacl,
1290                                     ACL_REVISION,
1291                                     EVENT_ALL_ACCESS,
1292                                     SeLocalSystemSid);
1293     if (!NT_SUCCESS(Status)) goto CleanUp;
1294 
1295     /* Set this DACL inside the SD */
1296     Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1297                                           TRUE,
1298                                           Dacl,
1299                                           FALSE);
1300     if (!NT_SUCCESS(Status)) goto CleanUp;
1301 
1302     /* Setup the event attributes, making sure it's a permanent one */
1303     InitializeObjectAttributes(&ObjectAttributes,
1304                                Name,
1305                                OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1306                                NULL,
1307                                &SecurityDescriptor);
1308 
1309     /* Create the event */
1310     Status = ZwCreateEvent(&EventHandle,
1311                            EVENT_ALL_ACCESS,
1312                            &ObjectAttributes,
1313                            NotificationEvent,
1314                            FALSE);
1315 CleanUp:
1316     /* Free the DACL */
1317     ExFreePoolWithTag(Dacl, 'lcaD');
1318 
1319     /* Check if this is the success path */
1320     if (NT_SUCCESS(Status))
1321     {
1322         /* Add a reference to the object, then close the handle we had */
1323         Status = ObReferenceObjectByHandle(EventHandle,
1324                                            EVENT_MODIFY_STATE,
1325                                            ExEventObjectType,
1326                                            KernelMode,
1327                                            (PVOID*)Event,
1328                                            NULL);
1329         ZwClose(EventHandle);
1330     }
1331 
1332     /* Return status */
1333     return Status;
1334 }
1335 
1336 BOOLEAN
1337 NTAPI
1338 INIT_FUNCTION
1339 MiInitializeMemoryEvents(VOID)
1340 {
1341     UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1342     UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1343     UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1344     UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1345     UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1346     UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1347     NTSTATUS Status;
1348 
1349     /* Check if we have a registry setting */
1350     if (MmLowMemoryThreshold)
1351     {
1352         /* Convert it to pages */
1353         MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1354     }
1355     else
1356     {
1357         /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1358         MmLowMemoryThreshold = MmPlentyFreePages;
1359 
1360         /* More than one GB of memory? */
1361         if (MmNumberOfPhysicalPages > 0x40000)
1362         {
1363             /* Start at 32MB, and add another 16MB for each GB */
1364             /* Start at 32MB, and add another 8MB for each additional GB */
1365             MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1366         }
1367         else if (MmNumberOfPhysicalPages > 0x8000)
1368         {
1369             /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1370             MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1371         }
1372 
1373         /* Don't let the minimum threshold go past 64MB */
1374         MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1375     }
1376 
1377     /* Check if we have a registry setting */
1378     if (MmHighMemoryThreshold)
1379     {
1380         /* Convert it into pages */
1381         MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1382     }
1383     else
1384     {
1385         /* Otherwise, the default is three times the low memory threshold */
1386         MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1387         ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1388     }
1389 
1390     /* Make sure high threshold is actually higher than the low */
1391     MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
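
    /*
     * Worked example (illustrative only): on a 2GB machine (0x80000 pages) with no
     * registry override, the low threshold becomes 32MB plus
     * ((0x80000 - 0x40000) >> 7) pages = 32MB + 8MB = 40MB (still under the 64MB
     * cap), and the high threshold defaults to three times that, i.e. 120MB.
     */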
1392 
1393     /* Create the memory events for all the thresholds */
1394     Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1395     if (!NT_SUCCESS(Status)) return FALSE;
1396     Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1397     if (!NT_SUCCESS(Status)) return FALSE;
1398     Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1399     if (!NT_SUCCESS(Status)) return FALSE;
1400     Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1401     if (!NT_SUCCESS(Status)) return FALSE;
1402     Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1403     if (!NT_SUCCESS(Status)) return FALSE;
1404     Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1405     if (!NT_SUCCESS(Status)) return FALSE;
1406 
1407     /* Now setup the pool events */
1408     MiInitializePoolEvents();
1409 
1410     /* Set the initial event state */
1411     MiNotifyMemoryEvents();
1412     return TRUE;
1413 }
1414 
1415 VOID
1416 NTAPI
1417 INIT_FUNCTION
1418 MiAddHalIoMappings(VOID)
1419 {
1420     PVOID BaseAddress;
1421     PMMPDE PointerPde, LastPde;
1422     PMMPTE PointerPte;
1423     ULONG j;
1424     PFN_NUMBER PageFrameIndex;
1425 
1426     /* HAL Heap address -- should be on a PDE boundary */
1427     BaseAddress = (PVOID)MM_HAL_VA_START;
1428     ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
1429 
1430     /* Check how many PDEs the heap has */
1431     PointerPde = MiAddressToPde(BaseAddress);
1432     LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);
1433 
1434     while (PointerPde <= LastPde)
1435     {
1436         /* Does the HAL own this mapping? */
1437         if ((PointerPde->u.Hard.Valid == 1) &&
1438             (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
1439         {
1440             /* Get the PTE for it and scan each page */
1441             PointerPte = MiAddressToPte(BaseAddress);
1442             for (j = 0; j < PTE_COUNT; j++)
1443             {
1444                 /* Does the HAL own this page? */
1445                 if (PointerPte->u.Hard.Valid == 1)
1446                 {
1447                     /* Is the HAL using it for device or I/O mapped memory? */
1448                     PageFrameIndex = PFN_FROM_PTE(PointerPte);
1449                     if (!MiGetPfnEntry(PageFrameIndex))
1450                     {
1451                         /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
1452                         DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
1453                     }
1454                 }
1455 
1456                 /* Move to the next page */
1457                 BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
1458                 PointerPte++;
1459             }
1460         }
1461         else
1462         {
1463             /* Move to the next address */
1464             BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
1465         }
1466 
1467         /* Move to the next PDE */
1468         PointerPde++;
1469     }
1470 }
1471 
1472 VOID
1473 NTAPI
1474 MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
1475 {
1476     ULONG i;
1477     PMMPFN Pfn1;
1478     PCHAR Consumer = "Unknown";
1479     KIRQL OldIrql;
1480     ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
1481 #if MI_TRACE_PFNS
1482     ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
1483     PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
1484     {
1485         "Not set",
1486         "Paged Pool",
1487         "Nonpaged Pool",
1488         "Nonpaged Pool Ex",
1489         "Kernel Stack",
1490         "Kernel Stack Ex",
1491         "System PTE",
1492         "VAD",
1493         "PEB/TEB",
1494         "Section",
1495         "Page Table",
1496         "Page Directory",
1497         "Old Page Table",
1498         "Driver Page",
1499         "Contiguous Alloc",
1500         "MDL",
1501         "Demand Zero",
1502         "Zero Loop",
1503         "Cache",
1504         "PFN Database",
1505         "Boot Driver",
1506         "Initial Memory",
1507         "Free Page"
1508     };
1509 #endif
1510     //
1511     // Loop the PFN database
1512     //
1513     KeRaiseIrql(HIGH_LEVEL, &OldIrql);
1514     for (i = 0; i <= MmHighestPhysicalPage; i++)
1515     {
1516         Pfn1 = MiGetPfnEntry(i);
1517         if (!Pfn1) continue;
1518 #if MI_TRACE_PFNS
1519         ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
1520 #endif
1521         //
1522         // Get the page location
1523         //
1524         switch (Pfn1->u3.e1.PageLocation)
1525         {
1526             case ActiveAndValid:
1527 
1528                 Consumer = "Active and Valid";
1529                 ActivePages++;
1530                 break;
1531 
1532             case ZeroedPageList:
1533 
1534                 Consumer = "Zero Page List";
1535                 FreePages++;
1536                 break;//continue;
1537 
1538             case FreePageList:
1539 
1540                 Consumer = "Free Page List";
1541                 FreePages++;
1542                 break;//continue;
1543 
1544             default:
1545 
1546                 Consumer = "Other (ASSERT!)";
1547                 OtherPages++;
1548                 break;
1549         }
1550 
1551 #if MI_TRACE_PFNS
1552         /* Add into bucket */
1553         UsageBucket[Pfn1->PfnUsage]++;
1554 #endif
1555 
1556         //
1557         // Pretty-print the page
1558         //
1559         if (!StatusOnly)
1560         DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s]\n",
1561                  i << PAGE_SHIFT,
1562                  Consumer,
1563                  Pfn1->u3.e2.ReferenceCount,
1564                  Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
1565 #if MI_TRACE_PFNS
1566                  MI_USAGE_TEXT[Pfn1->PfnUsage],
1567                  Pfn1->ProcessName);
1568 #else
1569                  "Page tracking",
1570                  "is disabled");
1571 #endif
1572     }
1573 
1574     DbgPrint("Active:               %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
1575     DbgPrint("Free:                 %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
1576     DbgPrint("Other:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1577     DbgPrint("-----------------------------------------\n");
1578 #if MI_TRACE_PFNS
1579     OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
1580     DbgPrint("Boot Images:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1581     OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
1582     DbgPrint("System Drivers:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1583     OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
1584     DbgPrint("PFN Database:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1585     OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_PAGE_DIRECTORY] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
1586     DbgPrint("Page Tables:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1587     OtherPages = UsageBucket[MI_USAGE_SYSTEM_PTE];
1588     DbgPrint("System PTEs:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1589     OtherPages = UsageBucket[MI_USAGE_VAD];
1590     DbgPrint("VADs:                 %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1591     OtherPages = UsageBucket[MI_USAGE_CONTINOUS_ALLOCATION];
1592     DbgPrint("Continuous Allocs:    %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1593     OtherPages = UsageBucket[MI_USAGE_MDL];
1594     DbgPrint("MDLs:                 %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1595     OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
1596     DbgPrint("NonPaged Pool:        %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1597     OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
1598     DbgPrint("Paged Pool:           %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1599     OtherPages = UsageBucket[MI_USAGE_DEMAND_ZERO];
1600     DbgPrint("Demand Zero:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1601     OtherPages = UsageBucket[MI_USAGE_ZERO_LOOP];
1602     DbgPrint("Zero Loop:            %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1603     OtherPages = UsageBucket[MI_USAGE_PEB_TEB];
1604     DbgPrint("PEB/TEB:              %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1605     OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
1606     DbgPrint("Kernel Stack:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1607     OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
1608     DbgPrint("Init Memory:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1609     OtherPages = UsageBucket[MI_USAGE_SECTION];
1610     DbgPrint("Sections:             %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1611     OtherPages = UsageBucket[MI_USAGE_CACHE];
1612     DbgPrint("Cache:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1613     OtherPages = UsageBucket[MI_USAGE_FREE_PAGE];
1614     DbgPrint("Free:                 %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1615 #endif
1616     KeLowerIrql(OldIrql);
1617 }
1618 
1619 PPHYSICAL_MEMORY_DESCRIPTOR
1620 NTAPI
1621 INIT_FUNCTION
1622 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1623                          IN PBOOLEAN IncludeType)
1624 {
1625     PLIST_ENTRY NextEntry;
1626     ULONG Run = 0, InitialRuns;
1627     PFN_NUMBER NextPage = -1, PageCount = 0;
1628     PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1629     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1630 
1631     //
1632     // Start with the maximum we might need
1633     //
1634     InitialRuns = MiNumberDescriptors;
1635 
1636     //
1637     // Allocate the maximum we'll ever need
1638     //
1639     Buffer = ExAllocatePoolWithTag(NonPagedPool,
1640                                    sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1641                                    sizeof(PHYSICAL_MEMORY_RUN) *
1642                                    (InitialRuns - 1),
1643                                    'lMmM');
1644     if (!Buffer) return NULL;
1645 
1646     //
1647     // For now that's how many runs we have
1648     //
1649     Buffer->NumberOfRuns = InitialRuns;
1650 
1651     //
1652     // Now loop through the descriptors again
1653     //
1654     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1655     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1656     {
1657         //
1658         // Grab each one, and check if it's one we should include
1659         //
1660         MdBlock = CONTAINING_RECORD(NextEntry,
1661                                     MEMORY_ALLOCATION_DESCRIPTOR,
1662                                     ListEntry);
1663         if ((MdBlock->MemoryType < LoaderMaximum) &&
1664             (IncludeType[MdBlock->MemoryType]))
1665         {
1666             //
1667             // Add this to our running total
1668             //
1669             PageCount += MdBlock->PageCount;
1670 
1671             //
1672             // Check if this descriptor starts right where the previous run ended
1673             //
1674             if (MdBlock->BasePage == NextPage)
1675             {
1676                 //
1677                 // Combine it into the same physical run
1678                 //
1679                 ASSERT(MdBlock->PageCount != 0);
1680                 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1681                 NextPage += MdBlock->PageCount;
1682             }
1683             else
1684             {
1685                 //
1686                 // Otherwise just duplicate the descriptor's contents
1687                 //
1688                 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1689                 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1690                 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1691 
1692                 //
1693                 // And in this case, increase the number of runs
1694                 //
1695                 Run++;
1696             }
1697         }
1698 
1699         //
1700         // Try the next descriptor
1701         //
1702         NextEntry = MdBlock->ListEntry.Flink;
1703     }
1704 
1705     //
1706     // We should not have been able to go past our initial estimate
1707     //
1708     ASSERT(Run <= Buffer->NumberOfRuns);
1709 
1710     //
1711     // Our initial estimate was probably too generous...
1712     //
1713     if (InitialRuns > Run)
1714     {
1715         //
1716         // Allocate a more accurately sized buffer
1717         //
1718         NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1719                                           sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1720                                           sizeof(PHYSICAL_MEMORY_RUN) *
1721                                           (Run - 1),
1722                                           'lMmM');
1723         if (NewBuffer)
1724         {
1725             //
1726             // Copy the old buffer into the new, then free it
1727             //
1728             RtlCopyMemory(NewBuffer->Run,
1729                           Buffer->Run,
1730                           sizeof(PHYSICAL_MEMORY_RUN) * Run);
1731             ExFreePoolWithTag(Buffer, 'lMmM');
1732 
1733             //
1734             // Now use the new buffer
1735             //
1736             Buffer = NewBuffer;
1737         }
1738     }
1739 
1740     //
1741     // Write the final numbers, and return it
1742     //
1743     Buffer->NumberOfRuns = Run;
1744     Buffer->NumberOfPages = PageCount;
1745     return Buffer;
1746 }
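//
// Worked example (hypothetical descriptor list, not from a real boot): two
// included descriptors { BasePage = 0x100, PageCount = 0x80 } and
// { BasePage = 0x180, PageCount = 0x40 } are adjacent, so the loop above folds
// them into a single PHYSICAL_MEMORY_RUN:
//
//   Run[0].BasePage  = 0x100;
//   Run[0].PageCount = 0xC0;   /* 0x80 + 0x40 */
//
// A descriptor starting anywhere else would instead open a new run and bump
// the run counter accordingly.
//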
1747 
1748 VOID
1749 NTAPI
1750 INIT_FUNCTION
1751 MiBuildPagedPool(VOID)
1752 {
1753     PMMPTE PointerPte;
1754     PMMPDE PointerPde;
1755     MMPDE TempPde = ValidKernelPde;
1756     PFN_NUMBER PageFrameIndex;
1757     KIRQL OldIrql;
1758     SIZE_T Size;
1759     ULONG BitMapSize;
1760 #if (_MI_PAGING_LEVELS >= 3)
1761     MMPPE TempPpe = ValidKernelPpe;
1762     PMMPPE PointerPpe;
1763 #elif (_MI_PAGING_LEVELS == 2)
1764     MMPTE TempPte = ValidKernelPte;
1765 
1766     //
1767     // Get the page frame number for the system page directory
1768     //
1769     PointerPte = MiAddressToPte(PDE_BASE);
1770     ASSERT(PD_COUNT == 1);
1771     MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
1772 
1773     //
1774     // Allocate a system PTE which will hold a copy of the page directory
1775     //
1776     PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
1777     ASSERT(PointerPte);
1778     MmSystemPagePtes = MiPteToAddress(PointerPte);
1779 
1780     //
1781     // Make this system PTE point to the system page directory.
1782     // It is now essentially double-mapped. This will be used later for lazy
1783     // evaluation of PDEs across process switches, similarly to how the Global
1784     // page directory array in the old ReactOS Mm is used (but in a less hacky
1785     // way).
1786     //
1787     TempPte = ValidKernelPte;
1788     ASSERT(PD_COUNT == 1);
1789     TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
1790     MI_WRITE_VALID_PTE(PointerPte, TempPte);
1791 #endif
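    //
    // Rough sketch (2-level builds, illustrative only -- the real logic lives
    // in the fault path and handles PAE and races): because MmSystemPagePtes
    // aliases the system page directory, a missing kernel PDE in the current
    // process can be repaired by copying the master entry, roughly:
    //
    //   PointerPde = MiAddressToPde(FaultingAddress);   /* FaultingAddress is hypothetical */
    //   if (PointerPde->u.Hard.Valid == 0)
    //   {
    //       /* Copy the PDE from the double-mapped system page directory */
    //       PointerPde->u.Long = MmSystemPagePtes[PointerPde - (PMMPDE)PDE_BASE].u.Long;
    //   }
    //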
1792 
1793 #ifdef _M_IX86
1794     //
1795     // Let's get back to paged pool work: size it up.
1796     // By default, it should be twice as big as nonpaged pool.
1797     //
1798     MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
1799     if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
1800                                     (ULONG_PTR)MmPagedPoolStart))
1801     {
1802         //
1803         // On the other hand, we have limited VA space, so make sure that the VA
1804         // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
1805         // whatever maximum is possible.
1806         //
1807         MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
1808                                    (ULONG_PTR)MmPagedPoolStart;
1809     }
1810 #endif // _M_IX86
1811 
1812     //
1813     // Get the size in pages and make sure paged pool is at least 32MB.
1814     //
1815     Size = MmSizeOfPagedPoolInBytes;
1816     if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
1817     Size = BYTES_TO_PAGES(Size);
1818 
1819     //
1820     // Now check how many page tables (1024 PTEs each) are required to map this many pages.
1821     //
1822     Size = (Size + (1024 - 1)) / 1024;
1823 
1824     //
1825     // Recompute the size of the paged pool, rounded up to whole page tables, in bytes and pages.
1826     //
1827     MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
1828     MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
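    //
    // Worked example (hypothetical values, x86, 4KB pages): if nonpaged pool
    // were capped at 16MB, the sizing above works out roughly as:
    //
    //   MmSizeOfPagedPoolInBytes = 2 * 16MB               = 32MB
    //   Size = BYTES_TO_PAGES(32MB)                       = 8192 pages
    //   Size = (8192 + 1023) / 1024                       = 8 page tables
    //   MmSizeOfPagedPoolInBytes = 8 * PAGE_SIZE * 1024   = 32MB
    //   MmSizeOfPagedPoolInPages = 32MB >> PAGE_SHIFT     = 8192 pages
    //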
1829 
1830 #ifdef _M_IX86
1831     //
1832     // Let's be really sure this doesn't overflow into nonpaged system VA
1833     //
1834     ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
1835            (ULONG_PTR)MmNonPagedSystemStart);
1836 #endif // _M_IX86
1837 
1838     //
1839     // This is where paged pool ends
1840     //
1841     MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
1842                               MmSizeOfPagedPoolInBytes) - 1);
1843 
1844     //
1845     // Lock the PFN database
1846     //
1847     OldIrql = MiAcquirePfnLock();
1848 
1849 #if (_MI_PAGING_LEVELS >= 3)
1850     /* On these systems there is no double-mapping; instead, the PPEs are set
1851      * up to span the entire paged pool area, so there is no need for the
1852      * system PD */
1853     for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
1854          PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
1855          PointerPpe++)
1856     {
1857         /* Check if the PPE is already valid */
1858         if (!PointerPpe->u.Hard.Valid)
1859         {
1860             /* It is not, so map a fresh zeroed page */
1861             TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
1862             MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
1863         }
1864     }
1865 #endif
1866 
1867     //
1868     // So now get the PDE for paged pool and zero it out
1869     //
1870     PointerPde = MiAddressToPde(MmPagedPoolStart);
1871     RtlZeroMemory(PointerPde,
1872                   (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));
1873 
1874     //
1875     // Next, get the first and last PTE
1876     //
1877     PointerPte = MiAddressToPte(MmPagedPoolStart);
1878     MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
1879     MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
1880 
1881     /* Allocate a page and map the first paged pool PDE */
1882     MI_SET_USAGE(MI_USAGE_PAGED_POOL);
1883     MI_SET_PROCESS2("Kernel");
1884     PageFrameIndex = MiRemoveZeroPage(0);
1885     TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
1886     MI_WRITE_VALID_PDE(PointerPde, TempPde);
1887 #if (_MI_PAGING_LEVELS >= 3)
1888     /* Use the PPE of MmPagedPoolStart that was setup above */
1889 //    Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
1890 
1891     /* Initialize the PFN entry for it */
1892     MiInitializePfnForOtherProcess(PageFrameIndex,
1893                                    (PMMPTE)PointerPde,
1894                                    PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
1895 #else
1896     /* Do it this way */
1897 //    Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
1898 
1899     /* Initialize the PFN entry for it */
1900     MiInitializePfnForOtherProcess(PageFrameIndex,
1901                                    (PMMPTE)PointerPde,
1902                                    MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_COUNT]);
1903 #endif
1904 
1905     //
1906     // Release the PFN database lock
1907     //
1908     MiReleasePfnLock(OldIrql);
1909 
1910     //
1911     // We only have one PDE mapped for now... at fault time, additional PDEs
1912     // will be allocated to handle paged pool growth. This is where they'll have
1913     // to start.
1914     //
1915     MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;
1916 
1917     //
1918     // We keep track of each page via a bit, so check how big the bitmap will
1919     // have to be (make sure to align our page count such that it fits nicely
1920     // into a 4-byte aligned bitmap).
1921     //
1922     // We'll also allocate the bitmap header itself as part of the same buffer.
1923     //
1924     Size = Size * 1024;
1925     ASSERT(Size == MmSizeOfPagedPoolInPages);
1926     BitMapSize = (ULONG)Size;
1927     Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
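    //
    // Worked example (assuming, for illustration, an 8192-page paged pool): the
    // bitmap needs one bit per page, rounded up to whole ULONGs, plus the
    // RTL_BITMAP header:
    //
    //   BitMapSize = 8192 bits
    //   Size = sizeof(RTL_BITMAP) + ((8192 + 31) / 32) * sizeof(ULONG)
    //        = 8 + 256 * 4 = 1032 bytes (on 32-bit builds)
    //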
1928 
1929     //
1930     // Allocate the allocation bitmap, which tells us which regions have not yet
1931     // been mapped into memory
1932     //
1933     MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
1934                                                                    Size,
1935                                                                    TAG_MM);
1936     ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
1937 
1938     //
1939     // Initialize it such that, at first, only the first page table's worth of
1940     // pages (the PDE we just mapped) is marked available; the rest stays set.
1941     //
1942     RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
1943                         (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
1944                         BitMapSize);
1945     RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
1946     RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);
1947 
1948     //
1949     // We have a second bitmap, which keeps track of where allocations end.
1950     // Given the allocation bitmap and a base address, we can therefore figure
1951     // out which page is the last page of that allocation, and thus how big the
1952     // entire allocation is.
1953     //
1954     MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
1955                                                                  Size,
1956                                                                  TAG_MM);
1957     ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
1958     RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
1959                         (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
1960                         BitMapSize);
1961 
1962     //
1963     // Since no allocations have been made yet, no end-of-allocation bits are set
1964     //
1965     RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);
1966 
1967     //
1968     // Initialize paged pool.
1969     //
1970     InitializePool(PagedPool, 0);
1971 
1972     /* Initialize special pool */
1973     MiInitializeSpecialPool();
1974 
1975     /* Default low threshold of 30MB or one fifth of paged pool */
1976     MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
1977     MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, MmSizeOfPagedPoolInPages / 5);
1978 
1979     /* Default high threshold of 60MB or two fifths of paged pool */
1980     MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
1981     MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (MmSizeOfPagedPoolInPages * 2) / 5);
1982     ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
1983 
1984     /* Setup the global session space */
1985     MiInitializeSystemSpaceMap(NULL);
1986 }
1987 
1988 VOID
1989 NTAPI
1990 INIT_FUNCTION
1991 MiDbgDumpMemoryDescriptors(VOID)
1992 {
1993     PLIST_ENTRY NextEntry;
1994     PMEMORY_ALLOCATION_DESCRIPTOR Md;
1995     PFN_NUMBER TotalPages = 0;
1996     PCHAR
1997     MemType[] =
1998     {
1999         "ExceptionBlock    ",
2000         "SystemBlock       ",
2001         "Free              ",
2002         "Bad               ",
2003         "LoadedProgram     ",
2004         "FirmwareTemporary ",
2005         "FirmwarePermanent ",
2006         "OsloaderHeap      ",
2007         "OsloaderStack     ",
2008         "SystemCode        ",
2009         "HalCode           ",
2010         "BootDriver        ",
2011         "ConsoleInDriver   ",
2012         "ConsoleOutDriver  ",
2013         "StartupDpcStack   ",
2014         "StartupKernelStack",
2015         "StartupPanicStack ",
2016         "StartupPcrPage    ",
2017         "StartupPdrPage    ",
2018         "RegistryData      ",
2019         "MemoryData        ",
2020         "NlsData           ",
2021         "SpecialMemory     ",
2022         "BBTMemory         ",
2023         "LoaderReserve     ",
2024         "LoaderXIPRom      "
2025     };
2026 
2027     DPRINT1("Base\t\tLength\t\tType\n");
2028     for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
2029          NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
2030          NextEntry = NextEntry->Flink)
2031     {
2032         Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
2033         DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
2034         TotalPages += Md->PageCount;
2035     }
2036 
2037     DPRINT1("Total: %08lX (%lu MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
2038 }
2039 
2040 BOOLEAN
2041 NTAPI
2042 INIT_FUNCTION
2043 MmArmInitSystem(IN ULONG Phase,
2044                 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
2045 {
2046     ULONG i;
2047     BOOLEAN IncludeType[LoaderMaximum];
2048     PVOID Bitmap;
2049     PPHYSICAL_MEMORY_RUN Run;
2050     PFN_NUMBER PageCount;
2051 #if DBG
2052     ULONG j;
2053     PMMPTE PointerPte, TestPte;
2054     MMPTE TempPte;
2055 #endif
2056 
2057     /* Dump memory descriptors */
2058     if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
2059 
2060     //
2061     // Mark the memory types that we don't consider RAM/usable
2062     // We use the same exclusions that Windows does, in order to try to be
2063     // compatible with WinLDR-style booting
2064     //
2065     for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
2066     IncludeType[LoaderBad] = FALSE;
2067     IncludeType[LoaderFirmwarePermanent] = FALSE;
2068     IncludeType[LoaderSpecialMemory] = FALSE;
2069     IncludeType[LoaderBBTMemory] = FALSE;
2070     if (Phase == 0)
2071     {
2072         /* Count physical pages on the system */
2073         MiScanMemoryDescriptors(LoaderBlock);
2074 
2075         /* Initialize the phase 0 temporary event */
2076         KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
2077 
2078         /* Set all the events to use the temporary event for now */
2079         MiLowMemoryEvent = &MiTempEvent;
2080         MiHighMemoryEvent = &MiTempEvent;
2081         MiLowPagedPoolEvent = &MiTempEvent;
2082         MiHighPagedPoolEvent = &MiTempEvent;
2083         MiLowNonPagedPoolEvent = &MiTempEvent;
2084         MiHighNonPagedPoolEvent = &MiTempEvent;
2085 
2086         //
2087         // Default throttling limits for Cc
2088     // May be adjusted later on depending on the system type
2089         //
2090         MmThrottleTop = 450;
2091         MmThrottleBottom = 127;
2092 
2093         //
2094         // Define the basic user vs. kernel address space separation
2095         //
2096         MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
2097         MmUserProbeAddress = (ULONG_PTR)MI_USER_PROBE_ADDRESS;
2098         MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;
2099 
2100         /* Highest PTE and PDE based on the addresses above */
2101         MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
2102         MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
2103 #if (_MI_PAGING_LEVELS >= 3)
2104         MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
2105 #if (_MI_PAGING_LEVELS >= 4)
2106         MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
2107 #endif
2108 #endif
2109         //
2110         // Get the size of the boot loader's image allocations and then round
2111         // that region up to a PDE size, so that any PDEs we might create for
2112     // whatever follows are separate from the PDEs that the boot loader might've
2113         // already created (and later, we can blow all that away if we want to).
2114         //
2115         MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
2116         MmBootImageSize *= PAGE_SIZE;
2117         MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
2118         ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
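        //
        // Worked example (hypothetical loader value): if the loader spanned
        // 0x1A7 pages, then with 4KB pages and a 4MB PDE_MAPPED_VA:
        //
        //   MmBootImageSize = 0x1A7 * 0x1000        = 0x1A7000 bytes
        //   rounded up to PDE_MAPPED_VA             = 0x400000 bytes (4MB)
        //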
2119 
2120         /* Initialize session space address layout */
2121         MiInitializeSessionSpaceLayout();
2122 
2123         /* Set the based section highest address */
2124         MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
2125 
2126 #if DBG
2127         /* The subsection PTE format depends on things being 8-byte aligned */
2128         ASSERT((sizeof(CONTROL_AREA) % 8) == 0);
2129         ASSERT((sizeof(SUBSECTION) % 8) == 0);
2130 
2131         /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
2132         PointerPte = (PMMPTE)MmPagedPoolStart;
2133         MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2134         TestPte = MiProtoPteToPte(&TempPte);
2135         ASSERT(PointerPte == TestPte);
2136 
2137         /* Try the last nonpaged pool address */
2138         PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
2139         MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2140         TestPte = MiProtoPteToPte(&TempPte);
2141         ASSERT(PointerPte == TestPte);
2142 
2143         /* Try a bunch of random addresses near the end of the address space */
2144         PointerPte = (PMMPTE)((ULONG_PTR)MI_HIGHEST_SYSTEM_ADDRESS - 0x37FFF);
2145         for (j = 0; j < 20; j += 1)
2146         {
2147             MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2148             TestPte = MiProtoPteToPte(&TempPte);
2149             ASSERT(PointerPte == TestPte);
2150             PointerPte++;
2151         }
2152 
2153         /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
2154         PointerPte = (PMMPTE)((ULONG_PTR)MmNonPagedPoolStart + (MmSizeOfNonPagedPoolInBytes / 2));
2155         MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
2156         TestPte = MiSubsectionPteToSubsection(&TempPte);
2157         ASSERT(PointerPte == TestPte);
2158 #endif
2159 
2160         /* Loop all 8 standby lists */
2161         for (i = 0; i < 8; i++)
2162         {
2163             /* Initialize them */
2164             MmStandbyPageListByPriority[i].Total = 0;
2165             MmStandbyPageListByPriority[i].ListName = StandbyPageList;
2166             MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
2167             MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
2168         }
2169 
2170         /* Initialize the user mode image list */
2171         InitializeListHead(&MmLoadedUserImageList);
2172 
2173         /* Initialize critical section timeout value (relative time is negative) */
2174         MmCriticalSectionTimeout.QuadPart = MmCritsectTimeoutSeconds * (-10000000LL);
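        /* For reference: kernel relative timeouts are expressed in negative
         * 100-nanosecond units, hence the -10000000LL (one second) factor above */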
2175 
2176         /* Initialize the paged pool mutex and the section commit mutex */
2177         KeInitializeGuardedMutex(&MmPagedPoolMutex);
2178         KeInitializeGuardedMutex(&MmSectionCommitMutex);
2179         KeInitializeGuardedMutex(&MmSectionBasedMutex);
2180 
2181         /* Initialize the Loader Lock */
2182         KeInitializeMutant(&MmSystemLoadLock, FALSE);
2183 
2184         /* Set the zero page event */
2185         KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
2186         MmZeroingPageThreadActive = FALSE;
2187 
2188         /* Initialize the dead stack S-LIST */
2189         InitializeSListHead(&MmDeadStackSListHead);
2190 
2191         //
2192         // Check if this is a machine with less than 19MB of RAM
2193         //
2194         PageCount = MmNumberOfPhysicalPages;
2195         if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
2196         {
2197             //
2198             // Use the very minimum of system PTEs
2199             //
2200             MmNumberOfSystemPtes = 7000;
2201         }
2202         else
2203         {
2204             //
2205             // Use the default
2206             //
2207             MmNumberOfSystemPtes = 11000;
2208             if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
2209             {
2210                 //
2211                 // Double the amount of system PTEs
2212                 //
2213                 MmNumberOfSystemPtes <<= 1;
2214             }
2215             if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
2216             {
2217                 //
2218                 // Double the amount of system PTEs
2219                 //
2220                 MmNumberOfSystemPtes <<= 1;
2221             }
2222             if (MmSpecialPoolTag != 0 && MmSpecialPoolTag != -1)
2223             {
2224                 //
2225                 // Add some extra PTEs for special pool
2226                 //
2227                 MmNumberOfSystemPtes += 0x6000;
2228             }
2229         }
2230 
2231         DPRINT("System PTE count has been tuned to %lu (%lu bytes)\n",
2232                MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
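        //
        // Summary of the tuning above (the page thresholds are the
        // MI_MIN_PAGES_FOR_SYSPTE_* constants; listed here only to show the tiers):
        //
        //   below the tuning threshold        -> 7000 system PTEs
        //   default                           -> 11000
        //   above the first boost threshold   -> 22000
        //   above the second boost threshold  -> 44000
        //   special pool tag configured       -> +0x6000 on top
        //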
2233 
2234         /* Check if no values are set for the heap limits */
2235         if (MmHeapSegmentReserve == 0)
2236         {
2237             MmHeapSegmentReserve = 2 * _1MB;
2238         }
2239 
2240         if (MmHeapSegmentCommit == 0)
2241         {
2242             MmHeapSegmentCommit = 2 * PAGE_SIZE;
2243         }
2244 
2245         if (MmHeapDeCommitTotalFreeThreshold == 0)
2246         {
2247             MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
2248         }
2249 
2250         if (MmHeapDeCommitFreeBlockThreshold == 0)
2251         {
2252             MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
2253         }
2254 
2255         /* Initialize the working set lock */
2256         ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);
2257 
2258         /* Set commit limit */
2259         MmTotalCommitLimit = (2 * _1GB) >> PAGE_SHIFT;
2260         MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2261 
2262         /* Has the allocation fragment been setup? */
2263         if (!MmAllocationFragment)
2264         {
2265             /* Use the default value */
2266             MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
2267             if (PageCount < ((256 * _1MB) / PAGE_SIZE))
2268             {
2269                 /* On systems with less than 256MB of memory, divide by 4 */
2270                 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
2271             }
2272             else if (PageCount < (_1GB / PAGE_SIZE))
2273             {
2274                 /* On systems with less than 1GB, divide by 2 */
2275                 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
2276             }
2277         }
2278         else
2279         {
2280             /* Convert from 1KB fragments to pages */
2281             MmAllocationFragment *= _1KB;
2282             MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
2283 
2284             /* Don't let it past the maximum */
2285             MmAllocationFragment = min(MmAllocationFragment,
2286                                        MI_MAX_ALLOCATION_FRAGMENT);
2287 
2288             /* Don't let it get too small either */
2289             MmAllocationFragment = max(MmAllocationFragment,
2290                                        MI_MIN_ALLOCATION_FRAGMENT);
2291         }
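        //
        // Worked example (hypothetical registry value): a configured fragment
        // of 96 (interpreted as KB) becomes
        //
        //   96 * 1KB = 98304 bytes -> ROUND_TO_PAGES -> 98304 (already page aligned)
        //
        // and is then clamped into [MI_MIN_ALLOCATION_FRAGMENT,
        // MI_MAX_ALLOCATION_FRAGMENT] by the min/max pair above.
        //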
2292 
2293         /* Check for kernel stack size that's too big */
2294         if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
2295         {
2296             /* Sanitize to default value */
2297             MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
2298         }
2299         else
2300         {
2301             /* Take the registry setting, and convert it into bytes */
2302             MmLargeStackSize *= _1KB;
2303 
2304             /* Now align it to a page boundary */
2305             MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);
2306 
2307             /* Sanity checks */
2308             ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
2309             ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);
2310 
2311             /* Make sure it's not too low */
2312             if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
2313         }
2314 
2315         /* Compute color information (L2 cache-separated paging lists) */
2316         MiComputeColorInformation();
2317 
2318         // Calculate the number of bytes for the PFN database
2319         // then add the color tables and convert to pages
2320         MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
2321         MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
2322         MxPfnAllocation >>= PAGE_SHIFT;
2323 
2324         // We have to add one to the count here, because in the process of
2325         // shifting down to the page size, we actually ended up getting the
2326         // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
2327         // Later on, we'll shift this number back into bytes, which would cause
2328         // us to end up with only 0x5F000 bytes -- when we actually want to have
2329         // 0x60000 bytes.
2330         MxPfnAllocation++;
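        //
        // Worked example (hypothetical 512MB machine, 4KB pages, sizeof(MMPFN)
        // taken as 0x18 purely for illustration -- the real size is
        // architecture/build dependent):
        //
        //   Pages  = 512MB / 4KB                        = 0x20000
        //   PFN DB = 0x20000 * 0x18                     = 0x300000 bytes
        //   plus color tables, >> PAGE_SHIFT, then + 1  = roughly 0x301 pages
        //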
2331 
2332         /* Initialize the platform-specific parts */
2333         MiInitMachineDependent(LoaderBlock);
2334 
2335         //
2336         // Build the physical memory block
2337         //
2338         MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
2339                                                          IncludeType);
2340 
2341         //
2342         // Allocate enough buffer for the PFN bitmap
2343         // Align it up to a 32-bit boundary
2344         //
2345         Bitmap = ExAllocatePoolWithTag(NonPagedPool,
2346                                        (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
2347                                        TAG_MM);
2348         if (!Bitmap)
2349         {
2350             //
2351             // This is critical
2352             //
2353             KeBugCheckEx(INSTALL_MORE_MEMORY,
2354                          MmNumberOfPhysicalPages,
2355                          MmLowestPhysicalPage,
2356                          MmHighestPhysicalPage,
2357                          0x101);
2358         }
2359 
2360         //
2361         // Initialize it and clear all the bits to begin with
2362         //
2363         RtlInitializeBitMap(&MiPfnBitMap,
2364                             Bitmap,
2365                             (ULONG)MmHighestPhysicalPage + 1);
2366         RtlClearAllBits(&MiPfnBitMap);
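        //
        // Worked example (same hypothetical 512MB machine): with a highest
        // physical page of 0x1FFFF, the bitmap buffer allocated above is
        //
        //   ((0x20000 + 31) / 32) * 4 = 0x4000 bytes (16KB, one bit per page)
        //
        // and each run below then sets the bits for the pages it actually covers.
        //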
2367 
2368         //
2369         // Loop physical memory runs
2370         //
2371         for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
2372         {
2373             //
2374             // Get the run
2375             //
2376             Run = &MmPhysicalMemoryBlock->Run[i];
2377             DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
2378                    Run->BasePage << PAGE_SHIFT,
2379                    (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
2380 
2381             //
2382             // Make sure it has pages inside it
2383             //
2384             if (Run->PageCount)
2385             {
2386                 //
2387                 // Set the bits in the PFN bitmap
2388                 //
2389                 RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
2390             }
2391         }
2392 
2393         /* Look for large page cache entries that need caching */
2394         MiSyncCachedRanges();
2395 
2396         /* Loop for HAL Heap I/O device mappings that need coherency tracking */
2397         MiAddHalIoMappings();
2398 
2399         /* Set the initial resident page count */
2400         MmResidentAvailablePages = MmAvailablePages - 32;
2401 
2402         /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2403         MiInitializeLargePageSupport();
2404 
2405         /* Check if the registry says any drivers should be loaded with large pages */
2406         MiInitializeDriverLargePageList();
2407 
2408         /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2409         MiReloadBootLoadedDrivers(LoaderBlock);
2410 
2411         /* FIXME: Call out into Driver Verifier for initialization  */
2412 
2413         /* Check how many pages the system has */
2414         if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
2415         {
2416             /* Set small system */
2417             MmSystemSize = MmSmallSystem;
2418             MmMaximumDeadKernelStacks = 0;
2419         }
2420         else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
2421         {
2422             /* Set small system and add 100 pages for the cache */
2423             MmSystemSize = MmSmallSystem;
2424             MmSystemCacheWsMinimum += 100;
2425             MmMaximumDeadKernelStacks = 2;
2426         }
2427         else
2428         {
2429             /* Set medium system and add 400 pages for the cache */
2430             MmSystemSize = MmMediumSystem;
2431             MmSystemCacheWsMinimum += 400;
2432             MmMaximumDeadKernelStacks = 5;
2433         }
2434 
2435         /* Check for less than 24MB */
2436         if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
2437         {
2438             /* No more than 32 pages */
2439             MmSystemCacheWsMinimum = 32;
2440         }
2441 
2442         /* Check for 32MB or more */
2443         if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
2444         {
2445             /* Check for product type being "Wi" for WinNT */
2446             if (MmProductType == '\0i\0W')
2447             {
2448                 /* Then this is a large system */
2449                 MmSystemSize = MmLargeSystem;
2450             }
2451             else
2452             {
2453                 /* For servers, we need 64MB to consider this as being large */
2454                 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
2455                 {
2456                     /* Set it as large */
2457                     MmSystemSize = MmLargeSystem;
2458                 }
2459             }
2460         }
2461 
2462         /* Check for more than 33 MB */
2463         if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
2464         {
2465             /* Add another 500 pages to the cache */
2466             MmSystemCacheWsMinimum += 500;
2467         }
2468 
2469         /* Now setup the shared user data fields */
2470         ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
2471         SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
2472         SharedUserData->LargePageMinimum = 0;
2473 
2474         /* Check for workstation (Wi for WinNT) */
2475         if (MmProductType == '\0i\0W')
2476         {
2477             /* Set Windows NT Workstation product type */
2478             SharedUserData->NtProductType = NtProductWinNt;
2479             MmProductType = 0;
2480 
2481             /* For this product, we wait till the last moment to throttle */
2482             MmThrottleTop = 250;
2483             MmThrottleBottom = 30;
2484         }
2485         else
2486         {
2487             /* Check for LanMan server (La for LanmanNT) */
2488             if (MmProductType == '\0a\0L')
2489             {
2490                 /* This is a domain controller */
2491                 SharedUserData->NtProductType = NtProductLanManNt;
2492             }
2493             else
2494             {
2495                 /* Otherwise it must be a normal server (Se for ServerNT) */
2496                 SharedUserData->NtProductType = NtProductServer;
2497             }
2498 
2499             /* Set the product type, and make the system more aggressive with low memory */
2500             MmProductType = 1;
2501             MmMinimumFreePages = 81;
2502 
2503             /* We will throttle earlier to preserve memory */
2504             MmThrottleTop = 450;
2505             MmThrottleBottom = 80;
2506         }
2507 
2508         /* Update working set tuning parameters */
2509         MiAdjustWorkingSetManagerParameters(!MmProductType);
2510 
2511         /* Fine-tune the resident page count by charging the working set and NP expansion */
2512         MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2513         MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2514         MmResidentAvailableAtInit = MmResidentAvailablePages;
2515         if (MmResidentAvailablePages <= 0)
2516         {
2517             /* This should not happen */
2518             DPRINT1("System cache working set too big\n");
2519             return FALSE;
2520         }
2521 
2522         /* Define limits for system cache */
2523 #ifdef _M_AMD64
2524         MmSizeOfSystemCacheInPages = (MI_SYSTEM_CACHE_END - MI_SYSTEM_CACHE_START) / PAGE_SIZE;
2525 #else
2526         MmSizeOfSystemCacheInPages = ((ULONG_PTR)MI_PAGED_POOL_START - (ULONG_PTR)MI_SYSTEM_CACHE_START) / PAGE_SIZE;
2527 #endif
2528         MmSystemCacheEnd = (PVOID)((ULONG_PTR)MmSystemCacheStart + (MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
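        //
        // Worked example (assuming, purely for illustration, a 512MB system
        // cache VA window on x86):
        //
        //   MmSizeOfSystemCacheInPages = 512MB / 4KB = 0x20000 pages
        //   MmSystemCacheEnd = MmSystemCacheStart + 512MB - 1
        //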
2529 
2530         /* Initialize the system cache */
2531         //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2532 
2533         /* Update the commit limit */
2534         MmTotalCommitLimit = MmAvailablePages;
2535         if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
2536         MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2537 
2538         /* Size up paged pool and build the shadow system page directory */
2539         MiBuildPagedPool();
2540 
2541         /* Debugger physical memory support is now ready to be used */
2542         MmDebugPte = MiAddressToPte(MiDebugMapping);
2543 
2544         /* Initialize the loaded module list */
2545         MiInitializeLoadedModuleList(LoaderBlock);
2546     }
2547 
2548     //
2549     // Always return success for now
2550     //
2551     return TRUE;
2552 }
2553 
2554 /* EOF */
2555