xref: /reactos/ntoskrnl/mm/ARM3/mminit.c (revision ab5fdac9)
1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/mm/ARM3/mminit.c
5  * PURPOSE:         ARM Memory Manager Initialization
6  * PROGRAMMERS:     ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17 #undef MmSystemRangeStart
18 
19 /* GLOBALS ********************************************************************/
20 
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 SIZE_T MmSizeOfNonPagedPoolInBytes;
27 SIZE_T MmMaximumNonPagedPoolInBytes;
28 
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31 
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
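
/*
 * Illustrative sketch only: the authoritative computation happens later during
 * init, and the exact form below is an assumption drawn from the KB article
 * referenced above. For a machine with TotalMb megabytes of RAM, the initial
 * and maximum nonpaged pool sizes come out roughly as:
 *
 *   InitialSize = MmMinimumNonPagedPoolSize +
 *                 (TotalMb - 4) * MmMinAdditionNonPagedPoolPerMb;
 *   MaximumSize = MmDefaultMaximumNonPagedPool +
 *                 (TotalMb - 4) * MmMaxAdditionNonPagedPoolPerMb;
 *
 * For example, 128MB of RAM gives about 256KB + 124 * 32KB ~= 4.1MB initial
 * and 1MB + 124 * 400KB ~= 49.4MB maximum (before any caps are applied).
 */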
43 
44 //
45 // The memory layout (and especially the variable names) of the NT kernel mode
46 // components can be a bit hard to grasp, especially when it comes to the
47 // nonpaged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
54 // The initial nonpaged pool is physically contiguous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the subject of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
96 PVOID MmNonPagedSystemStart;
97 PVOID MmNonPagedPoolStart;
98 PVOID MmNonPagedPoolExpansionStart;
99 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
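
/*
 * A minimal sketch of the relationship described above, assuming init has
 * already computed these values (illustrative, not code that runs here):
 *
 *   ASSERT((ULONG_PTR)MmNonPagedPoolExpansionStart +
 *          (MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes) ==
 *          (ULONG_PTR)MmNonPagedPoolEnd);
 */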
100 
101 //
102 // This is where paged pool starts by default
103 //
104 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
105 PVOID MmPagedPoolEnd;
106 
107 //
108 // And this is its default size
109 //
110 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
111 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
112 
113 //
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
119 //
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
122 //
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't
125 // support that yet.
126 //
127 PVOID MiSessionSpaceEnd;    // 0xC0000000
128 PVOID MiSessionImageEnd;    // 0xC0000000
129 PVOID MiSessionImageStart;  // 0xBF800000
130 PVOID MiSessionSpaceWs;
131 PVOID MiSessionViewStart;   // 0xBE000000
132 PVOID MiSessionPoolEnd;     // 0xBE000000
133 PVOID MiSessionPoolStart;   // 0xBD000000
134 PVOID MmSessionBase;        // 0xBD000000
135 SIZE_T MmSessionSize;
136 SIZE_T MmSessionViewSize;
137 SIZE_T MmSessionPoolSize;
138 SIZE_T MmSessionImageSize;
139 
140 /*
141  * These are the PTE addresses of the boundaries carved out above
142  */
143 PMMPTE MiSessionImagePteStart;
144 PMMPTE MiSessionImagePteEnd;
145 PMMPTE MiSessionBasePte;
146 PMMPTE MiSessionLastPte;
147 
148 //
149 // The system view space, on the other hand, is where sections that are memory
150 // mapped into "system space" end up.
151 //
152 // By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
153 //
154 PVOID MiSystemViewStart;
155 SIZE_T MmSystemViewSize;
156 
157 #if (_MI_PAGING_LEVELS == 2)
158 //
159 // A copy of the system page directory (the page directory associated with the
160 // System process) is kept (double-mapped) by the memory manager in order to lazily
161 // map paged pool PDEs into external processes when they fault on a paged pool
162 // address.
163 //
164 PFN_NUMBER MmSystemPageDirectory[PPE_PER_PAGE];
165 PMMPDE MmSystemPagePtes;
166 #endif
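
/*
 * Lazy-fill sketch (illustrative; the real logic lives in the page fault path,
 * e.g. MiCheckPdeForPagedPool, and the indexing helper below is hypothetical):
 * when a process faults on a paged pool address whose PDE is missing from its
 * own page directory, the handler copies the corresponding entry from the
 * system's double-mapped page directory:
 *
 *   PointerPde = MiAddressToPde(Address);
 *   PointerPde->u.Long = MmSystemPagePtes[PdeIndexOf(Address)].u.Long;
 *
 * where PdeIndexOf() stands in for "the PDE index of Address within the page
 * directory" and is used only for this sketch.
 */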
167 
168 //
169 // The system cache starts right after hyperspace. The first few pages are for
170 // keeping track of the system working set list.
171 //
172 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
173 //
174 PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
175 
176 //
177 // Windows NT seems to choose between 7000, 11000 and 50000
178 // On systems with more than 32MB, this number is then doubled, and further
179 // aligned up to a PDE boundary (4MB).
180 //
181 PFN_COUNT MmNumberOfSystemPtes;
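
/*
 * Worked example of the rule above (hypothetical numbers, rounding assumed):
 * a >32MB machine that picked 11000 PTEs ends up with 11000 * 2 = 22000 PTEs,
 * i.e. about 85.9MB of VA, which is then rounded up to the next 4MB PDE
 * boundary for a final 88MB of system PTE space.
 */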
182 
183 //
184 // This is how many pages the PFN database will take up
185 // In Windows, this includes the Quark Color Table, but not in ARM³
186 //
187 PFN_NUMBER MxPfnAllocation;
188 
189 //
190 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
191 // of pages that are not actually valid physical memory, such as ACPI reserved
192 // regions, BIOS address ranges, or holes in physical memory address space which
193 // could indicate device-mapped I/O memory.
194 //
195 // In fact, the lack of a PFN entry for a page usually indicates that this is
196 // I/O space instead.
197 //
198 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
199 // a bit to each. If the bit is set, then the page is valid physical RAM.
200 //
201 RTL_BITMAP MiPfnBitMap;
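
/*
 * Sketch of how a consumer would typically query the bitmap once it has been
 * initialized (hedged example; the actual callers live elsewhere in Mm):
 *
 *   if (RtlCheckBit(&MiPfnBitMap, PageFrameIndex))
 *   {
 *       // Backed by real RAM -- a PFN entry describes this frame
 *   }
 *   else
 *   {
 *       // Most likely device/I/O space -- no PFN entry exists for it
 *   }
 */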
202 
203 //
204 // This structure describes the different pieces of RAM-backed address space
205 //
206 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
207 
208 //
209 // This is where we keep track of the most basic physical layout markers
210 //
211 PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
212 PFN_COUNT MmNumberOfPhysicalPages;
213 
214 //
215 // The total number of pages mapped by the boot loader (which includes the
216 // kernel, HAL, boot drivers, registry, NLS files and other loader data
217 // structures) is kept track of here. This depends on "LoaderPagesSpanned"
218 // being correct when coming from the loader.
219 //
220 // This number is later aligned up to a PDE boundary.
221 //
222 SIZE_T MmBootImageSize;
223 
224 //
225 // These three variables keep track of the core separation of address space that
226 // exists between kernel mode and user mode.
227 //
228 ULONG_PTR MmUserProbeAddress;
229 PVOID MmHighestUserAddress;
230 PVOID MmSystemRangeStart;
231 
232 /* And these store the respective highest PTE/PDE address */
233 PMMPTE MiHighestUserPte;
234 PMMPDE MiHighestUserPde;
235 #if (_MI_PAGING_LEVELS >= 3)
236 PMMPTE MiHighestUserPpe;
237 #if (_MI_PAGING_LEVELS >= 4)
238 PMMPTE MiHighestUserPxe;
239 #endif
240 #endif
241 
242 /* These variables define the system cache address space */
243 PVOID MmSystemCacheStart = (PVOID)MI_SYSTEM_CACHE_START;
244 PVOID MmSystemCacheEnd;
245 ULONG_PTR MmSizeOfSystemCacheInPages;
246 MMSUPPORT MmSystemCacheWs;
247 
248 //
249 // This is where hyperspace ends (followed by the system cache working set)
250 //
251 PVOID MmHyperSpaceEnd;
252 
253 //
254 // Page coloring algorithm data
255 //
256 ULONG MmSecondaryColors;
257 ULONG MmSecondaryColorMask;
258 
259 //
260 // Actual (registry-configurable) size of a GUI thread's stack
261 //
262 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
263 
264 //
265 // Before we have a PFN database, memory comes straight from our physical memory
266 // blocks, which is nice because it's guaranteed contiguous and also because once
267 // we take a page from here, the system doesn't see it anymore.
268 // However, once the fun is over, those pages must be re-integrated back into
269 // PFN society life, and that requires us keeping a copy of the original layout
270 // so that we can parse it later.
271 //
272 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
273 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
274 
275 /*
276  * For each page's worth of L2 cache in a given set/way line, the zero and
277  * free lists are organized into what is called a "color".
278  *
279  * This array points to the two lists, so it can be thought of as a multi-dimensional
280  * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
281  * we describe the array in pointer form instead.
282  *
283  * On a final note, the color tables themselves are right after the PFN database.
284  */
285 C_ASSERT(FreePageList == 1);
286 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
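
/*
 * Indexing sketch (illustrative): thanks to the layout set up by
 * MiInitializeColorTables below, the pointer form behaves just like a true
 * MMCOLOR_TABLES[2][MmSecondaryColors] array:
 *
 *   PMMCOLOR_TABLES ColorHead =
 *       &MmFreePagesByColor[FreePageList][Color & MmSecondaryColorMask];
 */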
287 
288 /* An event used in Phase 0 before the rest of the system is ready to go */
289 KEVENT MiTempEvent;
290 
291 /* All the events used for memory threshold notifications */
292 PKEVENT MiLowMemoryEvent;
293 PKEVENT MiHighMemoryEvent;
294 PKEVENT MiLowPagedPoolEvent;
295 PKEVENT MiHighPagedPoolEvent;
296 PKEVENT MiLowNonPagedPoolEvent;
297 PKEVENT MiHighNonPagedPoolEvent;
298 
299 /* The actual thresholds themselves, in page numbers */
300 PFN_NUMBER MmLowMemoryThreshold;
301 PFN_NUMBER MmHighMemoryThreshold;
302 PFN_NUMBER MiLowPagedPoolThreshold;
303 PFN_NUMBER MiHighPagedPoolThreshold;
304 PFN_NUMBER MiLowNonPagedPoolThreshold;
305 PFN_NUMBER MiHighNonPagedPoolThreshold;
306 
307 /*
308  * This number determines how many free pages must exist, at minimum, before we
309  * start trimming working sets and flushing modified pages to obtain more free
310  * pages.
311  *
312  * This number changes if the system detects that this is a server product
313  */
314 PFN_NUMBER MmMinimumFreePages = 26;
315 
316 /*
317  * This number indicates how many pages we consider to be a low limit of having
318  * "plenty" of free memory.
319  *
320  * It is doubled on systems that have more than 63MB of memory
321  */
322 PFN_NUMBER MmPlentyFreePages = 400;
323 
324 /* These values store the type of system this is (small, med, large) and whether it is a server */
325 ULONG MmProductType;
326 MM_SYSTEMSIZE MmSystemSize;
327 
328 /*
329  * These values store the cache working set minimums and maximums, in pages
330  *
331  * The minimum value is boosted on systems with more than 24MB of RAM, and cut
332  * down to only 32 pages on embedded (<24MB RAM) systems.
333  *
334  * An extra boost of 2MB is given on systems with more than 33MB of RAM.
335  */
336 PFN_NUMBER MmSystemCacheWsMinimum = 288;
337 PFN_NUMBER MmSystemCacheWsMaximum = 350;
338 
339 /* FIXME: Move to cache/working set code later */
340 BOOLEAN MmLargeSystemCache;
341 
342 /*
343  * This value determines in how many fragments/chunks the subsection prototype
344  * PTEs should be allocated when mapping a section object. It is configurable in
345  * the registry through the MapAllocationFragment parameter.
346  *
347  * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
348  * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
349  *
350  * The maximum it can be set to is 2MB, and the minimum is 4KB.
351  */
352 SIZE_T MmAllocationFragment;
353 
354 /*
355  * These two values track how much virtual memory can be committed, and when
356  * expansion should happen.
357  */
358  // FIXME: They should be moved elsewhere since it's not an "init" setting?
359 SIZE_T MmTotalCommitLimit;
360 SIZE_T MmTotalCommitLimitMaximum;
361 
362 /*
363  * These values tune certain user parameters. They have default values set here,
364  * as well as in the code, and can be overwritten by registry settings.
365  */
366 SIZE_T MmHeapSegmentReserve = 1 * _1MB;
367 SIZE_T MmHeapSegmentCommit = 2 * PAGE_SIZE;
368 SIZE_T MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
369 SIZE_T MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
370 SIZE_T MmMinimumStackCommitInBytes = 0;
371 
372 /* Internal setting used for debugging memory descriptors */
373 BOOLEAN MiDbgEnableMdDump =
374 #ifdef _ARM_
375 TRUE;
376 #else
377 FALSE;
378 #endif
379 
380 /* Number of memory descriptors in the loader block */
381 ULONG MiNumberDescriptors = 0;
382 
383 /* Number of free pages in the loader block */
384 PFN_NUMBER MiNumberOfFreePages = 0;
385 
386 /* Timeout value for critical sections (2.5 minutes) */
387 ULONG MmCritsectTimeoutSeconds = 150; // NT value: 720 * 60 * 60; (30 days)
388 LARGE_INTEGER MmCriticalSectionTimeout;
389 
390 //
391 // Throttling limits for Cc (in pages)
392 // Above top, we don't throttle
393 // Above bottom, we throttle depending on the amount of modified pages
394 // Otherwise, we throttle!
395 //
396 ULONG MmThrottleTop;
397 ULONG MmThrottleBottom;
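
/*
 * Decision sketch implied by the comment above (illustrative only; the actual
 * consumer of these limits is Cc's write-throttling code, not this file):
 *
 *   if (MmAvailablePages > MmThrottleTop)          -> never throttle
 *   else if (MmAvailablePages > MmThrottleBottom)  -> throttle depending on
 *                                                     the modified page count
 *   else                                           -> always throttle
 */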
398 
399 /* PRIVATE FUNCTIONS **********************************************************/
400 
401 VOID
402 NTAPI
403 MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
404 {
405     PLIST_ENTRY ListEntry;
406     PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
407     PFN_NUMBER PageFrameIndex, FreePages = 0;
408 
409     /* Loop the memory descriptors */
410     for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
411          ListEntry != &LoaderBlock->MemoryDescriptorListHead;
412          ListEntry = ListEntry->Flink)
413     {
414         /* Get the descriptor */
415         Descriptor = CONTAINING_RECORD(ListEntry,
416                                        MEMORY_ALLOCATION_DESCRIPTOR,
417                                        ListEntry);
418         DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
419             Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);
420 
421         /* Count this descriptor */
422         MiNumberDescriptors++;
423 
424         /* Check if this is invisible memory */
425         if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
426             (Descriptor->MemoryType == LoaderSpecialMemory) ||
427             (Descriptor->MemoryType == LoaderHALCachedMemory) ||
428             (Descriptor->MemoryType == LoaderBBTMemory))
429         {
430             /* Skip this descriptor */
431             continue;
432         }
433 
434         /* Check if this is bad memory */
435         if (Descriptor->MemoryType != LoaderBad)
436         {
437             /* Count this in the total of pages */
438             MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
439         }
440 
441         /* Check if this is the new lowest page */
442         if (Descriptor->BasePage < MmLowestPhysicalPage)
443         {
444             /* Update the lowest page */
445             MmLowestPhysicalPage = Descriptor->BasePage;
446         }
447 
448         /* Check if this is the new highest page */
449         PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
450         if (PageFrameIndex > MmHighestPhysicalPage)
451         {
452             /* Update the highest page */
453             MmHighestPhysicalPage = PageFrameIndex - 1;
454         }
455 
456         /* Check if this is free memory */
457         if ((Descriptor->MemoryType == LoaderFree) ||
458             (Descriptor->MemoryType == LoaderLoadedProgram) ||
459             (Descriptor->MemoryType == LoaderFirmwareTemporary) ||
460             (Descriptor->MemoryType == LoaderOsloaderStack))
461         {
462             /* Count it towards the free pages */
463             MiNumberOfFreePages += Descriptor->PageCount;
464 
465             /* Check if this is the largest memory descriptor */
466             if (Descriptor->PageCount > FreePages)
467             {
468                 /* Remember it */
469                 MxFreeDescriptor = Descriptor;
470                 FreePages = Descriptor->PageCount;
471             }
472         }
473     }
474 
475     /* Save original values of the free descriptor, since it'll be
476      * altered by early allocations */
477     MxOldFreeDescriptor = *MxFreeDescriptor;
478 }
479 
480 CODE_SEG("INIT")
481 PFN_NUMBER
482 NTAPI
483 MxGetNextPage(IN PFN_NUMBER PageCount)
484 {
485     PFN_NUMBER Pfn;
486 
487     /* Make sure we have enough pages */
488     if (PageCount > MxFreeDescriptor->PageCount)
489     {
490         /* Crash the system */
491         KeBugCheckEx(INSTALL_MORE_MEMORY,
492                      MmNumberOfPhysicalPages,
493                      MxFreeDescriptor->PageCount,
494                      MxOldFreeDescriptor.PageCount,
495                      PageCount);
496     }
497 
498     /* Use our lowest usable free pages */
499     Pfn = MxFreeDescriptor->BasePage;
500     MxFreeDescriptor->BasePage += PageCount;
501     MxFreeDescriptor->PageCount -= PageCount;
502     return Pfn;
503 }
504 
505 CODE_SEG("INIT")
506 VOID
507 NTAPI
508 MiComputeColorInformation(VOID)
509 {
510     ULONG L2Associativity;
511 
512     /* Check if no setting was provided already */
513     if (!MmSecondaryColors)
514     {
515         /* Get L2 cache information */
516         L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
517 
518         /* The number of colors is the number of cache bytes by set/way */
519         MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
520         if (L2Associativity) MmSecondaryColors /= L2Associativity;
521     }
522 
523     /* Now convert cache bytes into pages */
524     MmSecondaryColors >>= PAGE_SHIFT;
525     if (!MmSecondaryColors)
526     {
527         /* If there was no cache data from the KPCR, use the default colors */
528         MmSecondaryColors = MI_SECONDARY_COLORS;
529     }
530     else
531     {
532         /* Otherwise, make sure there aren't too many colors */
533         if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
534         {
535             /* Set the maximum */
536             MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
537         }
538 
539         /* Make sure there aren't too few colors */
540         if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
541         {
542             /* Set the default */
543             MmSecondaryColors = MI_SECONDARY_COLORS;
544         }
545 
546         /* Finally make sure the colors are a power of two */
547         if (MmSecondaryColors & (MmSecondaryColors - 1))
548         {
549             /* Set the default */
550             MmSecondaryColors = MI_SECONDARY_COLORS;
551         }
552     }
553 
554     /* Compute the mask and store it */
555     MmSecondaryColorMask = MmSecondaryColors - 1;
556     KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
557 }
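
/*
 * Worked example with hypothetical cache numbers: a 512KB, 8-way L2 cache
 * yields 512KB / 8 = 64KB of bytes per set/way, i.e. 64KB >> PAGE_SHIFT = 16
 * colors on x86, and MmSecondaryColorMask = 15. If the KPCR reports no cache
 * data at all, the MI_SECONDARY_COLORS default is used instead.
 */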
558 
559 CODE_SEG("INIT")
560 VOID
561 NTAPI
562 MiInitializeColorTables(VOID)
563 {
564     ULONG i;
565     PMMPTE PointerPte, LastPte;
566     MMPTE TempPte = ValidKernelPte;
567 
568     /* The color table starts after the ARM3 PFN database */
569     MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
570 
571     /* Loop the PTEs. We have two color tables for each secondary color */
572     PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
573     LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
574                              (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
575                              - 1);
576     while (PointerPte <= LastPte)
577     {
578         /* Check for valid PTE */
579         if (PointerPte->u.Hard.Valid == 0)
580         {
581             /* Get a page and map it */
582             TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
583             MI_WRITE_VALID_PTE(PointerPte, TempPte);
584 
585             /* Zero out the page */
586             RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
587         }
588 
589         /* Next */
590         PointerPte++;
591     }
592 
593     /* Now set the address of the next list, right after this one */
594     MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
595 
596     /* Now loop the lists to set them up */
597     for (i = 0; i < MmSecondaryColors; i++)
598     {
599         /* Set both free and zero lists for each color */
600         MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
601         MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
602         MmFreePagesByColor[ZeroedPageList][i].Count = 0;
603         MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
604         MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
605         MmFreePagesByColor[FreePageList][i].Count = 0;
606     }
607 }
608 
609 #ifndef _M_AMD64
610 CODE_SEG("INIT")
611 BOOLEAN
612 NTAPI
613 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
614                   IN PFN_NUMBER Pfn)
615 {
616     PLIST_ENTRY NextEntry;
617     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
618 
619     /* Loop the memory descriptors */
620     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
621     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
622     {
623         /* Get the memory descriptor */
624         MdBlock = CONTAINING_RECORD(NextEntry,
625                                     MEMORY_ALLOCATION_DESCRIPTOR,
626                                     ListEntry);
627 
628         /* Check if this PFN could be part of the block */
629         if (Pfn >= (MdBlock->BasePage))
630         {
631             /* Check if it really is part of the block */
632             if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
633             {
634                 /* Check if the block is actually memory we don't map */
635                 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
636                     (MdBlock->MemoryType == LoaderBBTMemory) ||
637                     (MdBlock->MemoryType == LoaderSpecialMemory))
638                 {
639                     /* We don't need PFN database entries for this memory */
640                     break;
641                 }
642 
643                 /* This is memory we want to map */
644                 return TRUE;
645             }
646         }
647         else
648         {
649             /* Blocks are ordered, so if it's not here, it doesn't exist */
650             break;
651         }
652 
653         /* Get to the next descriptor */
654         NextEntry = MdBlock->ListEntry.Flink;
655     }
656 
657     /* Check if this PFN is actually from our free memory descriptor */
658     if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
659         (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
660     {
661         /* We use these pages for initial mappings, so we do want to count them */
662         return TRUE;
663     }
664 
665     /* Otherwise this isn't memory that we describe or care about */
666     return FALSE;
667 }
668 
669 CODE_SEG("INIT")
670 VOID
671 NTAPI
672 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
673 {
674     PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
675     PLIST_ENTRY NextEntry;
676     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
677     PMMPTE PointerPte, LastPte;
678     MMPTE TempPte = ValidKernelPte;
679 
680     /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
681     FreePage = MxFreeDescriptor->BasePage;
682     FreePageCount = MxFreeDescriptor->PageCount;
683     PagesLeft = 0;
684 
685     /* Loop the memory descriptors */
686     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
687     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
688     {
689         /* Get the descriptor */
690         MdBlock = CONTAINING_RECORD(NextEntry,
691                                     MEMORY_ALLOCATION_DESCRIPTOR,
692                                     ListEntry);
693         if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
694             (MdBlock->MemoryType == LoaderBBTMemory) ||
695             (MdBlock->MemoryType == LoaderSpecialMemory))
696         {
697             /* These pages are not part of the PFN database */
698             NextEntry = MdBlock->ListEntry.Flink;
699             continue;
700         }
701 
702         /* Next, check if this is our special free descriptor we've found */
703         if (MdBlock == MxFreeDescriptor)
704         {
705             /* Use the real numbers instead */
706             BasePage = MxOldFreeDescriptor.BasePage;
707             PageCount = MxOldFreeDescriptor.PageCount;
708         }
709         else
710         {
711             /* Use the descriptor's numbers */
712             BasePage = MdBlock->BasePage;
713             PageCount = MdBlock->PageCount;
714         }
715 
716         /* Get the PTEs for this range */
717         PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
718         LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
719         DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
720 
721         /* Loop them */
722         while (PointerPte <= LastPte)
723         {
724             /* We'll only touch PTEs that aren't already valid */
725             if (PointerPte->u.Hard.Valid == 0)
726             {
727                 /* Use the next free page */
728                 TempPte.u.Hard.PageFrameNumber = FreePage;
729                 ASSERT(FreePageCount != 0);
730 
731                 /* Consume free pages */
732                 FreePage++;
733                 FreePageCount--;
734                 if (!FreePageCount)
735                 {
736                     /* Out of memory */
737                     KeBugCheckEx(INSTALL_MORE_MEMORY,
738                                  MmNumberOfPhysicalPages,
739                                  FreePageCount,
740                                  MxOldFreeDescriptor.PageCount,
741                                  1);
742                 }
743 
744                 /* Write out this PTE */
745                 PagesLeft++;
746                 MI_WRITE_VALID_PTE(PointerPte, TempPte);
747 
748                 /* Zero this page */
749                 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
750             }
751 
752             /* Next! */
753             PointerPte++;
754         }
755 
756         /* Do the next address range */
757         NextEntry = MdBlock->ListEntry.Flink;
758     }
759 
760     /* Now update the free descriptor to consume the pages we used up during the PFN allocation loop */
761     MxFreeDescriptor->BasePage = FreePage;
762     MxFreeDescriptor->PageCount = FreePageCount;
763 }
764 
765 CODE_SEG("INIT")
766 VOID
767 NTAPI
768 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
769 {
770     PMMPDE PointerPde;
771     PMMPTE PointerPte;
772     ULONG i, Count, j;
773     PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
774     PMMPFN Pfn1, Pfn2;
775     ULONG_PTR BaseAddress = 0;
776 
777     /* PFN of the startup page directory */
778     StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
779 
780     /* Start with the first PDE and scan them all */
781     PointerPde = MiAddressToPde(NULL);
782     Count = PPE_PER_PAGE * PDE_PER_PAGE;
783     for (i = 0; i < Count; i++)
784     {
785         /* Check for valid PDE */
786         if (PointerPde->u.Hard.Valid == 1)
787         {
788             /* Get the PFN from it */
789             PageFrameIndex = PFN_FROM_PTE(PointerPde);
790 
791             /* Do we want a PFN entry for this page? */
792             if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
793             {
794                 /* Yes we do, set it up */
795                 Pfn1 = MiGetPfnEntry(PageFrameIndex);
796                 Pfn1->u4.PteFrame = StartupPdIndex;
797                 Pfn1->PteAddress = (PMMPTE)PointerPde;
798                 Pfn1->u2.ShareCount++;
799                 Pfn1->u3.e2.ReferenceCount = 1;
800                 Pfn1->u3.e1.PageLocation = ActiveAndValid;
801                 Pfn1->u3.e1.CacheAttribute = MiNonCached;
802 #if MI_TRACE_PFNS
803                 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
804                 MI_SET_PFN_PROCESS_NAME(Pfn1, "Initial PDE");
805 #endif
806             }
807             else
808             {
809                 /* No PFN entry */
810                 Pfn1 = NULL;
811             }
812 
813             /* Now get the PTE and scan the pages */
814             PointerPte = MiAddressToPte(BaseAddress);
815             for (j = 0; j < PTE_PER_PAGE; j++)
816             {
817                 /* Check for a valid PTE */
818                 if (PointerPte->u.Hard.Valid == 1)
819                 {
820                     /* Increase the shared count of the PFN entry for the PDE */
821                     ASSERT(Pfn1 != NULL);
822                     Pfn1->u2.ShareCount++;
823 
824                     /* Now check if the PTE is valid memory too */
825                     PtePageIndex = PFN_FROM_PTE(PointerPte);
826                     if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
827                     {
828                         /*
829                          * Only add pages above the end of system code or pages
830                          * that are part of nonpaged pool
831                          */
832                         if ((BaseAddress >= 0xA0000000) ||
833                             ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
834                              (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
835                                             MmSizeOfNonPagedPoolInBytes)))
836                         {
837                             /* Get the PFN entry and make sure it too is valid */
838                             Pfn2 = MiGetPfnEntry(PtePageIndex);
839                             if ((MmIsAddressValid(Pfn2)) &&
840                                 (MmIsAddressValid(Pfn2 + 1)))
841                             {
842                                 /* Setup the PFN entry */
843                                 Pfn2->u4.PteFrame = PageFrameIndex;
844                                 Pfn2->PteAddress = PointerPte;
845                                 Pfn2->u2.ShareCount++;
846                                 Pfn2->u3.e2.ReferenceCount = 1;
847                                 Pfn2->u3.e1.PageLocation = ActiveAndValid;
848                                 Pfn2->u3.e1.CacheAttribute = MiNonCached;
849 #if MI_TRACE_PFNS
850                                 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
851                                 MI_SET_PFN_PROCESS_NAME(Pfn2, "Initial PTE");
852 #endif
853                             }
854                         }
855                     }
856                 }
857 
858                 /* Next PTE */
859                 PointerPte++;
860                 BaseAddress += PAGE_SIZE;
861             }
862         }
863         else
864         {
865             /* Next PDE mapped address */
866             BaseAddress += PDE_MAPPED_VA;
867         }
868 
869         /* Next PDE */
870         PointerPde++;
871     }
872 }
873 
874 CODE_SEG("INIT")
875 VOID
876 NTAPI
877 MiBuildPfnDatabaseZeroPage(VOID)
878 {
879     PMMPFN Pfn1;
880     PMMPDE PointerPde;
881 
882     /* Grab the lowest page and check if it has no real references */
883     Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
884     if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
885     {
886         /* Make it a bogus page to catch errors */
887         PointerPde = MiAddressToPde(0xFFFFFFFF);
888         Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
889         Pfn1->PteAddress = (PMMPTE)PointerPde;
890         Pfn1->u2.ShareCount++;
891         Pfn1->u3.e2.ReferenceCount = 0xFFF0;
892         Pfn1->u3.e1.PageLocation = ActiveAndValid;
893         Pfn1->u3.e1.CacheAttribute = MiNonCached;
894     }
895 }
896 
897 CODE_SEG("INIT")
898 VOID
899 NTAPI
900 MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
901 {
902     PLIST_ENTRY NextEntry;
903     PFN_NUMBER PageCount = 0;
904     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
905     PFN_NUMBER PageFrameIndex;
906     PMMPFN Pfn1;
907     PMMPTE PointerPte;
908     PMMPDE PointerPde;
909     KIRQL OldIrql;
910 
911     /* Now loop through the descriptors */
912     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
913     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
914     {
915         /* Get the current descriptor */
916         MdBlock = CONTAINING_RECORD(NextEntry,
917                                     MEMORY_ALLOCATION_DESCRIPTOR,
918                                     ListEntry);
919 
920         /* Read its data */
921         PageCount = MdBlock->PageCount;
922         PageFrameIndex = MdBlock->BasePage;
923 
924         /* Don't allow memory above what the PFN database is mapping */
925         if (PageFrameIndex > MmHighestPhysicalPage)
926         {
927             /* Since they are ordered, everything past here will be larger */
928             break;
929         }
930 
931         /* On the other hand, the end page might be higher up... */
932         if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
933         {
934             /* In which case we'll trim the descriptor to go as high as we can */
935             PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
936             MdBlock->PageCount = PageCount;
937 
938             /* But if there's nothing left to trim, we got too high, so quit */
939             if (!PageCount) break;
940         }
941 
942         /* Now check the descriptor type */
943         switch (MdBlock->MemoryType)
944         {
945             /* Check for bad RAM */
946             case LoaderBad:
947 
948                 DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
949                 break;
950 
951             /* Check for free RAM */
952             case LoaderFree:
953             case LoaderLoadedProgram:
954             case LoaderFirmwareTemporary:
955             case LoaderOsloaderStack:
956 
957                 /* Get the last page of this descriptor. Note we loop backwards */
958                 PageFrameIndex += PageCount - 1;
959                 Pfn1 = MiGetPfnEntry(PageFrameIndex);
960 
961                 /* Lock the PFN Database */
962                 OldIrql = MiAcquirePfnLock();
963                 while (PageCount--)
964                 {
965                     /* If the page really has no references, mark it as free */
966                     if (!Pfn1->u3.e2.ReferenceCount)
967                     {
968                         /* Add it to the free list */
969                         Pfn1->u3.e1.CacheAttribute = MiNonCached;
970                         MiInsertPageInFreeList(PageFrameIndex);
971                     }
972 
973                     /* Go to the next page */
974                     Pfn1--;
975                     PageFrameIndex--;
976                 }
977 
978                 /* Release PFN database */
979                 MiReleasePfnLock(OldIrql);
980 
981                 /* Done with this block */
982                 break;
983 
984             /* Check for pages that are invisible to us */
985             case LoaderFirmwarePermanent:
986             case LoaderSpecialMemory:
987             case LoaderBBTMemory:
988 
989                 /* And skip them */
990                 break;
991 
992             default:
993 
994                 /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
995                 PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
996                 Pfn1 = MiGetPfnEntry(PageFrameIndex);
997                 while (PageCount--)
998                 {
999                     /* Check if the page is really unused */
1000                     PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
1001                     if (!Pfn1->u3.e2.ReferenceCount)
1002                     {
1003                         /* Mark it as being in-use */
1004                         Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
1005                         Pfn1->PteAddress = PointerPte;
1006                         Pfn1->u2.ShareCount++;
1007                         Pfn1->u3.e2.ReferenceCount = 1;
1008                         Pfn1->u3.e1.PageLocation = ActiveAndValid;
1009                         Pfn1->u3.e1.CacheAttribute = MiNonCached;
1010 #if MI_TRACE_PFNS
1011                         Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
1012 #endif
1013 
1014                         /* Check for RAM disk page */
1015                         if (MdBlock->MemoryType == LoaderXIPRom)
1016                         {
1017                             /* Make it a pseudo-I/O ROM mapping */
1018                             Pfn1->u1.Flink = 0;
1019                             Pfn1->u2.ShareCount = 0;
1020                             Pfn1->u3.e2.ReferenceCount = 0;
1021                             Pfn1->u3.e1.PageLocation = 0;
1022                             Pfn1->u3.e1.Rom = 1;
1023                             Pfn1->u4.InPageError = 0;
1024                             Pfn1->u3.e1.PrototypePte = 1;
1025                         }
1026                     }
1027 
1028                     /* Advance page structures */
1029                     Pfn1++;
1030                     PageFrameIndex++;
1031                     PointerPte++;
1032                 }
1033                 break;
1034         }
1035 
1036         /* Next descriptor entry */
1037         NextEntry = MdBlock->ListEntry.Flink;
1038     }
1039 }
1040 
1041 CODE_SEG("INIT")
1042 VOID
1043 NTAPI
1044 MiBuildPfnDatabaseSelf(VOID)
1045 {
1046     PMMPTE PointerPte, LastPte;
1047     PMMPFN Pfn1;
1048 
1049     /* Loop the PFN database page */
1050     PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
1051     LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
1052     while (PointerPte <= LastPte)
1053     {
1054         /* Make sure the page is valid */
1055         if (PointerPte->u.Hard.Valid == 1)
1056         {
1057             /* Get the PFN entry and just mark it referenced */
1058             Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1059             Pfn1->u2.ShareCount = 1;
1060             Pfn1->u3.e2.ReferenceCount = 1;
1061 #if MI_TRACE_PFNS
1062             Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
1063 #endif
1064         }
1065 
1066         /* Next */
1067         PointerPte++;
1068     }
1069 }
1070 
1071 CODE_SEG("INIT")
1072 VOID
1073 NTAPI
1074 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1075 {
1076     /* Scan memory and start setting up PFN entries */
1077     MiBuildPfnDatabaseFromPages(LoaderBlock);
1078 
1079     /* Add the zero page */
1080     MiBuildPfnDatabaseZeroPage();
1081 
1082     /* Scan the loader block and build the rest of the PFN database */
1083     MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
1084 
1085     /* Finally add the pages for the PFN database itself */
1086     MiBuildPfnDatabaseSelf();
1087 }
1088 #endif /* !_M_AMD64 */
1089 
1090 CODE_SEG("INIT")
1091 VOID
1092 NTAPI
1093 MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1094 {
1095     PLIST_ENTRY NextMd;
1096     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1097     ULONG_PTR i;
1098     PFN_NUMBER BasePage, LoaderPages;
1099     PMMPFN Pfn1;
1100     KIRQL OldIrql;
1101     PPHYSICAL_MEMORY_RUN Buffer, Entry;
1102 
1103     /* Loop the descriptors in order to count them */
1104     i = 0;
1105     NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1106     while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1107     {
1108         MdBlock = CONTAINING_RECORD(NextMd,
1109                                     MEMORY_ALLOCATION_DESCRIPTOR,
1110                                     ListEntry);
1111         i++;
1112         NextMd = MdBlock->ListEntry.Flink;
1113     }
1114 
1115     /* Allocate a structure to hold the physical runs */
1116     Buffer = ExAllocatePoolWithTag(NonPagedPool,
1117                                    i * sizeof(PHYSICAL_MEMORY_RUN),
1118                                    'lMmM');
1119     ASSERT(Buffer != NULL);
1120     Entry = Buffer;
1121 
1122     /* Loop the descriptors again */
1123     NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1124     while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1125     {
1126         /* Check what kind this was */
1127         MdBlock = CONTAINING_RECORD(NextMd,
1128                                     MEMORY_ALLOCATION_DESCRIPTOR,
1129                                     ListEntry);
1130         switch (MdBlock->MemoryType)
1131         {
1132             /* Registry, NLS, and heap data */
1133             case LoaderRegistryData:
1134             case LoaderOsloaderHeap:
1135             case LoaderNlsData:
1136                 /* These are all candidates for deletion */
1137                 Entry->BasePage = MdBlock->BasePage;
1138                 Entry->PageCount = MdBlock->PageCount;
1139                 Entry++;
1140 
1141             /* We keep the rest */
1142             default:
1143                 break;
1144         }
1145 
1146         /* Move to the next descriptor */
1147         NextMd = MdBlock->ListEntry.Flink;
1148     }
1149 
1150     /* Acquire the PFN lock */
1151     OldIrql = MiAcquirePfnLock();
1152 
1153     /* Loop the runs */
1154     LoaderPages = 0;
1155     while (--Entry >= Buffer)
1156     {
1157         /* See how many pages are in this run */
1158         i = Entry->PageCount;
1159         BasePage = Entry->BasePage;
1160 
1161         /* Loop each page */
1162         Pfn1 = MiGetPfnEntry(BasePage);
1163         while (i--)
1164         {
1165             /* Check if it has references or is in any kind of list */
1166             if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
1167             {
1168                 /* Set the new PTE address and put this page into the free list */
1169                 Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
1170                 MiInsertPageInFreeList(BasePage);
1171                 LoaderPages++;
1172             }
1173             else if (BasePage)
1174             {
1175                 /* It has a reference, so simply drop it */
1176                 ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
1177 
1178                 /* Drop a dereference on this page, which should delete it */
1179                 Pfn1->PteAddress->u.Long = 0;
1180                 MI_SET_PFN_DELETED(Pfn1);
1181                 MiDecrementShareCount(Pfn1, BasePage);
1182                 LoaderPages++;
1183             }
1184 
1185             /* Move to the next page */
1186             Pfn1++;
1187             BasePage++;
1188         }
1189     }
1190 
1191     /* Release the PFN lock and flush the TLB */
1192     DPRINT("Loader pages freed: %lx\n", LoaderPages);
1193     MiReleasePfnLock(OldIrql);
1194     KeFlushCurrentTb();
1195 
1196     /* Free our run structure */
1197     ExFreePoolWithTag(Buffer, 'lMmM');
1198 }
1199 
1200 CODE_SEG("INIT")
1201 VOID
1202 NTAPI
1203 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
1204 {
1205     /* This function needs to do more work; for now, we only tune page minimums */
1206 
1207     /* Check for a system with around 64MB RAM or more */
1208     if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
1209     {
1210         /* Double the minimum amount of pages we consider for a "plenty free" scenario */
1211         MmPlentyFreePages *= 2;
1212     }
1213 }
1214 
1215 CODE_SEG("INIT")
1216 VOID
1217 NTAPI
1218 MiNotifyMemoryEvents(VOID)
1219 {
1220     /* Are we in a low-memory situation? */
1221     if (MmAvailablePages < MmLowMemoryThreshold)
1222     {
1223         /* Clear high, set low  */
1224         if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1225         if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1226     }
1227     else if (MmAvailablePages < MmHighMemoryThreshold)
1228     {
1229         /* We are in between, clear both */
1230         if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1231         if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1232     }
1233     else
1234     {
1235         /* Clear low, set high  */
1236         if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1237         if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1238     }
1239 }
1240 
1241 CODE_SEG("INIT")
1242 NTSTATUS
1243 NTAPI
1244 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
1245                     OUT PKEVENT *Event)
1246 {
1247     PACL Dacl;
1248     HANDLE EventHandle;
1249     ULONG DaclLength;
1250     NTSTATUS Status;
1251     OBJECT_ATTRIBUTES ObjectAttributes;
1252     SECURITY_DESCRIPTOR SecurityDescriptor;
1253 
1254     /* Create the SD */
1255     Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1256                                          SECURITY_DESCRIPTOR_REVISION);
1257     if (!NT_SUCCESS(Status)) return Status;
1258 
1259     /* One ACL with 3 ACEs, each containing one SID */
1260     DaclLength = sizeof(ACL) +
1261                  3 * sizeof(ACCESS_ALLOWED_ACE) +
1262                  RtlLengthSid(SeLocalSystemSid) +
1263                  RtlLengthSid(SeAliasAdminsSid) +
1264                  RtlLengthSid(SeWorldSid);
1265 
1266     /* Allocate space for the DACL */
1267     Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, TAG_DACL);
1268     if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1269 
1270     /* Setup the ACL inside it */
1271     Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1272     if (!NT_SUCCESS(Status)) goto CleanUp;
1273 
1274     /* Add query rights for everyone */
1275     Status = RtlAddAccessAllowedAce(Dacl,
1276                                     ACL_REVISION,
1277                                     SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1278                                     SeWorldSid);
1279     if (!NT_SUCCESS(Status)) goto CleanUp;
1280 
1281     /* Full rights for the admin */
1282     Status = RtlAddAccessAllowedAce(Dacl,
1283                                     ACL_REVISION,
1284                                     EVENT_ALL_ACCESS,
1285                                     SeAliasAdminsSid);
1286     if (!NT_SUCCESS(Status)) goto CleanUp;
1287 
1288     /* As well as full rights for the system */
1289     Status = RtlAddAccessAllowedAce(Dacl,
1290                                     ACL_REVISION,
1291                                     EVENT_ALL_ACCESS,
1292                                     SeLocalSystemSid);
1293     if (!NT_SUCCESS(Status)) goto CleanUp;
1294 
1295     /* Set this DACL inside the SD */
1296     Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1297                                           TRUE,
1298                                           Dacl,
1299                                           FALSE);
1300     if (!NT_SUCCESS(Status)) goto CleanUp;
1301 
1302     /* Setup the event attributes, making sure it's a permanent one */
1303     InitializeObjectAttributes(&ObjectAttributes,
1304                                Name,
1305                                OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1306                                NULL,
1307                                &SecurityDescriptor);
1308 
1309     /* Create the event */
1310     Status = ZwCreateEvent(&EventHandle,
1311                            EVENT_ALL_ACCESS,
1312                            &ObjectAttributes,
1313                            NotificationEvent,
1314                            FALSE);
1315 CleanUp:
1316     /* Free the DACL */
1317     ExFreePoolWithTag(Dacl, TAG_DACL);
1318 
1319     /* Check if this is the success path */
1320     if (NT_SUCCESS(Status))
1321     {
1322         /* Add a reference to the object, then close the handle we had */
1323         Status = ObReferenceObjectByHandle(EventHandle,
1324                                            EVENT_MODIFY_STATE,
1325                                            ExEventObjectType,
1326                                            KernelMode,
1327                                            (PVOID*)Event,
1328                                            NULL);
1329         ZwClose (EventHandle);
1330     }
1331 
1332     /* Return status */
1333     return Status;
1334 }
1335 
1336 CODE_SEG("INIT")
1337 BOOLEAN
1338 NTAPI
1339 MiInitializeMemoryEvents(VOID)
1340 {
1341     UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1342     UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1343     UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1344     UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1345     UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1346     UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1347     NTSTATUS Status;
1348 
1349     /* Check if we have a registry setting */
1350     if (MmLowMemoryThreshold)
1351     {
1352         /* Convert it to pages */
1353         MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1354     }
1355     else
1356     {
1357         /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1358         MmLowMemoryThreshold = MmPlentyFreePages;
1359 
1360         /* More than one GB of memory? */
1361         if (MmNumberOfPhysicalPages > 0x40000)
1362         {
1363             /* Start at 32MB, and add another 16MB for each GB */
1364             MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
1365             MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1366         }
1367         else if (MmNumberOfPhysicalPages > 0x8000)
1368         {
1369             /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1370             MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1371         }
1372 
1373         /* Don't let the minimum threshold go past 64MB */
1374         MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1375     }
1376 
1377     /* Check if we have a registry setting */
1378     if (MmHighMemoryThreshold)
1379     {
1380         /* Convert it into pages */
1381         MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1382     }
1383     else
1384     {
1385         /* Otherwise, the default is three times the low memory threshold */
1386         MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1387         ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1388     }
1389 
1390     /* Make sure high threshold is actually higher than the low */
1391     MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
1392 
1393     /* Create the memory events for all the thresholds */
1394     Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1395     if (!NT_SUCCESS(Status)) return FALSE;
1396     Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1397     if (!NT_SUCCESS(Status)) return FALSE;
1398     Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1399     if (!NT_SUCCESS(Status)) return FALSE;
1400     Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1401     if (!NT_SUCCESS(Status)) return FALSE;
1402     Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1403     if (!NT_SUCCESS(Status)) return FALSE;
1404     Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1405     if (!NT_SUCCESS(Status)) return FALSE;
1406 
1407     /* Now setup the pool events */
1408     MiInitializePoolEvents();
1409 
1410     /* Set the initial event state */
1411     MiNotifyMemoryEvents();
1412     return TRUE;
1413 }
1414 
1415 CODE_SEG("INIT")
1416 VOID
1417 NTAPI
1418 MiAddHalIoMappings(VOID)
1419 {
1420     PVOID BaseAddress;
1421     PMMPDE PointerPde, LastPde;
1422     PMMPTE PointerPte;
1423     ULONG j;
1424     PFN_NUMBER PageFrameIndex;
1425 
1426     /* HAL Heap address -- should be on a PDE boundary */
1427     BaseAddress = (PVOID)MM_HAL_VA_START;
1428     ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
1429 
1430     /* Check how many PDEs the heap has */
1431     PointerPde = MiAddressToPde(BaseAddress);
1432     LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);
1433 
1434     while (PointerPde <= LastPde)
1435     {
1436         /* Does the HAL own this mapping? */
1437         if ((PointerPde->u.Hard.Valid == 1) &&
1438             (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
1439         {
1440             /* Get the PTE for it and scan each page */
1441             PointerPte = MiAddressToPte(BaseAddress);
1442             for (j = 0; j < PTE_PER_PAGE; j++)
1443             {
1444                 /* Does the HAL own this page? */
1445                 if (PointerPte->u.Hard.Valid == 1)
1446                 {
1447                     /* Is the HAL using it for device or I/O mapped memory? */
1448                     PageFrameIndex = PFN_FROM_PTE(PointerPte);
1449                     if (!MiGetPfnEntry(PageFrameIndex))
1450                     {
1451                         /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
1452                         DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
1453                     }
1454                 }
1455 
1456                 /* Move to the next page */
1457                 BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
1458                 PointerPte++;
1459             }
1460         }
1461         else
1462         {
1463             /* Move to the next address */
1464             BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
1465         }
1466 
1467         /* Move to the next PDE */
1468         PointerPde++;
1469     }
1470 }
1471 
1472 VOID
1473 NTAPI
1474 MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
1475 {
1476     ULONG i;
1477     PMMPFN Pfn1;
1478     PCHAR Consumer = "Unknown";
1479     KIRQL OldIrql;
1480     ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
1481 #if MI_TRACE_PFNS
1482     ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
1483     PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
1484     {
1485         "Not set",
1486         "Paged Pool",
1487         "Nonpaged Pool",
1488         "Nonpaged Pool Ex",
1489         "Kernel Stack",
1490         "Kernel Stack Ex",
1491         "System PTE",
1492         "VAD",
1493         "PEB/TEB",
1494         "Section",
1495         "Page Table",
1496         "Page Directory",
1497         "Old Page Table",
1498         "Driver Page",
1499         "Contiguous Alloc",
1500         "MDL",
1501         "Demand Zero",
1502         "Zero Loop",
1503         "Cache",
1504         "PFN Database",
1505         "Boot Driver",
1506         "Initial Memory",
1507         "Free Page"
1508     };
1509 #endif
1510     //
1511     // Loop the PFN database
1512     //
1513     KeRaiseIrql(HIGH_LEVEL, &OldIrql);
1514     for (i = 0; i <= MmHighestPhysicalPage; i++)
1515     {
1516         Pfn1 = MiGetPfnEntry(i);
1517         if (!Pfn1) continue;
1518 #if MI_TRACE_PFNS
1519         ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
1520 #endif
1521         //
1522         // Get the page location
1523         //
1524         switch (Pfn1->u3.e1.PageLocation)
1525         {
1526             case ActiveAndValid:
1527 
1528                 Consumer = "Active and Valid";
1529                 ActivePages++;
1530                 break;
1531 
1532             case ZeroedPageList:
1533 
1534                 Consumer = "Zero Page List";
1535                 FreePages++;
1536                 break;//continue;
1537 
1538             case FreePageList:
1539 
1540                 Consumer = "Free Page List";
1541                 FreePages++;
1542                 break;//continue;
1543 
1544             default:
1545 
1546                 Consumer = "Other (ASSERT!)";
1547                 OtherPages++;
1548                 break;
1549         }
1550 
1551 #if MI_TRACE_PFNS
1552         /* Add into bucket */
1553         UsageBucket[Pfn1->PfnUsage]++;
1554 #endif
1555 
1556         //
1557         // Pretty-print the page
1558         //
1559         if (!StatusOnly)
1560         DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s]\n",
1561                  i << PAGE_SHIFT,
1562                  Consumer,
1563                  Pfn1->u3.e2.ReferenceCount,
1564                  Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
1565 #if MI_TRACE_PFNS
1566                  MI_USAGE_TEXT[Pfn1->PfnUsage],
1567                  Pfn1->ProcessName);
1568 #else
1569                  "Page tracking",
1570                  "is disabled");
1571 #endif
1572     }
1573 
1574     DbgPrint("Active:               %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
1575     DbgPrint("Free:                 %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
1576     DbgPrint("Other:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1577     DbgPrint("-----------------------------------------\n");
1578 #if MI_TRACE_PFNS
1579     OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
1580     DbgPrint("Boot Images:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1581     OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
1582     DbgPrint("System Drivers:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1583     OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
1584     DbgPrint("PFN Database:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1585     OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_PAGE_DIRECTORY] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
1586     DbgPrint("Page Tables:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1587     OtherPages = UsageBucket[MI_USAGE_SYSTEM_PTE];
1588     DbgPrint("System PTEs:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1589     OtherPages = UsageBucket[MI_USAGE_VAD];
1590     DbgPrint("VADs:                 %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1591     OtherPages = UsageBucket[MI_USAGE_CONTINOUS_ALLOCATION];
1592     DbgPrint("Continuous Allocs:    %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1593     OtherPages = UsageBucket[MI_USAGE_MDL];
1594     DbgPrint("MDLs:                 %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1595     OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
1596     DbgPrint("NonPaged Pool:        %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1597     OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
1598     DbgPrint("Paged Pool:           %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1599     OtherPages = UsageBucket[MI_USAGE_DEMAND_ZERO];
1600     DbgPrint("Demand Zero:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1601     OtherPages = UsageBucket[MI_USAGE_ZERO_LOOP];
1602     DbgPrint("Zero Loop:            %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1603     OtherPages = UsageBucket[MI_USAGE_PEB_TEB];
1604     DbgPrint("PEB/TEB:              %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1605     OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
1606     DbgPrint("Kernel Stack:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1607     OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
1608     DbgPrint("Init Memory:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1609     OtherPages = UsageBucket[MI_USAGE_SECTION];
1610     DbgPrint("Sections:             %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1611     OtherPages = UsageBucket[MI_USAGE_CACHE];
1612     DbgPrint("Cache:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1613     OtherPages = UsageBucket[MI_USAGE_FREE_PAGE];
1614     DbgPrint("Free:                 %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
1615 #endif
1616     KeLowerIrql(OldIrql);
1617 }
1618 
1619 CODE_SEG("INIT")
1620 PPHYSICAL_MEMORY_DESCRIPTOR
1621 NTAPI
1622 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1623                          IN PBOOLEAN IncludeType)
1624 {
1625     PLIST_ENTRY NextEntry;
1626     ULONG Run = 0, InitialRuns;
1627     PFN_NUMBER NextPage = -1, PageCount = 0;
1628     PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1629     PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1630 
1631     //
1632     // Start with the maximum we might need
1633     //
1634     InitialRuns = MiNumberDescriptors;
1635 
1636     //
1637     // Allocate the maximum we'll ever need
1638     //
1639     Buffer = ExAllocatePoolWithTag(NonPagedPool,
1640                                    sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1641                                    sizeof(PHYSICAL_MEMORY_RUN) *
1642                                    (InitialRuns - 1),
1643                                    'lMmM');
1644     if (!Buffer) return NULL;
1645 
1646     //
1647     // For now that's how many runs we have
1648     //
1649     Buffer->NumberOfRuns = InitialRuns;
1650 
1651     //
1652     // Now loop through the descriptors again
1653     //
1654     NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1655     while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1656     {
1657         //
1658         // Grab each one, and check if it's one we should include
1659         //
1660         MdBlock = CONTAINING_RECORD(NextEntry,
1661                                     MEMORY_ALLOCATION_DESCRIPTOR,
1662                                     ListEntry);
1663         if ((MdBlock->MemoryType < LoaderMaximum) &&
1664             (IncludeType[MdBlock->MemoryType]))
1665         {
1666             //
1667             // Add this to our running total
1668             //
1669             PageCount += MdBlock->PageCount;
1670 
1671             //
1672             // Check if the next page is described by the next descriptor
1673             //
1674             if (MdBlock->BasePage == NextPage)
1675             {
1676                 //
1677                 // Combine it into the same physical run
1678                 //
1679                 ASSERT(MdBlock->PageCount != 0);
1680                 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1681                 NextPage += MdBlock->PageCount;
1682             }
1683             else
1684             {
1685                 //
1686                 // Otherwise just duplicate the descriptor's contents
1687                 //
1688                 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1689                 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1690                 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1691 
1692                 //
1693                 // And in this case, increase the number of runs
1694                 //
1695                 Run++;
1696             }
1697         }
1698 
1699         //
1700         // Try the next descriptor
1701         //
1702         NextEntry = MdBlock->ListEntry.Flink;
1703     }
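    //
    // For example, two included descriptors covering pages 0x100-0x17F and
    // 0x180-0x27F are physically contiguous, so the second one is folded into
    // the first run above, yielding a single run of 0x180 pages at base 0x100.
    //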
1704 
1705     //
1706     // We should not have been able to go past our initial estimate
1707     //
1708     ASSERT(Run <= Buffer->NumberOfRuns);
1709 
1710     //
1711     // Our guess was probably exaggerated...
1712     //
1713     if (InitialRuns > Run)
1714     {
1715         //
1716         // Allocate a more accurately sized buffer
1717         //
1718         NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1719                                           sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1720                                           sizeof(PHYSICAL_MEMORY_RUN) *
1721                                           (Run - 1),
1722                                           'lMmM');
1723         if (NewBuffer)
1724         {
1725             //
1726             // Copy the old buffer into the new, then free it
1727             //
1728             RtlCopyMemory(NewBuffer->Run,
1729                           Buffer->Run,
1730                           sizeof(PHYSICAL_MEMORY_RUN) * Run);
1731             ExFreePoolWithTag(Buffer, 'lMmM');
1732 
1733             //
1734             // Now use the new buffer
1735             //
1736             Buffer = NewBuffer;
1737         }
1738     }
1739 
1740     //
1741     // Write the final numbers, and return it
1742     //
1743     Buffer->NumberOfRuns = Run;
1744     Buffer->NumberOfPages = PageCount;
1745     return Buffer;
1746 }
1747 
1748 CODE_SEG("INIT")
1749 VOID
1750 NTAPI
1751 MiBuildPagedPool(VOID)
1752 {
1753     PMMPTE PointerPte;
1754     PMMPDE PointerPde;
1755     MMPDE TempPde = ValidKernelPde;
1756     PFN_NUMBER PageFrameIndex;
1757     KIRQL OldIrql;
1758     SIZE_T Size, NumberOfPages, NumberOfPdes;
1759     ULONG BitMapSize;
1760 #if (_MI_PAGING_LEVELS >= 3)
1761     MMPPE TempPpe = ValidKernelPpe;
1762     PMMPPE PointerPpe;
1763 #elif (_MI_PAGING_LEVELS == 2)
1764     MMPTE TempPte = ValidKernelPte;
1765 
1766     //
1767     // Get the page frame number for the system page directory
1768     //
1769     PointerPte = MiAddressToPte(PDE_BASE);
1770     ASSERT(PPE_PER_PAGE == 1);
1771     MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
1772 
1773     //
1774     // Allocate a system PTE which will hold a copy of the page directory
1775     //
1776     PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
1777     ASSERT(PointerPte);
1778     MmSystemPagePtes = MiPteToAddress(PointerPte);
1779 
1780     //
1781     // Make this system PTE point to the system page directory.
1782     // It is now essentially double-mapped. This will be used later for lazy
1783     // evaluation of PDEs across process switches, similarly to how the Global
1784     // page directory array in the old ReactOS Mm is used (but in a less hacky
1785     // way).
1786     //
1787     TempPte = ValidKernelPte;
1788     ASSERT(PPE_PER_PAGE == 1);
1789     TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
1790     MI_WRITE_VALID_PTE(PointerPte, TempPte);
1791 #endif
1792 
1793 #ifdef _M_IX86
1794     //
1795     // Let's get back to paged pool work: size it up.
1796     // By default, it should be twice as big as nonpaged pool.
1797     //
1798     MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
1799     if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
1800                                     (ULONG_PTR)MmPagedPoolStart))
1801     {
1802         //
1803         // On the other hand, we have limited VA space, so cap paged pool at the
1804         // largest size possible without overflowing into the nonpaged system VA
1805         // region.
1806         //
1807         MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
1808                                    (ULONG_PTR)MmPagedPoolStart;
1809     }
1810 #endif // _M_IX86
1811 
1812     //
1813     // Get the size in pages and make sure paged pool is at least 32MB.
1814     //
1815     Size = MmSizeOfPagedPoolInBytes;
1816     if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
1817     NumberOfPages = BYTES_TO_PAGES(Size);
1818 
1819     //
1820     // Now check how many PDEs will be required for these many pages.
1821     //
1822     NumberOfPdes = (NumberOfPages + (PTE_PER_PAGE - 1)) / PTE_PER_PAGE;
1823 
1824     //
1825     // Recompute the PDE-aligned size of the paged pool, in bytes and pages.
1826     //
1827     MmSizeOfPagedPoolInBytes = NumberOfPdes * PTE_PER_PAGE * PAGE_SIZE;
1828     MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
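    //
    // Worked example, assuming x86 without PAE (PTE_PER_PAGE == 1024, 4KB
    // pages) and a 32MB paged pool request: NumberOfPages = 8192, so
    // NumberOfPdes = (8192 + 1023) / 1024 = 8, and the PDE-aligned size is
    // 8 * 1024 * 4096 = 32MB exactly (a non-multiple would round up to the
    // next 4MB boundary).
    //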
1829 
1830 #ifdef _M_IX86
1831     //
1832     // Let's be really sure this doesn't overflow into nonpaged system VA
1833     //
1834     ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
1835            (ULONG_PTR)MmNonPagedSystemStart);
1836 #endif // _M_IX86
1837 
1838     //
1839     // This is where paged pool ends
1840     //
1841     MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
1842                               MmSizeOfPagedPoolInBytes) - 1);
1843 
1844     //
1845     // Lock the PFN database
1846     //
1847     OldIrql = MiAcquirePfnLock();
1848 
1849 #if (_MI_PAGING_LEVELS >= 3)
1850     /* On these systems there is no double-mapping; instead, the PPEs are
1851      * set up to span the entire paged pool area, so there is no need for
1852      * the system PD */
1853     for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
1854          PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
1855          PointerPpe++)
1856     {
1857         /* Check if the PPE is already valid */
1858         if (!PointerPpe->u.Hard.Valid)
1859         {
1860             /* It is not, so map a fresh zeroed page */
1861             TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
1862             MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
1863             MiInitializePfnForOtherProcess(TempPpe.u.Hard.PageFrameNumber,
1864                                            (PMMPTE)PointerPpe,
1865                                            PFN_FROM_PTE(MiAddressToPte(PointerPpe)));
1866         }
1867     }
1868 #endif
1869 
1870     //
1871     // So now get the PDE for paged pool and zero it out
1872     //
1873     PointerPde = MiAddressToPde(MmPagedPoolStart);
1874     RtlZeroMemory(PointerPde,
1875                   (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));
1876 
1877     //
1878     // Next, get the first and last PTE
1879     //
1880     PointerPte = MiAddressToPte(MmPagedPoolStart);
1881     MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
1882     MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
1883 
1884     /* Allocate a page and map the first paged pool PDE */
1885     MI_SET_USAGE(MI_USAGE_PAGED_POOL);
1886     MI_SET_PROCESS2("Kernel");
1887     PageFrameIndex = MiRemoveZeroPage(0);
1888     TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
1889     MI_WRITE_VALID_PDE(PointerPde, TempPde);
1890 #if (_MI_PAGING_LEVELS >= 3)
1891     /* Use the PPE of MmPagedPoolStart that was setup above */
1892 //    Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
1893 
1894     /* Initialize the PFN entry for it */
1895     MiInitializePfnForOtherProcess(PageFrameIndex,
1896                                    (PMMPTE)PointerPde,
1897                                    PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
1898 #else
1899     /* Do it this way */
1900 //    Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_PER_PAGE]
1901 
1902     /* Initialize the PFN entry for it */
1903     MiInitializePfnForOtherProcess(PageFrameIndex,
1904                                    (PMMPTE)PointerPde,
1905                                    MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_PER_PAGE]);
1906 #endif
1907 
1908     //
1909     // Release the PFN database lock
1910     //
1911     MiReleasePfnLock(OldIrql);
1912 
1913     //
1914     // We only have one PDE mapped for now... at fault time, additional PDEs
1915     // will be allocated to handle paged pool growth. This is where they'll have
1916     // to start.
1917     //
1918     MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;
1919 
1920     //
1921     // We keep track of each page via a bit, so check how big the bitmap will
1922     // have to be (make sure to align our page count such that it fits nicely
1923     // into a 4-byte aligned bitmap).
1924     //
1925     // We'll also allocate the bitmap header itself as part of the same buffer.
1926     //
1927     NumberOfPages = NumberOfPdes * PTE_PER_PAGE;
1928     ASSERT(NumberOfPages == MmSizeOfPagedPoolInPages);
1929     BitMapSize = (ULONG)NumberOfPages;
1930     Size = sizeof(RTL_BITMAP) + (((BitMapSize + 31) / 32) * sizeof(ULONG));
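    //
    // Continuing the 32MB example above (8192 pool pages): the bitmap needs
    // 8192 bits, i.e. (8192 + 31) / 32 = 256 ULONGs = 1KB of bit data, plus
    // sizeof(RTL_BITMAP) for the header (8 bytes on 32-bit builds), all
    // carved out of a single nonpaged allocation below.
    //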
1931 
1932     //
1933     // Allocate the allocation bitmap, which tells us which regions have not yet
1934     // been mapped into memory
1935     //
1936     MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
1937                                                                    Size,
1938                                                                    TAG_MM);
1939     ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
1940 
1941     //
1942     // Initialize it such that at first, only the first page's worth of PTEs is
1943     // marked as allocated (incidentally, the first PDE we allocated earlier).
1944     //
1945     RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
1946                         (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
1947                         BitMapSize);
1948     RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
1949     RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, PTE_PER_PAGE);
1950 
1951     //
1952     // We have a second bitmap, which keeps track of where allocations end.
1953     // Given the allocation bitmap and a base address, we can therefore figure
1954     // out which page is the last page of that allocation, and thus how big the
1955     // entire allocation is.
1956     //
1957     MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
1958                                                                  Size,
1959                                                                  TAG_MM);
1960     ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
1961     RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
1962                         (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
1963                         BitMapSize);
1964 
1965     //
1966     // Since no allocations have been made yet, there are no bits set as the end
1967     //
1968     RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);
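    //
    // Roughly speaking, when the paged pool allocator later hands out N pages
    // it sets N consecutive bits in the allocation map and marks only the last
    // of them in the end-of-allocation map; a free from the base address can
    // then scan forward to the first set "end" bit to recover the size.
    //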
1969 
1970     //
1971     // Initialize paged pool.
1972     //
1973     InitializePool(PagedPool, 0);
1974 
1975     /* Initialize special pool */
1976     MiInitializeSpecialPool();
1977 
1978     /* Default low threshold of 30MB or one fifth of paged pool */
1979     MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
1980     MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);
1981 
1982     /* Default high threshold of 60MB or 25% */
1983     MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
1984     MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
1985     ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
1986 
1987     /* Setup the global session space */
1988     MiInitializeSystemSpaceMap(NULL);
1989 }
1990 
1991 CODE_SEG("INIT")
1992 VOID
1993 NTAPI
1994 MiDbgDumpMemoryDescriptors(VOID)
1995 {
1996     PLIST_ENTRY NextEntry;
1997     PMEMORY_ALLOCATION_DESCRIPTOR Md;
1998     PFN_NUMBER TotalPages = 0;
1999     PCHAR
2000     MemType[] =
2001     {
2002         "ExceptionBlock    ",
2003         "SystemBlock       ",
2004         "Free              ",
2005         "Bad               ",
2006         "LoadedProgram     ",
2007         "FirmwareTemporary ",
2008         "FirmwarePermanent ",
2009         "OsloaderHeap      ",
2010         "OsloaderStack     ",
2011         "SystemCode        ",
2012         "HalCode           ",
2013         "BootDriver        ",
2014         "ConsoleInDriver   ",
2015         "ConsoleOutDriver  ",
2016         "StartupDpcStack   ",
2017         "StartupKernelStack",
2018         "StartupPanicStack ",
2019         "StartupPcrPage    ",
2020         "StartupPdrPage    ",
2021         "RegistryData      ",
2022         "MemoryData        ",
2023         "NlsData           ",
2024         "SpecialMemory     ",
2025         "BBTMemory         ",
2026         "LoaderReserve     ",
2027         "LoaderXIPRom      "
2028     };
2029 
2030     DPRINT1("Base\t\tLength\t\tType\n");
2031     for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
2032          NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
2033          NextEntry = NextEntry->Flink)
2034     {
2035         Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
2036         DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
2037         TotalPages += Md->PageCount;
2038     }
2039 
2040     DPRINT1("Total: %08lX (%lu MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
2041 }
2042 
2043 CODE_SEG("INIT")
2044 BOOLEAN
2045 NTAPI
2046 MmArmInitSystem(IN ULONG Phase,
2047                 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
2048 {
2049     ULONG i;
2050     BOOLEAN IncludeType[LoaderMaximum];
2051     PVOID Bitmap;
2052     PPHYSICAL_MEMORY_RUN Run;
2053     PFN_NUMBER PageCount;
2054 #if DBG
2055     ULONG j;
2056     PMMPTE PointerPte, TestPte;
2057     MMPTE TempPte;
2058 #endif
2059 
2060     /* Dump memory descriptors */
2061     if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
2062 
2063     //
2064     // Mark the memory types that we don't consider RAM/usable
2065     // We use the same exclusions that Windows does, in order to try to be
2066     // compatible with WinLDR-style booting
2067     //
2068     for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
2069     IncludeType[LoaderBad] = FALSE;
2070     IncludeType[LoaderFirmwarePermanent] = FALSE;
2071     IncludeType[LoaderSpecialMemory] = FALSE;
2072     IncludeType[LoaderBBTMemory] = FALSE;
2073     if (Phase == 0)
2074     {
2075         /* Count physical pages on the system */
2076         MiScanMemoryDescriptors(LoaderBlock);
2077 
2078         /* Initialize the phase 0 temporary event */
2079         KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
2080 
2081         /* Set all the events to use the temporary event for now */
2082         MiLowMemoryEvent = &MiTempEvent;
2083         MiHighMemoryEvent = &MiTempEvent;
2084         MiLowPagedPoolEvent = &MiTempEvent;
2085         MiHighPagedPoolEvent = &MiTempEvent;
2086         MiLowNonPagedPoolEvent = &MiTempEvent;
2087         MiHighNonPagedPoolEvent = &MiTempEvent;
2088 
2089         //
2090         // Default throttling limits for Cc
2091         // May be adjusted later on depending on system type
2092         //
2093         MmThrottleTop = 450;
2094         MmThrottleBottom = 127;
2095 
2096         //
2097         // Define the basic user vs. kernel address space separation
2098         //
2099         MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
2100         MmUserProbeAddress = (ULONG_PTR)MI_USER_PROBE_ADDRESS;
2101         MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;
2102 
2103         /* Highest PTE and PDE based on the addresses above */
2104         MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
2105         MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
2106 #if (_MI_PAGING_LEVELS >= 3)
2107         MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
2108 #if (_MI_PAGING_LEVELS >= 4)
2109         MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
2110 #endif
2111 #endif
2112         //
2113         // Get the size of the boot loader's image allocations and then round
2114         // that region up to a PDE size, so that any PDEs we might create for
2115         // whatever follows are separate from the PDEs that boot loader might've
2116         // already created (and later, we can blow all that away if we want to).
2117         //
2118         MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
2119         MmBootImageSize *= PAGE_SIZE;
2120         MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
2121         ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
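        //
        // For example, on x86 without PAE (PDE_MAPPED_VA == 4MB), a loader
        // image span of 0x1234 pages (0x1234000 bytes) is rounded up to
        // 0x1400000 bytes, i.e. the next 4MB boundary, so the loader's PDEs
        // and ours never share a page directory entry.
        //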
2122 
2123         /* Initialize session space address layout */
2124         MiInitializeSessionSpaceLayout();
2125 
2126         /* Set the based section highest address */
2127         MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
2128 
2129         /* Loop all 8 standby lists */
2130         for (i = 0; i < 8; i++)
2131         {
2132             /* Initialize them */
2133             MmStandbyPageListByPriority[i].Total = 0;
2134             MmStandbyPageListByPriority[i].ListName = StandbyPageList;
2135             MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
2136             MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
2137         }
2138 
2139         /* Initialize the user mode image list */
2140         InitializeListHead(&MmLoadedUserImageList);
2141 
2142         /* Initialize the working set expansion list */
2143         InitializeListHead(&MmWorkingSetExpansionHead);
2144 
2145         /* Initialize critical section timeout value (relative time is negative) */
2146         MmCriticalSectionTimeout.QuadPart = MmCritsectTimeoutSeconds * (-10000000LL);
2147 
2148         /* Initialize the paged pool mutex and the section commit mutex */
2149         KeInitializeGuardedMutex(&MmPagedPoolMutex);
2150         KeInitializeGuardedMutex(&MmSectionCommitMutex);
2151         KeInitializeGuardedMutex(&MmSectionBasedMutex);
2152 
2153         /* Initialize the Loader Lock */
2154         KeInitializeMutant(&MmSystemLoadLock, FALSE);
2155 
2156         /* Set up the zero page event */
2157         KeInitializeEvent(&MmZeroingPageEvent, NotificationEvent, FALSE);
2158 
2159         /* Initialize the dead stack S-LIST */
2160         InitializeSListHead(&MmDeadStackSListHead);
2161 
2162         //
2163         // Check if this is a machine with less than 19MB of RAM
2164         //
2165         PageCount = MmNumberOfPhysicalPages;
2166         if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
2167         {
2168             //
2169             // Use the very minimum of system PTEs
2170             //
2171             MmNumberOfSystemPtes = 7000;
2172         }
2173         else
2174         {
2175             //
2176             // Use the default
2177             //
2178             MmNumberOfSystemPtes = 11000;
2179             if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
2180             {
2181                 //
2182                 // Double the amount of system PTEs
2183                 //
2184                 MmNumberOfSystemPtes <<= 1;
2185             }
2186             if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
2187             {
2188                 //
2189                 // Double the amount of system PTEs
2190                 //
2191                 MmNumberOfSystemPtes <<= 1;
2192             }
2193             if (MmSpecialPoolTag != 0 && MmSpecialPoolTag != -1)
2194             {
2195                 //
2196                 // Add some extra PTEs for special pool
2197                 //
2198                 MmNumberOfSystemPtes += 0x6000;
2199             }
2200         }
2201 
2202         DPRINT("System PTE count has been tuned to %lu (%lu bytes)\n",
2203                MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
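        //
        // Worked example: on a machine that clears both boost thresholds and
        // has special pool enabled, the count above becomes
        // 11000 << 1 << 1 = 44000, plus 0x6000 (24576) extra PTEs, for a total
        // of 68576 system PTEs -- roughly 268MB of virtual address space at
        // 4KB per PTE.
        //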
2204 
2205         /* Check if no values are set for the heap limits */
2206         if (MmHeapSegmentReserve == 0)
2207         {
2208             MmHeapSegmentReserve = 2 * _1MB;
2209         }
2210 
2211         if (MmHeapSegmentCommit == 0)
2212         {
2213             MmHeapSegmentCommit = 2 * PAGE_SIZE;
2214         }
2215 
2216         if (MmHeapDeCommitTotalFreeThreshold == 0)
2217         {
2218             MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
2219         }
2220 
2221         if (MmHeapDeCommitFreeBlockThreshold == 0)
2222         {
2223             MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
2224         }
2225 
2226         /* Initialize the working set lock */
2227         ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);
2228 
2229         /* Set commit limit */
2230         MmTotalCommitLimit = (2 * _1GB) >> PAGE_SHIFT;
2231         MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2232 
2233         /* Has the allocation fragment been setup? */
2234         if (!MmAllocationFragment)
2235         {
2236             /* Use the default value */
2237             MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
2238             if (PageCount < ((256 * _1MB) / PAGE_SIZE))
2239             {
2240                 /* On systems with less than 256MB of memory, divide by 4 */
2241                 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
2242             }
2243             else if (PageCount < (_1GB / PAGE_SIZE))
2244             {
2245                 /* On systems with less than 1GB, divide by 2 */
2246                 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
2247             }
2248         }
2249         else
2250         {
2251             /* The registry value is in KB; convert to bytes and round to pages */
2252             MmAllocationFragment *= _1KB;
2253             MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
2254 
2255             /* Don't let it past the maximum */
2256             MmAllocationFragment = min(MmAllocationFragment,
2257                                        MI_MAX_ALLOCATION_FRAGMENT);
2258 
2259             /* Don't let it get too small either */
2260             MmAllocationFragment = max(MmAllocationFragment,
2261                                        MI_MIN_ALLOCATION_FRAGMENT);
2262         }
2263 
2264         /* Check for kernel stack size that's too big */
2265         if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
2266         {
2267             /* Sanitize to default value */
2268             MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
2269         }
2270         else
2271         {
2272             /* Take the registry setting, and convert it into bytes */
2273             MmLargeStackSize *= _1KB;
2274 
2275             /* Now align it to a page boundary */
2276             MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);
2277 
2278             /* Sanity checks */
2279             ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
2280             ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);
2281 
2282             /* Make sure it's not too low */
2283             if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
2284         }
2285 
2286         /* Compute color information (L2 cache-separated paging lists) */
2287         MiComputeColorInformation();
2288 
2289         // Calculate the number of bytes for the PFN database
2290         // then add the color tables and convert to pages
2291         MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
2292         MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
2293         MxPfnAllocation >>= PAGE_SHIFT;
2294 
2295         // We have to add one to the count here, because in the process of
2296         // shifting down to the page size, we actually ended up getting the
2297         // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
2298         // Later on, we'll shift this number back into bytes, which would cause
2299         // us to end up with only 0x5F000 bytes -- when we actually want to have
2300         // 0x60000 bytes.
2301         MxPfnAllocation++;
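        //
        // For illustration, assuming 1GB of RAM (0x40000 page frames) and a
        // hypothetical 24-byte MMPFN: the database alone is 0x40000 * 24 =
        // 0x600000 bytes, the color tables add a little more, and shifting
        // down by PAGE_SHIFT plus the increment above yields the page count
        // that MiInitMachineDependent will carve out.
        //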
2302 
2303         /* Initialize the platform-specific parts */
2304         MiInitMachineDependent(LoaderBlock);
2305 
2306 #if DBG
2307         /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
2308         PointerPte = (PMMPTE)MmPagedPoolStart;
2309         MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2310         TestPte = MiProtoPteToPte(&TempPte);
2311         ASSERT(PointerPte == TestPte);
2312 
2313         /* Try the last nonpaged pool address */
2314         PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
2315         MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2316         TestPte = MiProtoPteToPte(&TempPte);
2317         ASSERT(PointerPte == TestPte);
2318 
2319         /* Try a bunch of random addresses near the end of the address space */
2320         PointerPte = (PMMPTE)((ULONG_PTR)MI_HIGHEST_SYSTEM_ADDRESS - 0x37FFF);
2321         for (j = 0; j < 20; j += 1)
2322         {
2323             MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2324             TestPte = MiProtoPteToPte(&TempPte);
2325             ASSERT(PointerPte == TestPte);
2326             PointerPte++;
2327         }
2328 
2329         /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
2330         PointerPte = (PMMPTE)((ULONG_PTR)MmNonPagedPoolStart + (MmSizeOfNonPagedPoolInBytes / 2));
2331         MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
2332         TestPte = MiSubsectionPteToSubsection(&TempPte);
2333         ASSERT(PointerPte == TestPte);
2334 #endif
2335 
2336         //
2337         // Build the physical memory block
2338         //
2339         MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
2340                                                          IncludeType);
2341 
2342         //
2343         // Allocate enough buffer for the PFN bitmap
2344         // Align it up to a 32-bit boundary
2345         //
2346         Bitmap = ExAllocatePoolWithTag(NonPagedPool,
2347                                        (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
2348                                        TAG_MM);
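        //
        // For example, with 0x40000 page frames (1GB of RAM) the bitmap needs
        // (0x40000 + 31) / 32 = 0x2000 ULONGs, i.e. a 32KB nonpaged buffer,
        // one bit per physical page frame.
        //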
2349         if (!Bitmap)
2350         {
2351             //
2352             // This is critical
2353             //
2354             KeBugCheckEx(INSTALL_MORE_MEMORY,
2355                          MmNumberOfPhysicalPages,
2356                          MmLowestPhysicalPage,
2357                          MmHighestPhysicalPage,
2358                          0x101);
2359         }
2360 
2361         //
2362         // Initialize it and clear all the bits to begin with
2363         //
2364         RtlInitializeBitMap(&MiPfnBitMap,
2365                             Bitmap,
2366                             (ULONG)MmHighestPhysicalPage + 1);
2367         RtlClearAllBits(&MiPfnBitMap);
2368 
2369         //
2370         // Loop physical memory runs
2371         //
2372         for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
2373         {
2374             //
2375             // Get the run
2376             //
2377             Run = &MmPhysicalMemoryBlock->Run[i];
2378             DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
2379                    Run->BasePage << PAGE_SHIFT,
2380                    (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
2381 
2382             //
2383             // Make sure it has pages inside it
2384             //
2385             if (Run->PageCount)
2386             {
2387                 //
2388                 // Set the bits in the PFN bitmap
2389                 //
2390                 RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
2391             }
2392         }
2393 
2394         /* Look for large page cache entries that need caching */
2395         MiSyncCachedRanges();
2396 
2397         /* Loop for HAL Heap I/O device mappings that need coherency tracking */
2398         MiAddHalIoMappings();
2399 
2400         /* Set the initial resident page count */
2401         MmResidentAvailablePages = MmAvailablePages - 32;
2402 
2403         /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2404         MiInitializeLargePageSupport();
2405 
2406         /* Check if the registry says any drivers should be loaded with large pages */
2407         MiInitializeDriverLargePageList();
2408 
2409         /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2410         MiReloadBootLoadedDrivers(LoaderBlock);
2411 
2412         /* FIXME: Call out into Driver Verifier for initialization  */
2413 
2414         /* Check how many pages the system has */
2415         if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
2416         {
2417             /* Set small system */
2418             MmSystemSize = MmSmallSystem;
2419             MmMaximumDeadKernelStacks = 0;
2420         }
2421         else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
2422         {
2423             /* Set small system and add 100 pages for the cache */
2424             MmSystemSize = MmSmallSystem;
2425             MmSystemCacheWsMinimum += 100;
2426             MmMaximumDeadKernelStacks = 2;
2427         }
2428         else
2429         {
2430             /* Set medium system and add 400 pages for the cache */
2431             MmSystemSize = MmMediumSystem;
2432             MmSystemCacheWsMinimum += 400;
2433             MmMaximumDeadKernelStacks = 5;
2434         }
2435 
2436         /* Check for less than 24MB */
2437         if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
2438         {
2439             /* No more than 32 pages */
2440             MmSystemCacheWsMinimum = 32;
2441         }
2442 
2443         /* Check for more than 32MB */
2444         if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
2445         {
2446             /* Check for product type being "Wi" for WinNT */
2447             if (MmProductType == '\0i\0W')
2448             {
2449                 /* Then this is a large system */
2450                 MmSystemSize = MmLargeSystem;
2451             }
2452             else
2453             {
2454                 /* For servers, we need 64MB to consider this as being large */
2455                 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
2456                 {
2457                     /* Set it as large */
2458                     MmSystemSize = MmLargeSystem;
2459                 }
2460             }
2461         }
2462 
2463         /* Check for more than 33 MB */
2464         if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
2465         {
2466             /* Add another 500 pages to the cache */
2467             MmSystemCacheWsMinimum += 500;
2468         }
2469 
2470         /* Now setup the shared user data fields */
2471         ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
2472         SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
2473         SharedUserData->LargePageMinimum = 0;
2474 
2475         /* Check for workstation (Wi for WinNT) */
2476         if (MmProductType == '\0i\0W')
2477         {
2478             /* Set Windows NT Workstation product type */
2479             SharedUserData->NtProductType = NtProductWinNt;
2480             MmProductType = 0;
2481 
2482             /* For this product, we wait till the last moment to throttle */
2483             MmThrottleTop = 250;
2484             MmThrottleBottom = 30;
2485         }
2486         else
2487         {
2488             /* Check for LanMan server (La for LanmanNT) */
2489             if (MmProductType == '\0a\0L')
2490             {
2491                 /* This is a domain controller */
2492                 SharedUserData->NtProductType = NtProductLanManNt;
2493             }
2494             else
2495             {
2496                 /* Otherwise it must be a normal server (Se for ServerNT) */
2497                 SharedUserData->NtProductType = NtProductServer;
2498             }
2499 
2500             /* Set the product type, and make the system more aggressive with low memory */
2501             MmProductType = 1;
2502             MmMinimumFreePages = 81;
2503 
2504             /* We will throttle earlier to preserve memory */
2505             MmThrottleTop = 450;
2506             MmThrottleBottom = 80;
2507         }
2508 
2509         /* Update working set tuning parameters */
2510         MiAdjustWorkingSetManagerParameters(!MmProductType);
2511 
2512         /* Fine-tune the page count by removing working set and NP expansion */
2513         MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2514         MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2515         MmResidentAvailableAtInit = MmResidentAvailablePages;
2516         if (MmResidentAvailablePages <= 0)
2517         {
2518             /* This should not happen */
2519             DPRINT1("System cache working set too big\n");
2520             return FALSE;
2521         }
2522 
2523         /* Define limits for system cache */
2524 #ifdef _M_AMD64
2525         MmSizeOfSystemCacheInPages = ((MI_SYSTEM_CACHE_END + 1) - MI_SYSTEM_CACHE_START) / PAGE_SIZE;
2526 #else
2527         MmSizeOfSystemCacheInPages = ((ULONG_PTR)MI_PAGED_POOL_START - (ULONG_PTR)MI_SYSTEM_CACHE_START) / PAGE_SIZE;
2528 #endif
2529         MmSystemCacheEnd = (PVOID)((ULONG_PTR)MmSystemCacheStart + (MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
2530 #ifdef _M_AMD64
2531         ASSERT(MmSystemCacheEnd == (PVOID)MI_SYSTEM_CACHE_END);
2532 #else
2533         ASSERT(MmSystemCacheEnd == (PVOID)((ULONG_PTR)MI_PAGED_POOL_START - 1));
2534 #endif
2535 
2536         /* Initialize the system cache */
2537         //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2538 
2539         /* Update the commit limit */
2540         MmTotalCommitLimit = MmAvailablePages;
2541         if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
2542         MmTotalCommitLimitMaximum = MmTotalCommitLimit;
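        //
        // In other words, the commit limit is simply the number of available
        // pages minus a small 1024-page (4MB with 4KB pages) cushion; e.g.
        // 200000 available pages give a limit of 198976 pages, roughly 777MB
        // of commit.
        //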
2543 
2544         /* Size up paged pool and build the shadow system page directory */
2545         MiBuildPagedPool();
2546 
2547         /* Debugger physical memory support is now ready to be used */
2548         MmDebugPte = MiAddressToPte(MiDebugMapping);
2549 
2550         /* Initialize the loaded module list */
2551         MiInitializeLoadedModuleList(LoaderBlock);
2552     }
2553 
2554     //
2555     // Always return success for now
2556     //
2557     return TRUE;
2558 }
2559 
2560 /* EOF */
2561