1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17 #undef MmSystemRangeStart
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 SIZE_T MmSizeOfNonPagedPoolInBytes;
27 SIZE_T MmMaximumNonPagedPoolInBytes;
28
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
43
44 //
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non
47 // paged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
54 // The initial nonpaged pool is physically continuous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
96 PVOID MmNonPagedSystemStart;
97 PVOID MmNonPagedPoolStart;
98 PVOID MmNonPagedPoolExpansionStart;
99 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
100
101 //
102 // This is where paged pool starts by default
103 //
104 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
105 PVOID MmPagedPoolEnd;
106
107 //
108 // And this is its default size
109 //
110 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
111 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
112
113 //
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
119 //
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
122 //
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't
125 // support that yet.
126 //
127 PVOID MiSessionSpaceEnd; // 0xC0000000
128 PVOID MiSessionImageEnd; // 0xC0000000
129 PVOID MiSessionImageStart; // 0xBF800000
130 PVOID MiSessionSpaceWs;
131 PVOID MiSessionViewStart; // 0xBE000000
132 PVOID MiSessionPoolEnd; // 0xBE000000
133 PVOID MiSessionPoolStart; // 0xBD000000
134 PVOID MmSessionBase; // 0xBD000000
135 SIZE_T MmSessionSize;
136 SIZE_T MmSessionViewSize;
137 SIZE_T MmSessionPoolSize;
138 SIZE_T MmSessionImageSize;
139
140 /*
141 * These are the PTE addresses of the boundaries carved out above
142 */
143 PMMPTE MiSessionImagePteStart;
144 PMMPTE MiSessionImagePteEnd;
145 PMMPTE MiSessionBasePte;
146 PMMPTE MiSessionLastPte;
147
148 //
149 // The system view space, on the other hand, is where sections that are memory
150 // mapped into "system space" end up.
151 //
152 // By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
153 //
154 PVOID MiSystemViewStart;
155 SIZE_T MmSystemViewSize;
156
157 #if (_MI_PAGING_LEVELS == 2)
158 //
159 // A copy of the system page directory (the page directory associated with the
160 // System process) is kept (double-mapped) by the manager in order to lazily
161 // map paged pool PDEs into external processes when they fault on a paged pool
162 // address.
163 //
164 PFN_NUMBER MmSystemPageDirectory[PPE_PER_PAGE];
165 PMMPDE MmSystemPagePtes;
166 #endif
167
168 //
169 // The system cache starts right after hyperspace. The first few pages are for
170 // keeping track of the system working set list.
171 //
172 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
173 //
174 PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
175
176 //
177 // Windows NT seems to choose between 7000, 11000 and 50000
178 // On systems with more than 32MB, this number is then doubled, and further
179 // aligned up to a PDE boundary (4MB).
180 //
181 PFN_COUNT MmNumberOfSystemPtes;
182
183 //
184 // This is how many pages the PFN database will take up
185 // In Windows, this includes the Quark Color Table, but not in ARM³
186 //
187 PFN_NUMBER MxPfnAllocation;
188
189 //
190 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
191 // of pages that are not actually valid physical memory, such as ACPI reserved
192 // regions, BIOS address ranges, or holes in physical memory address space which
193 // could indicate device-mapped I/O memory.
194 //
195 // In fact, the lack of a PFN entry for a page usually indicates that this is
196 // I/O space instead.
197 //
198 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
199 // a bit to each. If the bit is set, then the page is valid physical RAM.
200 //
201 RTL_BITMAP MiPfnBitMap;
202
203 //
204 // This structure describes the different pieces of RAM-backed address space
205 //
206 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
207
208 //
209 // This is where we keep track of the most basic physical layout markers
210 //
211 PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
212 PFN_COUNT MmNumberOfPhysicalPages;
213
214 //
215 // The total number of pages mapped by the boot loader, which include the kernel
216 // HAL, boot drivers, registry, NLS files and other loader data structures is
217 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
218 // coming from the loader.
219 //
220 // This number is later aligned up to a PDE boundary.
221 //
222 SIZE_T MmBootImageSize;
223
224 //
225 // These three variables keep track of the core separation of address space that
226 // exists between kernel mode and user mode.
227 //
228 ULONG_PTR MmUserProbeAddress;
229 PVOID MmHighestUserAddress;
230 PVOID MmSystemRangeStart;
231
232 /* And these store the respective highest PTE/PDE address */
233 PMMPTE MiHighestUserPte;
234 PMMPDE MiHighestUserPde;
235 #if (_MI_PAGING_LEVELS >= 3)
236 PMMPTE MiHighestUserPpe;
237 #if (_MI_PAGING_LEVELS >= 4)
238 PMMPTE MiHighestUserPxe;
239 #endif
240 #endif
241
242 /* These variables define the system cache address space */
243 PVOID MmSystemCacheStart = (PVOID)MI_SYSTEM_CACHE_START;
244 PVOID MmSystemCacheEnd;
245 ULONG_PTR MmSizeOfSystemCacheInPages;
246 MMSUPPORT MmSystemCacheWs;
247
248 //
249 // This is where hyperspace ends (followed by the system cache working set)
250 //
251 PVOID MmHyperSpaceEnd;
252
253 //
254 // Page coloring algorithm data
255 //
256 ULONG MmSecondaryColors;
257 ULONG MmSecondaryColorMask;
258
259 //
260 // Actual (registry-configurable) size of a GUI thread's stack
261 //
262 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
263
264 //
265 // Before we have a PFN database, memory comes straight from our physical memory
266 // blocks, which is nice because it's guaranteed contiguous and also because once
267 // we take a page from here, the system doesn't see it anymore.
268 // However, once the fun is over, those pages must be re-integrated back into
269 // PFN society life, and that requires us keeping a copy of the original layout
270 // so that we can parse it later.
271 //
272 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
273 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
274
275 /*
276 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
277 * free lists are organized in what is called a "color".
278 *
279 * This array points to the two lists, so it can be thought of as a multi-dimensional
280 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
281 * we describe the array in pointer form instead.
282 *
283 * On a final note, the color tables themselves are right after the PFN database.
284 */
285 C_ASSERT(FreePageList == 1);
286 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
287
288 /* An event used in Phase 0 before the rest of the system is ready to go */
289 KEVENT MiTempEvent;
290
291 /* All the events used for memory threshold notifications */
292 PKEVENT MiLowMemoryEvent;
293 PKEVENT MiHighMemoryEvent;
294 PKEVENT MiLowPagedPoolEvent;
295 PKEVENT MiHighPagedPoolEvent;
296 PKEVENT MiLowNonPagedPoolEvent;
297 PKEVENT MiHighNonPagedPoolEvent;
298
299 /* The actual thresholds themselves, in page numbers */
300 PFN_NUMBER MmLowMemoryThreshold;
301 PFN_NUMBER MmHighMemoryThreshold;
302 PFN_NUMBER MiLowPagedPoolThreshold;
303 PFN_NUMBER MiHighPagedPoolThreshold;
304 PFN_NUMBER MiLowNonPagedPoolThreshold;
305 PFN_NUMBER MiHighNonPagedPoolThreshold;
306
307 /*
308 * This number determines how many free pages must exist, at minimum, until we
309 * start trimming working sets and flushing modified pages to obtain more free
310 * pages.
311 *
312 * This number changes if the system detects that this is a server product
313 */
314 PFN_NUMBER MmMinimumFreePages = 26;
315
316 /*
317 * This number indicates how many pages we consider to be a low limit of having
318 * "plenty" of free memory.
319 *
320 * It is doubled on systems that have more than 63MB of memory
321 */
322 PFN_NUMBER MmPlentyFreePages = 400;
323
324 /* These values store the type of system this is (small, med, large) and if server */
325 ULONG MmProductType;
326 MM_SYSTEMSIZE MmSystemSize;
327
328 /*
329 * These values store the cache working set minimums and maximums, in pages
330 *
331 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
332 * down to only 32 pages on embedded (<24MB RAM) systems.
333 *
334 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
335 */
336 PFN_NUMBER MmSystemCacheWsMinimum = 288;
337 PFN_NUMBER MmSystemCacheWsMaximum = 350;
338
339 /* FIXME: Move to cache/working set code later */
340 BOOLEAN MmLargeSystemCache;
341
342 /*
343 * This value determines in how many fragments/chunks the subsection prototype
344 * PTEs should be allocated when mapping a section object. It is configurable in
345 * the registry through the MapAllocationFragment parameter.
346 *
347 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
348 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
349 *
350 * The maximum it can be set to is 2MB, and the minimum is 4KB.
351 */
352 SIZE_T MmAllocationFragment;
353
354 /*
355 * These two values track how much virtual memory can be committed, and when
356 * expansion should happen.
357 */
358 // FIXME: They should be moved elsewhere since it's not an "init" setting?
359 SIZE_T MmTotalCommitLimit;
360 SIZE_T MmTotalCommitLimitMaximum;
361
362 /*
363 * These values tune certain user parameters. They have default values set here,
364 * as well as in the code, and can be overwritten by registry settings.
365 */
366 SIZE_T MmHeapSegmentReserve = 1 * _1MB;
367 SIZE_T MmHeapSegmentCommit = 2 * PAGE_SIZE;
368 SIZE_T MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
369 SIZE_T MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
370 SIZE_T MmMinimumStackCommitInBytes = 0;
371
372 /* Internal setting used for debugging memory descriptors */
373 BOOLEAN MiDbgEnableMdDump =
374 #ifdef _ARM_
375 TRUE;
376 #else
377 FALSE;
378 #endif
379
380 /* Number of memory descriptors in the loader block */
381 ULONG MiNumberDescriptors = 0;
382
383 /* Number of free pages in the loader block */
384 PFN_NUMBER MiNumberOfFreePages = 0;
385
386 /* Timeout value for critical sections (2.5 minutes) */
387 ULONG MmCritsectTimeoutSeconds = 150; // NT value: 720 * 60 * 60; (30 days)
388 LARGE_INTEGER MmCriticalSectionTimeout;
389
390 //
391 // Throttling limits for Cc (in pages)
392 // Above top, we don't throttle
393 // Above bottom, we throttle depending on the amount of modified pages
394 // Otherwise, we throttle!
395 //
396 ULONG MmThrottleTop;
397 ULONG MmThrottleBottom;
398
399 /* PRIVATE FUNCTIONS **********************************************************/
400
401 VOID
402 NTAPI
MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)403 MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
404 {
405 PLIST_ENTRY ListEntry;
406 PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
407 PFN_NUMBER PageFrameIndex, FreePages = 0;
408
409 /* Loop the memory descriptors */
410 for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
411 ListEntry != &LoaderBlock->MemoryDescriptorListHead;
412 ListEntry = ListEntry->Flink)
413 {
414 /* Get the descriptor */
415 Descriptor = CONTAINING_RECORD(ListEntry,
416 MEMORY_ALLOCATION_DESCRIPTOR,
417 ListEntry);
418 DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
419 Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);
420
421 /* Count this descriptor */
422 MiNumberDescriptors++;
423
424 /* If this is invisible memory, skip this descriptor */
425 if (MiIsMemoryTypeInvisible(Descriptor->MemoryType))
426 continue;
427
428 /* Check if this isn't bad memory */
429 if (Descriptor->MemoryType != LoaderBad)
430 {
431 /* Count it in the physical pages */
432 MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
433 }
434
435 /* Check if this is the new lowest page */
436 if (Descriptor->BasePage < MmLowestPhysicalPage)
437 {
438 /* Update the lowest page */
439 MmLowestPhysicalPage = Descriptor->BasePage;
440 }
441
442 /* Check if this is the new highest page */
443 PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
444 if (PageFrameIndex > MmHighestPhysicalPage)
445 {
446 /* Update the highest page */
447 MmHighestPhysicalPage = PageFrameIndex - 1;
448 }
449
450 /* Check if this is free memory */
451 if (MiIsMemoryTypeFree(Descriptor->MemoryType))
452 {
453 /* Count it in the free pages */
454 MiNumberOfFreePages += Descriptor->PageCount;
455
456 /* Check if this is the largest memory descriptor */
457 if (Descriptor->PageCount > FreePages)
458 {
459 /* Remember it */
460 MxFreeDescriptor = Descriptor;
461 FreePages = Descriptor->PageCount;
462 }
463 }
464 }
465
466 /* Save original values of the free descriptor, since it'll be
467 * altered by early allocations */
468 MxOldFreeDescriptor = *MxFreeDescriptor;
469 }
470
471 CODE_SEG("INIT")
472 PFN_NUMBER
473 NTAPI
MxGetNextPage(IN PFN_NUMBER PageCount)474 MxGetNextPage(IN PFN_NUMBER PageCount)
475 {
476 PFN_NUMBER Pfn;
477
478 /* Make sure we have enough pages */
479 if (PageCount > MxFreeDescriptor->PageCount)
480 {
481 /* Crash the system */
482 KeBugCheckEx(INSTALL_MORE_MEMORY,
483 MmNumberOfPhysicalPages,
484 MxFreeDescriptor->PageCount,
485 MxOldFreeDescriptor.PageCount,
486 PageCount);
487 }
488
489 /* Use our lowest usable free pages */
490 Pfn = MxFreeDescriptor->BasePage;
491 MxFreeDescriptor->BasePage += PageCount;
492 MxFreeDescriptor->PageCount -= PageCount;
493 return Pfn;
494 }
495
496 CODE_SEG("INIT")
497 VOID
498 NTAPI
MiComputeColorInformation(VOID)499 MiComputeColorInformation(VOID)
500 {
501 ULONG L2Associativity;
502
503 /* Check if no setting was provided already */
504 if (!MmSecondaryColors)
505 {
506 /* Get L2 cache information */
507 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
508
509 /* The number of colors is the number of cache bytes by set/way */
510 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
511 if (L2Associativity) MmSecondaryColors /= L2Associativity;
512 }
513
514 /* Now convert cache bytes into pages */
515 MmSecondaryColors >>= PAGE_SHIFT;
516 if (!MmSecondaryColors)
517 {
518 /* If there was no cache data from the KPCR, use the default colors */
519 MmSecondaryColors = MI_SECONDARY_COLORS;
520 }
521 else
522 {
523 /* Otherwise, make sure there aren't too many colors */
524 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
525 {
526 /* Set the maximum */
527 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
528 }
529
530 /* Make sure there aren't too little colors */
531 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
532 {
533 /* Set the default */
534 MmSecondaryColors = MI_SECONDARY_COLORS;
535 }
536
537 /* Finally make sure the colors are a power of two */
538 if (MmSecondaryColors & (MmSecondaryColors - 1))
539 {
540 /* Set the default */
541 MmSecondaryColors = MI_SECONDARY_COLORS;
542 }
543 }
544
545 /* Compute the mask and store it */
546 MmSecondaryColorMask = MmSecondaryColors - 1;
547 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
548 }
549
/* Maps and initializes the two free-page color tables (zeroed-page and
 * free-page lists, one MMCOLOR_TABLES entry per secondary color) that live
 * immediately after the PFN database, then resets every colored list to
 * the empty state. */
CODE_SEG("INIT")
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Check for valid PTE -- part of this range may already be mapped
         * by the PFN database mapping that precedes it */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color; the LIST_HEAD
         * sentinel value marks an empty colored list */
        MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
599
600 #ifndef _M_AMD64
601 CODE_SEG("INIT")
602 BOOLEAN
603 NTAPI
MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,IN PFN_NUMBER Pfn)604 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
605 IN PFN_NUMBER Pfn)
606 {
607 PLIST_ENTRY NextEntry;
608 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
609
610 /* Loop the memory descriptors */
611 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
612 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
613 {
614 /* Get the memory descriptor */
615 MdBlock = CONTAINING_RECORD(NextEntry,
616 MEMORY_ALLOCATION_DESCRIPTOR,
617 ListEntry);
618
619 /* Check if this PFN could be part of the block */
620 if (Pfn >= (MdBlock->BasePage))
621 {
622 /* Check if it really is part of the block */
623 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
624 {
625 /* Check if the block is actually memory we don't map */
626 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
627 (MdBlock->MemoryType == LoaderBBTMemory) ||
628 (MdBlock->MemoryType == LoaderSpecialMemory))
629 {
630 /* We don't need PFN database entries for this memory */
631 break;
632 }
633
634 /* This is memory we want to map */
635 return TRUE;
636 }
637 }
638 else
639 {
640 /* Blocks are ordered, so if it's not here, it doesn't exist */
641 break;
642 }
643
644 /* Get to the next descriptor */
645 NextEntry = MdBlock->ListEntry.Flink;
646 }
647
648 /* Check if this PFN is actually from our free memory descriptor */
649 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
650 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
651 {
652 /* We use these pages for initial mappings, so we do want to count them */
653 return TRUE;
654 }
655
656 /* Otherwise this isn't memory that we describe or care about */
657 return FALSE;
658 }
659
/* Maps the portions of the PFN database that cover every loader-described,
 * mappable memory range. Backing pages are taken directly from the boot-time
 * free descriptor (rather than via MxGetNextPage) so the descriptor can be
 * updated once, after the loop, with everything that was consumed. */
CODE_SEG("INIT")
VOID
NTAPI
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real numbers instead -- the live descriptor has
             * already been shrunk by earlier boot allocations */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs for this range */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory.
                     * NOTE(review): this fires even though the page just
                     * consumed above was valid -- presumably a deliberately
                     * conservative check; confirm before changing */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
755
/* Walks the current (startup) page tables and creates PFN database entries
 * for every page backing a valid PDE or PTE, so that the memory already
 * mapped by the boot loader is properly accounted for. Share counts track
 * how many PTEs each page-table page maps. */
CODE_SEG("INIT")
VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /* PFN of the startup page directory */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PPE_PER_PAGE * PDE_PER_PAGE;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN from it */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do, set it up: the page-table page is owned by
                 * the startup page directory and pinned as active */
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = (PMMPTE)PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
                MI_SET_PFN_PROCESS_NAME(Pfn1, "Initial PDE");
#endif
            }
            else
            {
                /* No PFN entry */
                Pfn1 = NULL;
            }

            /* Now get the PTE and scan the pages */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_PER_PAGE; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Increase the shared count of the PFN entry for the PDE
                     * (a valid PTE inside a non-regular page-table page
                     * would be a contradiction, hence the assert) */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE is valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid
                             * (its backing PFN database page may not be
                             * mapped yet) */
                            Pfn2 = MiGetPfnEntry(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                                Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
                                MI_SET_PFN_PROCESS_NAME(Pfn2, "Initial PTE");
#endif
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* Next PDE mapped address */
            BaseAddress += PDE_MAPPED_VA;
        }

        /* Next PTE */
        PointerPde++;
    }
}
864
865 CODE_SEG("INIT")
866 VOID
867 NTAPI
MiBuildPfnDatabaseZeroPage(VOID)868 MiBuildPfnDatabaseZeroPage(VOID)
869 {
870 PMMPFN Pfn1;
871 PMMPDE PointerPde;
872
873 /* Grab the lowest page and check if it has no real references */
874 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
875 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
876 {
877 /* Make it a bogus page to catch errors */
878 PointerPde = MiAddressToPde(0xFFFFFFFF);
879 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
880 Pfn1->PteAddress = (PMMPTE)PointerPde;
881 Pfn1->u2.ShareCount++;
882 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
883 Pfn1->u3.e1.PageLocation = ActiveAndValid;
884 Pfn1->u3.e1.CacheAttribute = MiNonCached;
885 }
886 }
887
/* Completes the PFN database using the loader memory descriptors: free-type
 * pages with no references are inserted into the free list; invisible
 * firmware/BBT/special ranges are skipped; every other (in-use) range gets
 * its PFN entries marked active, with XIP ROM pages specially flagged as
 * pseudo-I/O ROM mappings. Descriptors past MmHighestPhysicalPage are
 * trimmed or ignored since the database does not cover them. */
CODE_SEG("INIT")
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = MiAcquirePfnLock();
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                MiReleasePfnLock(OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                        Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping: no links,
                             * no counts, flagged as ROM/prototype */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
1031
1032 CODE_SEG("INIT")
1033 VOID
1034 NTAPI
MiBuildPfnDatabaseSelf(VOID)1035 MiBuildPfnDatabaseSelf(VOID)
1036 {
1037 PMMPTE PointerPte, LastPte;
1038 PMMPFN Pfn1;
1039
1040 /* Loop the PFN database page */
1041 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
1042 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
1043 while (PointerPte <= LastPte)
1044 {
1045 /* Make sure the page is valid */
1046 if (PointerPte->u.Hard.Valid == 1)
1047 {
1048 /* Get the PFN entry and just mark it referenced */
1049 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1050 Pfn1->u2.ShareCount = 1;
1051 Pfn1->u3.e2.ReferenceCount = 1;
1052 #if MI_TRACE_PFNS
1053 Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
1054 #endif
1055 }
1056
1057 /* Next */
1058 PointerPte++;
1059 }
1060 }
1061
/*
 * Top-level PFN database initialization. Each helper below fills in MMPFN
 * entries for a different class of physical pages, using the loader-provided
 * memory descriptor list. The order of the calls matters: pages already in
 * use are recorded first, then the remaining descriptor ranges, and finally
 * the pages holding the database itself.
 */
CODE_SEG("INIT")
VOID
NTAPI
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
1079 #endif /* !_M_AMD64 */
1080
/*
 * Releases the physical pages that were used by the OS loader for registry
 * data, loader heap and NLS data, now that the kernel has consumed them.
 * Pages with no remaining references go straight to the free list; pages
 * still referenced are unmapped and dereferenced instead.
 */
CODE_SEG("INIT")
VOID
NTAPI
MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextMd;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG_PTR i;
    PFN_NUMBER BasePage, LoaderPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PPHYSICAL_MEMORY_RUN Buffer, Entry;

    /* Loop the descriptors in order to count them */
    i = 0;
    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
    {
        MdBlock = CONTAINING_RECORD(NextMd,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        i++;
        NextMd = MdBlock->ListEntry.Flink;
    }

    /* Allocate a structure to hold the physical runs
     * (sized for the worst case: one run per descriptor) */
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   i * sizeof(PHYSICAL_MEMORY_RUN),
                                   'lMmM');
    ASSERT(Buffer != NULL);
    Entry = Buffer;

    /* Loop the descriptors again */
    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Check what kind this was */
        MdBlock = CONTAINING_RECORD(NextMd,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        switch (MdBlock->MemoryType)
        {
            /* Registry, NLS, and heap data */
            case LoaderRegistryData:
            case LoaderOsloaderHeap:
            case LoaderNlsData:
                /* Are all a candidate for deletion */
                Entry->BasePage = MdBlock->BasePage;
                Entry->PageCount = MdBlock->PageCount;
                Entry++;
                /* fallthrough -- intentional: recorded runs share the break below */

            /* We keep the rest */
            default:
                break;
        }

        /* Move to the next descriptor */
        NextMd = MdBlock->ListEntry.Flink;
    }

    /* Acquire the PFN lock */
    OldIrql = MiAcquirePfnLock();

    /* Loop the runs (note: Entry walks backwards from one-past-last) */
    LoaderPages = 0;
    while (--Entry >= Buffer)
    {
        /* See how many pages are in this run */
        i = Entry->PageCount;
        BasePage = Entry->BasePage;

        /* Loop each page */
        Pfn1 = MiGetPfnEntry(BasePage);
        while (i--)
        {
            /* Check if it has references or is in any kind of list */
            if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
            {
                /* Set the new PTE address and put this page into the free list */
                Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
                MiInsertPageInFreeList(BasePage);
                LoaderPages++;
            }
            else if (BasePage)
            {
                /* It has a reference, so simply drop it */
                ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);

                /* Drop a dereference on this page, which should delete it */
                Pfn1->PteAddress->u.Long = 0;
                MI_SET_PFN_DELETED(Pfn1);
                MiDecrementShareCount(Pfn1, BasePage);
                LoaderPages++;
            }

            /* Move to the next page */
            Pfn1++;
            BasePage++;
        }
    }

    /* Release the PFN lock and flush the TLB */
    DPRINT("Loader pages freed: %lx\n", LoaderPages);
    MiReleasePfnLock(OldIrql);
    KeFlushCurrentTb();

    /* Free our run structure */
    ExFreePoolWithTag(Buffer, 'lMmM');
}
1190
1191 CODE_SEG("INIT")
1192 VOID
1193 NTAPI
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)1194 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
1195 {
1196 /* This function needs to do more work, for now, we tune page minimums */
1197
1198 /* Check for a system with around 64MB RAM or more */
1199 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
1200 {
1201 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
1202 MmPlentyFreePages *= 2;
1203 }
1204 }
1205
1206 CODE_SEG("INIT")
1207 VOID
1208 NTAPI
MiNotifyMemoryEvents(VOID)1209 MiNotifyMemoryEvents(VOID)
1210 {
1211 /* Are we in a low-memory situation? */
1212 if (MmAvailablePages < MmLowMemoryThreshold)
1213 {
1214 /* Clear high, set low */
1215 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1216 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1217 }
1218 else if (MmAvailablePages < MmHighMemoryThreshold)
1219 {
1220 /* We are in between, clear both */
1221 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1222 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1223 }
1224 else
1225 {
1226 /* Clear low, set high */
1227 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1228 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1229 }
1230 }
1231
/*
 * Creates a named, permanent notification event in the object namespace,
 * secured with a DACL that grants: query/synchronize to Everyone, and full
 * access to Administrators and SYSTEM. On success, *Event receives a
 * referenced pointer to the event object (the creating handle is closed).
 *
 * Name  -- fully-qualified object name (e.g. "\KernelObjects\...").
 * Event -- receives the referenced KEVENT pointer on success.
 *
 * Returns an NTSTATUS code; *Event is only valid on success.
 */
CODE_SEG("INIT")
NTSTATUS
NTAPI
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, containing each one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, TAG_DACL);
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL (the object keeps its own copy of the SD) */
    ExFreePoolWithTag(Dacl, TAG_DACL);

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose (EventHandle);
    }

    /* Return status */
    return Status;
}
1326
/*
 * Computes the low/high memory thresholds (honoring registry overrides,
 * which arrive expressed in MB), creates the six \KernelObjects memory and
 * pool condition events, then publishes the initial event state.
 *
 * Returns TRUE on success, FALSE if any event could not be created.
 */
CODE_SEG("INIT")
BOOLEAN
NTAPI
MiInitializeMemoryEvents(VOID)
{
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, plus (extra pages >> 7) -- i.e. 8MB per additional
             * GB with 4KB pages. NOTE(review): the original comment claimed
             * 16MB per GB, which would be >> 6; confirm which was intended. */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }

    /* Check if we have a registry setting */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}
1405
/*
 * Scans the HAL heap VA range (MM_HAL_VA_START..MM_HAL_VA_END) for valid,
 * non-large-page mappings and flags any page that maps a frame outside the
 * PFN database (i.e. device/I/O memory), since such mappings need cache-
 * attribute tracking that is not implemented yet.
 */
CODE_SEG("INIT")
VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPDE PointerPde, LastPde;
    PMMPTE PointerPte;
    ULONG j;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)MM_HAL_VA_START;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);

    while (PointerPde <= LastPde)
    {
        /* Does the HAL own this mapping? (large pages are skipped) */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_PER_PAGE; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory?
                     * (a frame with no PFN entry is not RAM we manage) */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address (the inner loop above advances by the
             * same PDE_MAPPED_VA total, keeping BaseAddress in sync) */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1462
/*
 * Debug helper: walks the entire PFN database at HIGH_LEVEL and prints a
 * summary of page usage (and, when MI_TRACE_PFNS is enabled, a breakdown
 * per tracked consumer). When StatusOnly is FALSE, every page is also
 * printed individually.
 */
VOID
NTAPI
MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
#if MI_TRACE_PFNS
    ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
    /* Text labels indexed by MI_USAGE_* values */
    PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
    {
        "Not set",
        "Paged Pool",
        "Nonpaged Pool",
        "Nonpaged Pool Ex",
        "Kernel Stack",
        "Kernel Stack Ex",
        "System PTE",
        "VAD",
        "PEB/TEB",
        "Section",
        "Page Table",
        "Page Directory",
        "Old Page Table",
        "Driver Page",
        "Contiguous Alloc",
        "MDL",
        "Demand Zero",
        "Zero Loop",
        "Cache",
        "PFN Database",
        "Boot Driver",
        "Initial Memory",
        "Free Page"
    };
#endif
    //
    // Loop the PFN database (at HIGH_LEVEL so the snapshot is stable)
    //
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;
#if MI_TRACE_PFNS
        ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
#endif
        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case ZeroedPageList:

                Consumer = "Zero Page List";
                FreePages++;
                break;//continue;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;//continue;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

#if MI_TRACE_PFNS
        /* Add into bucket */
        UsageBucket[Pfn1->PfnUsage]++;
#endif

        //
        // Pretty-print the page
        //
        if (!StatusOnly)
            DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s]\n",
                     i << PAGE_SHIFT,
                     Consumer,
                     Pfn1->u3.e2.ReferenceCount,
                     Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
#if MI_TRACE_PFNS
                     MI_USAGE_TEXT[Pfn1->PfnUsage],
                     Pfn1->ProcessName);
#else
                     "Page tracking",
                     "is disabled");
#endif
    }

    DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
    DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
    DbgPrint("Other: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
    OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
    DbgPrint("Boot Images: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
    DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
    DbgPrint("PFN Database: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_PAGE_DIRECTORY] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
    DbgPrint("Page Tables: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_SYSTEM_PTE];
    DbgPrint("System PTEs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_VAD];
    DbgPrint("VADs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_CONTINOUS_ALLOCATION];
    DbgPrint("Continuous Allocs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_MDL];
    DbgPrint("MDLs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
    DbgPrint("NonPaged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
    DbgPrint("Paged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_DEMAND_ZERO];
    DbgPrint("Demand Zero: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_ZERO_LOOP];
    DbgPrint("Zero Loop: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PEB_TEB];
    DbgPrint("PEB/TEB: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
    DbgPrint("Kernel Stack: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
    DbgPrint("Init Memory: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_SECTION];
    DbgPrint("Sections: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_CACHE];
    DbgPrint("Cache: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_FREE_PAGE];
    DbgPrint("Free: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
#endif
    KeLowerIrql(OldIrql);
}
1609
/*
 * Builds a PHYSICAL_MEMORY_DESCRIPTOR from the loader's memory descriptor
 * list, including only the memory types for which IncludeType[] is TRUE.
 * Adjacent descriptors are coalesced into single runs. The caller owns the
 * returned nonpaged-pool buffer; returns NULL on allocation failure.
 *
 * LoaderBlock -- loader parameter block with the descriptor list.
 * IncludeType -- array indexed by TYPE_OF_MEMORY; TRUE means include.
 */
CODE_SEG("INIT")
PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                         IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    ULONG Run = 0, InitialRuns;
    /* NextPage starts at an impossible value so the first descriptor never coalesces */
    PFN_NUMBER NextPage = -1, PageCount = 0;
    PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // Start with the maximum we might need
    //
    InitialRuns = MiNumberDescriptors;

    //
    // Allocate the maximum we'll ever need
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                   sizeof(PHYSICAL_MEMORY_RUN) *
                                   (InitialRuns - 1),
                                   'lMmM');
    if (!Buffer) return NULL;

    //
    // For now that's how many runs we have
    //
    Buffer->NumberOfRuns = InitialRuns;

    //
    // Now loop through the descriptors again
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;

            //
            // Check if the next page is described by the next descriptor
            //
            if (MdBlock->BasePage == NextPage)
            {
                //
                // Combine it into the same physical run
                //
                ASSERT(MdBlock->PageCount != 0);
                Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
                NextPage += MdBlock->PageCount;
            }
            else
            {
                //
                // Otherwise just duplicate the descriptor's contents
                //
                Buffer->Run[Run].BasePage = MdBlock->BasePage;
                Buffer->Run[Run].PageCount = MdBlock->PageCount;
                NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;

                //
                // And in this case, increase the number of runs
                //
                Run++;
            }
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // We should not have been able to go past our initial estimate
    //
    ASSERT(Run <= Buffer->NumberOfRuns);

    //
    // Our guess was probably exaggerated...
    //
    if (InitialRuns > Run)
    {
        //
        // Allocate a more accurately sized buffer
        //
        NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
                                          sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                          sizeof(PHYSICAL_MEMORY_RUN) *
                                          (Run - 1),
                                          'lMmM');
        if (NewBuffer)
        {
            //
            // Copy the old buffer into the new, then free it
            // (shrink-to-fit is best-effort: on failure we keep the big one)
            //
            RtlCopyMemory(NewBuffer->Run,
                          Buffer->Run,
                          sizeof(PHYSICAL_MEMORY_RUN) * Run);
            ExFreePoolWithTag(Buffer, 'lMmM');

            //
            // Now use the new buffer
            //
            Buffer = NewBuffer;
        }
    }

    //
    // Write the final numbers, and return it
    //
    Buffer->NumberOfRuns = Run;
    Buffer->NumberOfPages = PageCount;
    return Buffer;
}
1738
/*
 * Sizes and initializes the kernel paged pool: establishes the system page
 * directory double-mapping (2-level paging) or spans PPEs over the pool
 * area (3+ levels), maps the first paged pool PDE, sets up the allocation
 * and end-of-allocation bitmaps, and initializes the pool executive state,
 * special pool, thresholds and the global session space.
 */
CODE_SEG("INIT")
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPDE TempPde = ValidKernelPde;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    SIZE_T Size, NumberOfPages, NumberOfPdes;
    ULONG BitMapSize;
#if (_MI_PAGING_LEVELS >= 3)
    MMPPE TempPpe = ValidKernelPpe;
    PMMPPE PointerPpe;
#elif (_MI_PAGING_LEVELS == 2)
    MMPTE TempPte = ValidKernelPte;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PPE_PER_PAGE == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs across process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PPE_PER_PAGE == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif

#ifdef _M_IX86
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }
#endif // _M_IX86

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    NumberOfPages = BYTES_TO_PAGES(Size);

    //
    // Now check how many PDEs will be required for these many pages.
    //
    NumberOfPdes = (NumberOfPages + (PTE_PER_PAGE - 1)) / PTE_PER_PAGE;

    //
    // Recompute the PDE-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = NumberOfPdes * PTE_PER_PAGE * PAGE_SIZE;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

#ifdef _M_IX86
    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);
#endif // _M_IX86

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // Lock the PFN database
    //
    OldIrql = MiAcquirePfnLock();

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
         PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
         PointerPpe++)
    {
        /* Check if the PPE is already valid */
        if (!PointerPpe->u.Hard.Valid)
        {
            /* It is not, so map a fresh zeroed page */
            TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
            MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
            MiInitializePfnForOtherProcess(TempPpe.u.Hard.PageFrameNumber,
                                           (PMMPTE)PointerPpe,
                                           PFN_FROM_PTE(MiAddressToPte(PointerPpe)));
        }
    }
#endif

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    /* Allocate a page and map the first paged pool PDE */
    MI_SET_USAGE(MI_USAGE_PAGED_POOL);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PDE(PointerPde, TempPde);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_PER_PAGE]

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_PER_PAGE]);
#endif

    //
    // Release the PFN database lock
    //
    MiReleasePfnLock(OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    NumberOfPages = NumberOfPdes * PTE_PER_PAGE;
    ASSERT(NumberOfPages == MmSizeOfPagedPoolInPages);
    BitMapSize = (ULONG)NumberOfPages;
    Size = sizeof(RTL_BITMAP) + (((BitMapSize + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   TAG_MM);
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, PTE_PER_PAGE);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 TAG_MM);
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Initialize special pool */
    MiInitializeSpecialPool();

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global session space */
    MiInitializeSystemSpaceMap(NULL);
}
1981
1982 CODE_SEG("INIT")
1983 VOID
1984 NTAPI
MiDbgDumpMemoryDescriptors(VOID)1985 MiDbgDumpMemoryDescriptors(VOID)
1986 {
1987 PLIST_ENTRY NextEntry;
1988 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1989 PFN_NUMBER TotalPages = 0;
1990 PCHAR
1991 MemType[] =
1992 {
1993 "ExceptionBlock ",
1994 "SystemBlock ",
1995 "Free ",
1996 "Bad ",
1997 "LoadedProgram ",
1998 "FirmwareTemporary ",
1999 "FirmwarePermanent ",
2000 "OsloaderHeap ",
2001 "OsloaderStack ",
2002 "SystemCode ",
2003 "HalCode ",
2004 "BootDriver ",
2005 "ConsoleInDriver ",
2006 "ConsoleOutDriver ",
2007 "StartupDpcStack ",
2008 "StartupKernelStack",
2009 "StartupPanicStack ",
2010 "StartupPcrPage ",
2011 "StartupPdrPage ",
2012 "RegistryData ",
2013 "MemoryData ",
2014 "NlsData ",
2015 "SpecialMemory ",
2016 "BBTMemory ",
2017 "LoaderReserve ",
2018 "LoaderXIPRom "
2019 };
2020
2021 DPRINT1("Base\t\tLength\t\tType\n");
2022 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
2023 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
2024 NextEntry = NextEntry->Flink)
2025 {
2026 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
2027 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
2028 TotalPages += Md->PageCount;
2029 }
2030
2031 DPRINT1("Total: %08lX (%lu MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
2032 }
2033
//
// Phase-dispatched initialization entry point for the ARM3 memory manager.
//
// Phase == 0 runs during early kernel boot, before any other MM consumers:
// it sizes and tunes the address space (system PTEs, pools, caches), builds
// the physical memory block and PFN bitmap, relocates boot drivers, and
// finally constructs paged pool and the loaded module list. For any other
// phase the function currently just returns TRUE.
//
// LoaderBlock is the boot loader's parameter block describing physical
// memory and loaded images. Returns FALSE only if the computed system cache
// working set leaves no resident pages available; TRUE otherwise.
//
CODE_SEG("INIT")
BOOLEAN
NTAPI
MmArmInitSystem(IN ULONG Phase,
                IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG i;
    BOOLEAN IncludeType[LoaderMaximum];
    PVOID Bitmap;
    PPHYSICAL_MEMORY_RUN Run;
    PFN_NUMBER PageCount;
#if DBG
    ULONG j;
    PMMPTE PointerPte, TestPte;
    MMPTE TempPte;
#endif

    /* Dump memory descriptors (debug aid, gated by a global flag) */
    if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();

    //
    // Instantiate memory that we don't consider RAM/usable
    // We use the same exclusions that Windows does, in order to try to be
    // compatible with WinLDR-style booting
    //
    for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;
    IncludeType[LoaderBBTMemory] = FALSE;
    if (Phase == 0)
    {
        /* Count physical pages on the system */
        MiScanMemoryDescriptors(LoaderBlock);

        /* Initialize the phase 0 temporary event */
        KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);

        /* Set all the events to use the temporary event for now */
        MiLowMemoryEvent = &MiTempEvent;
        MiHighMemoryEvent = &MiTempEvent;
        MiLowPagedPoolEvent = &MiTempEvent;
        MiHighPagedPoolEvent = &MiTempEvent;
        MiLowNonPagedPoolEvent = &MiTempEvent;
        MiHighNonPagedPoolEvent = &MiTempEvent;

        //
        // Default throttling limits for Cc
        // May be adjusted later on depending on system type
        // (they are re-set below once the product type is known)
        //
        MmThrottleTop = 450;
        MmThrottleBottom = 127;

        //
        // Define the basic user vs. kernel address space separation
        //
        MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
        MmUserProbeAddress = (ULONG_PTR)MI_USER_PROBE_ADDRESS;
        MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;

        /* Highest PTE and PDE based on the addresses above */
        MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
        MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 3)
        /* Additional paging levels (PPE/PXE) only exist on PAE/x64 layouts */
        MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 4)
        MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
#endif
#endif
        //
        // Get the size of the boot loader's image allocations and then round
        // that region up to a PDE size, so that any PDEs we might create for
        // whatever follows are separate from the PDEs that boot loader might've
        // already created (and later, we can blow all that away if we want to).
        //
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
        MmBootImageSize *= PAGE_SIZE;
        MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
        ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);

        /* Initialize session space address layout */
        MiInitializeSessionSpaceLayout();

        /* Set the based section highest address (8MB below the user limit) */
        MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);

        /* Loop all 8 standby lists (one per page priority level) */
        for (i = 0; i < 8; i++)
        {
            /* Initialize them to empty */
            MmStandbyPageListByPriority[i].Total = 0;
            MmStandbyPageListByPriority[i].ListName = StandbyPageList;
            MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
            MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
        }

        /* Initialize the user mode image list */
        InitializeListHead(&MmLoadedUserImageList);

        /* Initialize the working set list */
        InitializeListHead(&MmWorkingSetExpansionHead);

        /* Initialize critical section timeout value (relative time is negative) */
        MmCriticalSectionTimeout.QuadPart = MmCritsectTimeoutSeconds * (-10000000LL);

        /* Initialize the paged pool mutex and the section commit mutex */
        KeInitializeGuardedMutex(&MmPagedPoolMutex);
        KeInitializeGuardedMutex(&MmSectionCommitMutex);
        KeInitializeGuardedMutex(&MmSectionBasedMutex);

        /* Initialize the Loader Lock */
        KeInitializeMutant(&MmSystemLoadLock, FALSE);

        /* Set up the zero page event */
        KeInitializeEvent(&MmZeroingPageEvent, NotificationEvent, FALSE);

        /* Initialize the dead stack S-LIST (cache of freed kernel stacks) */
        InitializeSListHead(&MmDeadStackSListHead);

        //
        // Check if this is a machine with less than 19MB of RAM
        //
        PageCount = MmNumberOfPhysicalPages;
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
            if (MmSpecialPoolTag != 0 && MmSpecialPoolTag != -1)
            {
                //
                // Add some extra PTEs for special pool
                //
                MmNumberOfSystemPtes += 0x6000;
            }
        }

        DPRINT("System PTE count has been tuned to %lu (%lu bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

        /* Check if no values are set for the heap limits */
        if (MmHeapSegmentReserve == 0)
        {
            MmHeapSegmentReserve = 2 * _1MB;
        }

        if (MmHeapSegmentCommit == 0)
        {
            MmHeapSegmentCommit = 2 * PAGE_SIZE;
        }

        if (MmHeapDeCommitTotalFreeThreshold == 0)
        {
            MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
        }

        if (MmHeapDeCommitFreeBlockThreshold == 0)
        {
            MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
        }

        /* Initialize the working set lock */
        ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);

        /* Set commit limit (2GB in pages; refined again near the end of phase 0) */
        MmTotalCommitLimit = (2 * _1GB) >> PAGE_SHIFT;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Has the allocation fragment been setup? */
        if (!MmAllocationFragment)
        {
            /* Use the default value */
            MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
            if (PageCount < ((256 * _1MB) / PAGE_SIZE))
            {
                /* On memory systems with less than 256MB, divide by 4 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
            }
            else if (PageCount < (_1GB / PAGE_SIZE))
            {
                /* On systems with less than 1GB, divide by 2 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
            }
        }
        else
        {
            /* Registry value is in 1KB units; convert to bytes, then page-align */
            MmAllocationFragment *= _1KB;
            MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);

            /* Don't let it past the maximum */
            MmAllocationFragment = min(MmAllocationFragment,
                                       MI_MAX_ALLOCATION_FRAGMENT);

            /* Don't let it too small either */
            MmAllocationFragment = max(MmAllocationFragment,
                                       MI_MIN_ALLOCATION_FRAGMENT);
        }

        /* Check for kernel stack size that's too big (registry value is in KB) */
        if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
        {
            /* Sanitize to default value */
            MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
        }
        else
        {
            /* Take the registry setting, and convert it into bytes */
            MmLargeStackSize *= _1KB;

            /* Now align it to a page boundary */
            MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

            /* Sanity checks */
            ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
            ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

            /* Make sure it's not too low */
            if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
        }

        /* Compute color information (L2 cache-separated paging lists) */
        MiComputeColorInformation();

        // Calculate the number of bytes for the PFN database
        // then add the color tables and convert to pages
        MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
        MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
        MxPfnAllocation >>= PAGE_SHIFT;

        // We have to add one to the count here, because in the process of
        // shifting down to the page size, we actually ended up getting the
        // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
        // Later on, we'll shift this number back into bytes, which would cause
        // us to end up with only 0x5F000 bytes -- when we actually want to have
        // 0x60000 bytes.
        MxPfnAllocation++;

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

#if DBG
        /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
        PointerPte = (PMMPTE)MmPagedPoolStart;
        MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
        TestPte = MiProtoPteToPte(&TempPte);
        ASSERT(PointerPte == TestPte);

        /* Try the last nonpaged pool address */
        PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
        MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
        TestPte = MiProtoPteToPte(&TempPte);
        ASSERT(PointerPte == TestPte);

        /* Try a bunch of random addresses near the end of the address space */
        PointerPte = (PMMPTE)((ULONG_PTR)MI_HIGHEST_SYSTEM_ADDRESS - 0x37FFF);
        for (j = 0; j < 20; j += 1)
        {
            MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
            TestPte = MiProtoPteToPte(&TempPte);
            ASSERT(PointerPte == TestPte);
            PointerPte++;
        }

        /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
        PointerPte = (PMMPTE)((ULONG_PTR)MmNonPagedPoolStart + (MmSizeOfNonPagedPoolInBytes / 2));
        MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
        TestPte = MiSubsectionPteToSubsection(&TempPte);
        ASSERT(PointerPte == TestPte);
#endif

        //
        // Build the physical memory block (runs of usable RAM, minus the
        // exclusions set up in IncludeType[] above)
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate enough buffer for the PFN bitmap
        // Align it up to a 32-bit boundary
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       TAG_MM);
        if (!Bitmap)
        {
            //
            // This is critical: without the PFN bitmap we cannot continue,
            // so bugcheck rather than limp along
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            (ULONG)MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Set the bits in the PFN bitmap: a set bit means the PFN
                // is backed by actual RAM
                //
                RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
            }
        }

        /* Look for large page cache entries that need caching */
        MiSyncCachedRanges();

        /* Loop for HAL Heap I/O device mappings that need coherency tracking */
        MiAddHalIoMappings();

        /* Set the initial resident page count */
        /* NOTE(review): the 32-page deduction appears to be a fixed reserve;
           its exact rationale is not visible here — confirm against history */
        MmResidentAvailablePages = MmAvailablePages - 32;

        /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
        MiInitializeLargePageSupport();

        /* Check if the registry says any drivers should be loaded with large pages */
        MiInitializeDriverLargePageList();

        /* Relocate the boot drivers into system PTE space and fixup their PFNs */
        MiReloadBootLoadedDrivers(LoaderBlock);

        /* FIXME: Call out into Driver Verifier for initialization */

        /* Check how many pages the system has to classify the system size */
        if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
        {
            /* Set small system */
            MmSystemSize = MmSmallSystem;
            MmMaximumDeadKernelStacks = 0;
        }
        else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
        {
            /* Set small system and add 100 pages for the cache */
            MmSystemSize = MmSmallSystem;
            MmSystemCacheWsMinimum += 100;
            MmMaximumDeadKernelStacks = 2;
        }
        else
        {
            /* Set medium system and add 400 pages for the cache */
            MmSystemSize = MmMediumSystem;
            MmSystemCacheWsMinimum += 400;
            MmMaximumDeadKernelStacks = 5;
        }

        /* Check for less than 24MB */
        if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
        {
            /* No more than 32 pages */
            MmSystemCacheWsMinimum = 32;
        }

        /* Check for more than 32MB */
        if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
        {
            /* Check for product type being "Wi" for WinNT
               (multi-character constant matches the UTF-16LE "Wi" tag) */
            if (MmProductType == '\0i\0W')
            {
                /* Then this is a large system */
                MmSystemSize = MmLargeSystem;
            }
            else
            {
                /* For servers, we need 64MB to consider this as being large */
                if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
                {
                    /* Set it as large */
                    MmSystemSize = MmLargeSystem;
                }
            }
        }

        /* Check for more than 33 MB */
        if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
        {
            /* Add another 500 pages to the cache */
            MmSystemCacheWsMinimum += 500;
        }

        /* Now setup the shared user data fields */
        ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
        SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
        SharedUserData->LargePageMinimum = 0;

        /* Check for workstation (Wi for WinNT) */
        if (MmProductType == '\0i\0W')
        {
            /* Set Windows NT Workstation product type */
            SharedUserData->NtProductType = NtProductWinNt;
            MmProductType = 0;

            /* For this product, we wait till the last moment to throttle */
            MmThrottleTop = 250;
            MmThrottleBottom = 30;
        }
        else
        {
            /* Check for LanMan server (La for LanmanNT) */
            if (MmProductType == '\0a\0L')
            {
                /* This is a domain controller */
                SharedUserData->NtProductType = NtProductLanManNt;
            }
            else
            {
                /* Otherwise it must be a normal server (Se for ServerNT) */
                SharedUserData->NtProductType = NtProductServer;
            }

            /* Set the product type, and make the system more aggressive with low memory */
            MmProductType = 1;
            MmMinimumFreePages = 81;

            /* We will throttle earlier to preserve memory */
            MmThrottleTop = 450;
            MmThrottleBottom = 80;
        }

        /* Update working set tuning parameters (workstation vs. server profile) */
        MiAdjustWorkingSetManagerParameters(!MmProductType);

        /* Finetune the page count by removing working set and NP expansion */
        MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
        MmResidentAvailablePages -= MmSystemCacheWsMinimum;
        MmResidentAvailableAtInit = MmResidentAvailablePages;
        if (MmResidentAvailablePages <= 0)
        {
            /* This should not happen on any reasonably sized system */
            DPRINT1("System cache working set too big\n");
            return FALSE;
        }

        /* Define limits for system cache */
#ifdef _M_AMD64
        MmSizeOfSystemCacheInPages = ((MI_SYSTEM_CACHE_END + 1) - MI_SYSTEM_CACHE_START) / PAGE_SIZE;
#else
        MmSizeOfSystemCacheInPages = ((ULONG_PTR)MI_PAGED_POOL_START - (ULONG_PTR)MI_SYSTEM_CACHE_START) / PAGE_SIZE;
#endif
        MmSystemCacheEnd = (PVOID)((ULONG_PTR)MmSystemCacheStart + (MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
#ifdef _M_AMD64
        ASSERT(MmSystemCacheEnd == (PVOID)MI_SYSTEM_CACHE_END);
#else
        ASSERT(MmSystemCacheEnd == (PVOID)((ULONG_PTR)MI_PAGED_POOL_START - 1));
#endif

        /* Initialize the system cache (not yet implemented) */
        //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);

        /* Update the commit limit: all available pages, minus a 1024-page cushion */
        MmTotalCommitLimit = MmAvailablePages;
        if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Size up paged pool and build the shadow system page directory */
        MiBuildPagedPool();

        /* Debugger physical memory support is now ready to be used */
        MmDebugPte = MiAddressToPte(MiDebugMapping);

        /* Initialize the loaded module list */
        MiInitializeLoadedModuleList(LoaderBlock);
    }

    //
    // Always return success for now
    //
    return TRUE;
}
2550
2551 /* EOF */
2552