/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3
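
//
// The access record is a 3-bit aging counter (values 0..7) kept in reserved
// bits of each page table entry; ACC_MAX_BIT (BIT3 == 8) is one above the
// largest storable value, so a just-accessed entry can outrank any stored
// record (see GetAndUpdateAccNum below).
//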

extern UINTN mSmmShadowStackSize;

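//
// mPagePool links the free 4-KByte pages that the page fault handler draws
// from to build page table entries on demand.
//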
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                             m1GPageTableSupport = FALSE;
BOOLEAN                             mCpuSmmRestrictedMemoryAccess;
BOOLEAN                             m5LevelPagingNeeded;
X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingNeeded;

/**
  Disable CET.
**/
VOID
EFIAPI
DisableCet (
  VOID
  );

/**
  Enable CET.
**/
VOID
EFIAPI
EnableCet (
  VOID
  );

/**
  Check if 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32         RegEax;
  UINT32         RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Check if 5-level paging is needed.

  The routine returns TRUE when the CPU supports 5-level paging
  (CPUID[7,0].ECX.BIT[16] is set) and the maximum physical address width is
  larger than 48 bits. Because 4-level paging can address physical addresses
  up to 2^48 - 1, there is no need to enable 5-level paging when the maximum
  physical address width is 48 bits or less.

  @retval TRUE  5-level paging enabling is needed.
  @retval FALSE 5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX              VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
  UINT32                                      MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }
  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &ExtFeatureEcx.Uint32, NULL
    );
  DEBUG ((
    DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
    ));

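  //
  // 4-level paging provides 4 x 9 bits of table index plus a 12-bit page
  // offset, i.e. 48-bit linear addresses, so an identity mapping cannot
  // reach physical addresses above 2^48; wider physical address widths
  // therefore require 5-level paging.
  //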
  if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
    ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Get page table base address and the depth of the page table.

  @param[out] Base        Page table base address.
  @param[out] FiveLevels  TRUE means 5 level paging. FALSE means 4 level paging.
**/
VOID
GetPageTable (
  OUT UINTN   *Base,
  OUT BOOLEAN *FiveLevels OPTIONAL
  )
{
  IA32_CR4 Cr4;

  if (mInternalCr3 == 0) {
    *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
    if (FiveLevels != NULL) {
      Cr4.UintN = AsmReadCr4 ();
      *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
    }
    return;
  }

  *Base = mInternalCr3;
  if (FiveLevels != NULL) {
    *FiveLevels = m5LevelPagingNeeded;
  }
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64            *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

/**
  Calculate the maximum supported address.

  @return The maximum supported address.
**/
UINT8
CalculateMaximumSupportAddress (
  VOID
  )
{
  UINT32                                        RegEax;
  UINT8                                         PhysicalAddressBits;
  VOID                                          *Hob;

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }
  return PhysicalAddressBits;
}

/**
  Set static page table.

  @param[in] PageTable     Address of page table.
**/
VOID
SetStaticPageTable (
  IN UINTN               PageTable
  )
{
  UINT64                                        PageAddress;
  UINTN                                         NumberOfPml5EntriesNeeded;
  UINTN                                         NumberOfPml4EntriesNeeded;
  UINTN                                         NumberOfPdpEntriesNeeded;
  UINTN                                         IndexOfPml5Entries;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT64                                        *PageMapLevel5Entry;
  UINT64                                        *PageMapLevel4Entry;
  UINT64                                        *PageMap;
  UINT64                                        *PageDirectoryPointerEntry;
  UINT64                                        *PageDirectory1GEntry;
  UINT64                                        *PageDirectoryEntry;

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
  //  when 5-Level Paging is disabled.
  //
  ASSERT (mPhysicalAddressBits <= 52);
  if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
    mPhysicalAddressBits = 48;
  }

  NumberOfPml5EntriesNeeded = 1;
  if (mPhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
    mPhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (mPhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
    mPhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (mPhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);
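
  //
  // For example, with mPhysicalAddressBits == 39 this resolves to one PML5
  // entry, one PML4 entry and 512 PDP entries; with 48 bits it becomes one
  // PML5 entry, 512 PML4 entries and 512 PDP entries per PML4 entry.
  //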

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap         = (VOID *) PageTable;

  PageMapLevel4Entry = PageMap;
  PageMapLevel5Entry = NULL;
  if (m5LevelPagingNeeded) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
  }
  PageAddress        = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    if (m5LevelPagingNeeded) {
      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
      if (PageMapLevel4Entry == NULL) {
        PageMapLevel4Entry = AllocatePageTableMemory (1);
        ASSERT(PageMapLevel4Entry != NULL);
        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }

    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      //
      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
      if (PageDirectoryPointerEntry == NULL) {
        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
        ASSERT(PageDirectoryPointerEntry != NULL);
        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));

        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }

      if (m1GPageTableSupport) {
        PageDirectory1GEntry = PageDirectoryPointerEntry;
        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Fill in the Page Directory entries
          //
          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
        }
      } else {
        PageAddress = BASE_4GB;
        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
            //
            // Skip the < 4G entries
            //
            continue;
          }
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
          if (PageDirectoryEntry == NULL) {
            PageDirectoryEntry = AllocatePageTableMemory (1);
            ASSERT(PageDirectoryEntry != NULL);
            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));

            //
            // Fill in the Page Directory Pointer entry
            //
            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          }

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            //
            // Fill in the Page Directory entries
            //
            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
          }
        }
      }
    }
  }
}

/**
  Create PageTable for SMM use.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
  EFI_STATUS                        Status;
  UINT64                            *Pml4Entry;
  UINT64                            *Pml5Entry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n", mPhysicalAddressBits));
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (Pml4Entry != NULL);
  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));

  //
  // Set sub-entries number
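  // (the count is zero-based: 3 below means the PML4 entry has 4 sub-entries,
  // the 4 PDPT entries created by Gen4GPageTable to cover the first 4 GB)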
  //
  SetSubEntriesNum (Pml4Entry, 3);
  PTEntry = Pml4Entry;

  if (m5LevelPagingNeeded) {
    //
    // Fill PML5 entry
    //
    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
    ASSERT (Pml5Entry != NULL);
    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
    //
    // Set sub-entries number
    //
    SetSubEntriesNum (Pml5Entry, 1);
    PTEntry = Pml5Entry;
  }

  if (mCpuSmmRestrictedMemoryAccess) {
    //
    // When access to non-SMRAM memory is restricted, create page table
    // that covers all memory space.
    //
    SetStaticPageTable ((UINTN)PTEntry);
  } else {
    //
    // Add pages to page pool
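    // (each free page's first bytes hold the LIST_ENTRY that links it into
    // mPagePool; AllocPage () zeroes the whole page when it is handed out)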
    //
    FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
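  // (SMRAM, and hence the page table allocated from it, resides below 4 GB,
  // so the truncation to UINT32 is safe)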
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64            *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out]  Entry    Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64      *Entry
  )
{
  UINT64         Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and
    // reset the access record to the initial value 7; ACC_MAX_BIT is added
    // to make the returned value larger than any other record.
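    // (7 + ACC_MAX_BIT == 15 exceeds anything the 3-bit field can hold, so a
    // just-accessed entry always outranks entries that were not accessed)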
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}

/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries also need to be
  inserted into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       Pml5Entry;
  UINT64                       *Pml5;
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml5Index;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml5;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;
  IA32_CR4                     Cr4;
  BOOLEAN                      Enable5LevelPaging;
  UINT64                       PFAddress;
  UINT64                       PFAddressPml5Index;
  UINT64                       PFAddressPml4Index;
  UINT64                       PFAddressPdptIndex;
  UINT64                       PFAddressPdtIndex;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPml5 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;
  PFAddress = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
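  //
  // The page table indexes of the faulting address are recorded above so
  // that the entries translating that address are never selected for
  // reclaim below.
  //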

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
    Pml5 = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }
    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }
      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }
          continue;
        }
        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it should be a PDPT entry;
          // we will not check the PML4 entry any more
          //
          PML4EIgnore = TRUE;
          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }
              continue;
            }
            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it should be a PD entry;
              // we will find the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
                  Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc  = Acc;
                  MinPml5 = Pml5Index;
                  MinPml4 = Pml4Index;
                  MinPdpt = PdptIndex;
                  MinPdt  = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }
          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte pages,
            // it should only have entries pointing to 2-MByte pages
            //
            if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
                Pml5Index != PFAddressPml5Index) {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc  = Acc;
                MinPml5 = Pml5Index;
                MinPml4 = Pml4Index;
                MinPdpt = PdptIndex;
                MinPdt  = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }
      if (!PML4EIgnore) {
        //
        // If the PML4 entry has no PDPT entries pointing to 2-MByte pages,
        // it should only have entries pointing to 1-GByte pages
        //
        if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml5 = Pml5Index;
            MinPml4 = Pml4Index;
            MinPdpt = (UINTN)-1;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be
  // inserted into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check the PDPT entry
      //
      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0 &&
          (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty Page Directory table if there are no more
        // 4-KByte Page Table entries under it, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte Page Table or a Page Directory table was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
        //
        // Release the empty PDPT table if there are no more entries under
        // it, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }
    //
    // The page pointed to by the PML4 entry has been released above; exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64                            RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *PageTableTop;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;
  BOOLEAN                           Enable5LevelPaging;
  IA32_CR4                          Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, fall back to the
  // default SMM page attributes
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

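  //
  // Walk from the top paging level (PML5 or PML4) down to the level selected
  // by EndBit, allocating any missing intermediate tables from the page
  // pool, then map NumOfPages pages starting at the faulting address.
  //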
  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page
      // entries of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is processor architecture specific.
  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;
  UINTN             GuardPageAddress;
  UINTN             CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Address 0x%lx is not supported by the processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be an SMM stack guard
  // page fault or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex = GetCpuIndex ();
    GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
        (PFAddress < EFI_PAGE_SIZE)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function sets the memory attributes for the page table.
**/
VOID
SetPageTableAttributes (
  VOID
  )
{
  UINTN                 Index2;
  UINTN                 Index3;
  UINTN                 Index4;
  UINTN                 Index5;
  UINT64                *L1PageTable;
  UINT64                *L2PageTable;
  UINT64                *L3PageTable;
  UINT64                *L4PageTable;
  UINT64                *L5PageTable;
  UINTN                 PageTableBase;
  BOOLEAN               IsSplitted;
  BOOLEAN               PageTableSplitted;
  BOOLEAN               CetEnabled;
  BOOLEAN               Enable5LevelPaging;

  //
  // Don't mark page table memory as read-only if
  //  - no restriction on access to non-SMRAM memory; or
  //  - SMM heap guard feature enabled; or
  //      BIT2: SMM page guard enabled
  //      BIT3: SMM pool guard enabled
  //  - SMM profile feature enabled
  //
  if (!mCpuSmmRestrictedMemoryAccess ||
      ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
      FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
              (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));

    //
    // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
    //
    ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
    return;
  }

  DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));

  //
  // Disable write protection, because we need to mark the page table as
  // read-only: we must *write* page table memory to mark it *read-only*.
  //
  CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
  if (CetEnabled) {
    //
    // CET must be disabled if WP is disabled.
    //
    DisableCet();
  }
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

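  //
  // Marking page table pages read-only may split large pages, which
  // allocates new page table pages; loop until a pass completes without any
  // split so that the newly created tables are marked read-only as well.
  //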
  do {
    DEBUG ((DEBUG_INFO, "Start...\n"));
    PageTableSplitted = FALSE;
    L5PageTable = NULL;

    GetPageTable (&PageTableBase, &Enable5LevelPaging);

    if (Enable5LevelPaging) {
      L5PageTable = (UINT64 *)PageTableBase;
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);
    }

    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
      if (Enable5LevelPaging) {
        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L4PageTable == NULL) {
          continue;
        }
      } else {
        L4PageTable = (UINT64 *)PageTableBase;
      }
      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
      PageTableSplitted = (PageTableSplitted || IsSplitted);

      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
        if (L3PageTable == NULL) {
          continue;
        }

        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
        PageTableSplitted = (PageTableSplitted || IsSplitted);

        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
            // 1G
            continue;
          }
          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
          if (L2PageTable == NULL) {
            continue;
          }

          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
          PageTableSplitted = (PageTableSplitted || IsSplitted);

          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
              // 2M
              continue;
            }
            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
            if (L1PageTable == NULL) {
              continue;
            }
            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
            PageTableSplitted = (PageTableSplitted || IsSplitted);
          }
        }
      }
    }
  } while (PageTableSplitted);

  //
  // Enable write protection, after page table updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
  if (CetEnabled) {
    //
    // re-enable CET.
    //
    EnableCet();
  }

  return;
}

/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out]  Cr2  Pointer to variable to hold CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in]  Cr2  Value to write into CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE  Access to non-SMRAM is restricted.
  @retval FALSE Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}