/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <PiPei.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/PcdLib.h>
#include <Library/HobLib.h>
#include <Register/Intel/Cpuid.h>
#include "VirtualMemory.h"

//
// Global variable to keep track of the current available memory used as page tables.
//
PAGE_TABLE_POOL   *mPageTablePool = NULL;

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart                  The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN  VOID *HobStart
  )
{
  EFI_PEI_HOB_POINTERS          RscHob;
  EFI_PEI_HOB_POINTERS          MemHob;
  BOOLEAN                       DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear = FALSE;

  //
  // Check if page 0 exists and is free
  //
  while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                                   RscHob.Raw)) != NULL) {
    if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&
        RscHob.ResourceDescriptor->PhysicalStart == 0) {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,
                                       MemHob.Raw)) != NULL) {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE) {
          DoClear = FALSE;
          break;
        }
        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }
      break;
    }
    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE   NULL pointer detection feature is enabled
  @return FALSE  NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if the Execute Disable Bit is available.

  @retval TRUE      Execute Disable Bit is available.
  @retval FALSE     Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32            RegEax;
  UINT32            RegEdx;
  BOOLEAN           Available;

  Available = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE    IA32_EFER.NXE should be enabled.
  @retval FALSE   IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.
  // Features controlled by the following PCDs need this feature to be enabled.
  //
  return (PcdGetBool (PcdSetNxForStack) ||
          PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64           MsrRegisters;

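  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is its NXE (No-Execute Enable) bit.
  //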
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  The function will check if the page table entry should be split to a smaller
  granularity.

  @param Address      Physical memory address.
  @param Size         Size of the given physical memory.
  @param StackBase    Base address of stack.
  @param StackSize    Size of stack.
  @param GhcbBase     Base address of GHCB pages.
  @param GhcbSize     Size of GHCB area.

  @retval TRUE      Page table should be split.
  @retval FALSE     Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS               Address,
  IN UINTN                              Size,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize,
  IN EFI_PHYSICAL_ADDRESS               GhcbBase,
  IN UINTN                              GhcbSize
  )
{
  if (IsNullDetectionEnabled () && Address == 0) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    if (StackBase >= Address && StackBase < (Address + Size)) {
      return TRUE;
    }
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  if (GhcbBase != 0) {
    if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with a number of pages greater than or equal to the given PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
  happen in practice.

  @param PoolPages  The least page number of the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   Memory is out of resources.
**/
BOOLEAN
InitializePageTablePool (
  IN UINTN           PoolPages
  )
{
  VOID          *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
              PAGE_TABLE_POOL_UNIT_PAGES;
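
  //
  // Worked example of the rounding above (assuming the 2MB default unit,
  // i.e. PAGE_TABLE_POOL_UNIT_PAGES == 512): a request for 5 pages becomes
  // 6 with the header page and rounds up to 512; a request for 512 becomes
  // 513 and rounds up to 1024.
  //
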
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool = Buffer;
    mPageTablePool = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages  = PoolPages - 1;
  mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page tables.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID          *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if (mPageTablePool == NULL ||
      Pages > mPageTablePool->FreePages) {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }
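
  //
  // Note: if the request did not fit, InitializePageTablePool() has made a
  // fresh pool the active head; pages left over in the previous pool are not
  // reused, but all pools stay linked so EnablePageTableProtection() can
  // still walk them.
  //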

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset     += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages  -= Pages;

  return Buffer;
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address covered by the 2M page.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.
  @param[in]      GhcbBase              GHCB page area base address.
  @param[in]      GhcbSize              GHCB page area size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize,
  IN EFI_PHYSICAL_ADDRESS               GhcbBase,
  IN UINTN                              GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY                   *PageTableEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePageTableMemory (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K;

    //
    // The GHCB range consists of two pages per CPU, the GHCB and a
    // per-CPU variable page. The GHCB page needs to be mapped as an
    // unencrypted page while the per-CPU variable page needs to be
    // mapped encrypted. These pages alternate in assignment.
    //
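    // The mask test below checks bit 12 ((PhysicalAddress4K - GhcbBase) &
    // SIZE_4KB), i.e. the parity of the 4KB page index relative to GhcbBase:
    // even pages (the GHCBs) stay unencrypted; odd pages get the mask.
    //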
    if ((GhcbBase == 0)
        || (PhysicalAddress4K < GhcbBase)
        || (PhysicalAddress4K >= GhcbBase + GhcbSize)
        || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0)) {
      PageTableEntry->Uint64 |= AddressEncMask;
    }
    PageTableEntry->Bits.ReadWrite = 1;

    if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||
        (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (PcdGetBool (PcdSetNxForStack)
        && (PhysicalAddress4K >= StackBase)
        && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress       Start physical address covered by the 1G page.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.
  @param[in]      GhcbBase              GHCB page area base address.
  @param[in]      GhcbSize              GHCB page area size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize,
  IN EFI_PHYSICAL_ADDRESS               GhcbBase,
  IN UINTN                              GhcbSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                      *PageDirectoryEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
      //
      // Need to split this 2M page that covers the NULL page, stack, or GHCB range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                             PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS              Address,
  IN  BOOLEAN                           Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from the page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;
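
  //
  // Indices 1..4 above correspond to the PTE, PDE, PDPTE and PML4E levels;
  // e.g. a large page at level 2 (a PDE with PS set) maps a 2MB region.
  //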

  AddressEncMask  = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                    PAGING_1G_ADDRESS_MASK_64;
  PageTable       = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize    = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit in
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize    -= LevelSize[Level];

          ++Index;
        }
      }

      break;

    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
            EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
            ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress  | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                                        IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
EnablePageTableProtection (
  IN  UINTN     PageTableBase,
  IN  BOOLEAN   Level4Paging
  )
{
  PAGE_TABLE_POOL         *HeadPool;
  PAGE_TABLE_POOL         *Pool;
  UINT64                  PoolSize;
  EFI_PHYSICAL_ADDRESS    Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // Disable CR0.WP, because we need to modify the page table entries in
  // order to mark the page table itself read-only.
  //
  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes of the processor (2MB by default). Let's
    // apply the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
      Address   += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize  -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Re-enable write protection after the page table attributes are updated.
  //
  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.
  @param[in] GhcbBase   GHCB base address.
  @param[in] GhcbSize   GHCB size.

  @return The address of the top-level page map (PML4, or PML5 when 5-level
          paging is enabled).

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize,
  IN EFI_PHYSICAL_ADDRESS   GhcbBase,
  IN UINTN                  GhcbSize
  )
{
  UINT32                                        RegEax;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX   EcxFlags;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml5Entries;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml5EntriesNeeded;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page5LevelSupport;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;
  IA32_CR4                                      Cr4;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings
  //
  PageMapLevel5Entry = NULL;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  Page5LevelSupport = FALSE;
  if (PcdGetBool (PcdUse5LevelPageTable)) {
    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL,
      &EcxFlags.Uint32, NULL, NULL
      );
    if (EcxFlags.Bits.FiveLevelPage != 0) {
      Page5LevelSupport = TRUE;
    }
  }

  DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical
  // addresses when 5-Level Paging is disabled, either because it is
  // unsupported by hardware or disabled by the PCD.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Page5LevelSupport && PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits - 30);
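
  //
  // For example: with 36 physical address bits the counts are 1 PML5 entry,
  // 1 PML4 entry and 2^(36-30) == 64 PDP entries; with 48 bits they become
  // 1, 512 and 512 respectively.
  //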

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }
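
  //
  // For example, with 36 address bits (1 PML5, 1 PML4, 64 PDP entries) and
  // neither 1GB-page nor 5-level support, this evaluates to
  // ((64 + 1) * 1 + 1) * 1 + 1 - 1 = 66 pages.
  //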

  DEBUG ((DEBUG_INFO, "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded, NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded, (UINT64)TotalPagesNum));

  BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap         = (VOID *) BigPageAddress;
  if (Page5LevelSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
    BigPageAddress    += SIZE_4KB;
  }
  PageAddress        = 0;

  for ( IndexOfPml5Entries = 0
      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
      ; IndexOfPml5Entries++) {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    PageMapLevel4Entry = (VOID *) BigPageAddress;
    BigPageAddress    += SIZE_4KB;

    if (Page5LevelSupport) {
      //
      // Make a PML5 Entry
      //
      PageMapLevel5Entry->Uint64 = (UINT64) (UINTN) PageMapLevel4Entry | AddressEncMask;
      PageMapLevel5Entry->Bits.ReadWrite = 1;
      PageMapLevel5Entry->Bits.Present   = 1;
      PageMapLevel5Entry++;
    }

    for ( IndexOfPml4Entries = 0
        ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
        ; IndexOfPml4Entries++, PageMapLevel4Entry++) {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
      //
      PageDirectoryPointerEntry = (VOID *) BigPageAddress;
      BigPageAddress += SIZE_4KB;

      //
      // Make a PML4 Entry
      //
      PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
            Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present = 1;
            PageDirectory1GEntry->Bits.MustBe1 = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
            ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
            ; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
          //
          // Each Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (VOID *) BigPageAddress;
          BigPageAddress += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry
          //
          PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
              //
              // Need to split this 2M page that covers the NULL page, stack, or GHCB range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
            } else {
              //
              // Fill in the Page Directory entries
              //
              PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present = 1;
              PageDirectoryEntry->Bits.MustBe1 = 1;
            }
          }
        }

        //
        // Fill the unused PDPTEs with null entries.
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof(PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // For the PML4 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  if (Page5LevelSupport) {
    Cr4.UintN = AsmReadCr4 ();
    Cr4.Bits.LA57 = 1;
    AsmWriteCr4 (Cr4.UintN);
    //
    // For the PML5 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for the page table as
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  //
  // Set IA32_EFER.NXE if necessary.
  //
  if (IsEnableNonExecNeeded ()) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
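
//
// Illustrative usage sketch (hypothetical caller code; the actual hand-off
// to the DXE core lives outside this file): the returned value is the
// physical address of the page-table root, suitable for loading into CR3
// before entering long mode, e.g.
//
//   PageTables = CreateIdentityMappingPageTables (BaseOfStack, StackSize, 0, 0);
//   AsmWriteCr3 (PageTables);
//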