/** @file
  Basic paging support for the CPU to enable Stack Guard.

Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Cpuid.h>
#include <Register/Msr.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/CpuLib.h>
#include <Library/BaseLib.h>

#include "CpuMpPei.h"

#define IA32_PG_P             BIT0
#define IA32_PG_RW            BIT1
#define IA32_PG_U             BIT2
#define IA32_PG_A             BIT5
#define IA32_PG_D             BIT6
#define IA32_PG_PS            BIT7
#define IA32_PG_NX            BIT63

#define PAGE_ATTRIBUTE_BITS   (IA32_PG_RW | IA32_PG_P)
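//
// Attribute bits propagated from a large-page entry into each of the smaller
// entries created when that page is split (see SplitPage()).
//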
#define PAGE_PROGATE_BITS     (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                               PAGE_ATTRIBUTE_BITS)

#define PAGING_PAE_INDEX_MASK       0x1FF
#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull

typedef enum {
  PageNone = 0,
  PageMin  = 1,
  Page4K   = PageMin,
  Page2M   = 2,
  Page1G   = 3,
  Page512G = 4,
  PageMax  = Page512G
} PAGE_ATTRIBUTE;

typedef struct {
  PAGE_ATTRIBUTE   Attribute;
  UINT64           Length;
  UINT64           AddressMask;
  UINTN            AddressBitOffset;
  UINTN            AddressBitLength;
} PAGE_ATTRIBUTE_TABLE;

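//
// Properties of each supported paging level: page size, address mask, and the
// 9-bit field of the linear address used to index entries at that level.
//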
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone,          0,                           0,  0, 0},
  {Page4K,     SIZE_4KB,   PAGING_4K_ADDRESS_MASK_64, 12, 9},
  {Page2M,     SIZE_2MB,   PAGING_2M_ADDRESS_MASK_64, 21, 9},
  {Page1G,     SIZE_1GB,   PAGING_1G_ADDRESS_MASK_64, 30, 9},
  {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};

EFI_PEI_NOTIFY_DESCRIPTOR  mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};

/**
  The function will check if IA32 PAE is supported.

  @retval TRUE      IA32 PAE is supported.
  @retval FALSE     IA32 PAE is not supported.

**/
BOOLEAN
IsIa32PaeSupported (
  VOID
  )
{
  UINT32                    RegEax;
  CPUID_VERSION_INFO_EDX    RegEdx;

  AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_VERSION_INFO) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
    if (RegEdx.Bits.PAE != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  This API provides a way to allocate memory for page tables.

  The buffer returned is zero-filled.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID      *Address;

  Address = AllocatePages(Pages);
  if (Address != NULL) {
    ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));
  }

  return Address;
}

/**
  Get the physical address width supported by the current processor.

  @retval 32      If the processor is in 32-bit mode.
  @retval 36-48   If the processor is in 64-bit mode.

**/
UINTN
GetPhysicalAddressWidth (
  VOID
  )
{
  UINT32          RegEax;

  if (sizeof(UINTN) == 4) {
    return 32;
  }

  AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
    RegEax &= 0xFF;
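    //
    // CPUID may report up to 52 physical address bits; the value is capped at
    // 48, which keeps the top level page table built in CreatePageTable()
    // within 512 entries (a single 4KB page).
    //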
    if (RegEax > 48) {
      return 48;
    }

    return (UINTN)RegEax;
  }

  return 36;
}

/**
  Get the type of top level page table.

  @retval Page512G  PML4 paging.
  @retval Page1G    PAE paging.

**/
PAGE_ATTRIBUTE
GetPageTableTopLevelType (
  VOID
  )
{
  MSR_IA32_EFER_REGISTER      MsrEfer;

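  //
  // EFER.LMA set means long mode is active, i.e. 4-level (PML4) paging is in
  // use; otherwise PAE paging is in use, whose top level table (PDPT) has
  // entries that each map 1GB.
  //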
  MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);

  return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
}

/**
  Return the page table entry matching the address.

  @param[in]   Address          The address to be checked.
  @param[out]  PageAttribute    The page attribute of the page entry.

  @return The page entry.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS                  Address,
  OUT PAGE_ATTRIBUTE                    *PageAttribute
  )
{
  INTN                  Level;
  UINTN                 Index;
  UINT64                *PageTable;
  UINT64                AddressEncMask;

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory?
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  *PageAttribute = PageNone;
  return NULL;
}

/**
  This function splits one page entry into smaller page entries.

  @param[in]  PageEntry        The page entry to be split.
  @param[in]  PageAttribute    The page attribute of the page entry.
  @param[in]  SplitAttribute   How to split the page entry.
  @param[in]  Recursively      Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry is split.
  @retval RETURN_INVALID_PARAMETER  The target page attribute is invalid.
  @retval RETURN_OUT_OF_RESOURCES   No resource to split the page entry.
**/
RETURN_STATUS
SplitPage (
  IN  UINT64                            *PageEntry,
  IN  PAGE_ATTRIBUTE                    PageAttribute,
  IN  PAGE_ATTRIBUTE                    SplitAttribute,
  IN  BOOLEAN                           Recursively
  )
{
  UINT64            BaseAddress;
  UINT64            *NewPageEntry;
  UINTN             Index;
  UINT64            AddressEncMask;
  PAGE_ATTRIBUTE    SplitTo;

  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // Split just one level at a time to keep the page table as compact as
  // possible.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
  BaseAddress    = *PageEntry &
                   ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[PageAttribute].AddressMask;
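  //
  // A 4KB page table page holds 512 8-byte entries.  Fill every entry so that
  // together they remap the original region at the next smaller page size,
  // carrying over the attribute bits of the original entry.
  //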
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
                          ((*PageEntry) & PAGE_PROGATE_BITS);

    if (SplitTo != PageMin) {
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;

  return RETURN_SUCCESS;
}

/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  The caller must make sure BaseAddress and Length are page aligned.

  @param[in]   BaseAddress      Start address of a memory region.
  @param[in]   Length           Size in bytes of the memory region.
  @param[in]   Attributes       Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    BaseAddress or Length is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of the given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN  PHYSICAL_ADDRESS                  BaseAddress,
  IN  UINT64                            Length,
  IN  UINT64                            Attributes
  )
{
  UINT64                            *PageEntry;
  PAGE_ATTRIBUTE                    PageAttribute;
  RETURN_STATUS                     Status;
  EFI_PHYSICAL_ADDRESS              MaximumAddress;

  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

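  //
  // Only ranges below 4GB are supported by this implementation.
  //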
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // Walk the range in 4KB steps, splitting any large (1G/2M) page down to 4K
  // pages first, so that only the requested pages are modified and no memory
  // is wasted.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    if (PageAttribute != Page4K) {
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}

/**
  Get maximum size of page memory supported by current processor.

  @param[in]   TopLevelType     The type of top level page entry.

  @retval Page1G     If processor supports 1G page and PML4.
  @retval Page2M     For all other situations.

**/
PAGE_ATTRIBUTE
GetMaxMemoryPage (
  IN  PAGE_ATTRIBUTE  TopLevelType
  )
{
  UINT32          RegEax;
  UINT32          RegEdx;

  if (TopLevelType == Page512G) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
      AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
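      //
      // CPUID.80000001H:EDX[bit 26] reports whether 1GB pages are supported.
      //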
      if ((RegEdx & BIT26) != 0) {
        return Page1G;
      }
    }
  }

  return Page2M;
}

/**
  Create PML4 or PAE page table.

  @return The address of page table.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS           Status;
  UINTN                   PhysicalAddressBits;
  UINTN                   NumberOfEntries;
  PAGE_ATTRIBUTE          TopLevelPageAttr;
  UINTN                   PageTable;
  PAGE_ATTRIBUTE          MaxMemoryPage;
  UINTN                   Index;
  UINT64                  AddressEncMask;
  UINT64                  *PageEntry;
  EFI_PHYSICAL_ADDRESS    PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
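  //
  // Each top level entry maps 2^AddressBitOffset bytes, so identity-mapping
  // the whole physical address space needs
  // 2^(PhysicalAddressBits - AddressBitOffset) top level entries.
  //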
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split each top level entry down to the maximum page size supported.
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }

  return PageTable;
}

/**
  Set up page tables and enable paging.

**/
VOID
EnablePaging (
  VOID
  )
{
  UINTN                       PageTable;

  PageTable = CreatePageTable ();
  ASSERT (PageTable != 0);
  if (PageTable != 0) {
    AsmWriteCr3(PageTable);
    AsmWriteCr4 (AsmReadCr4 () | BIT5);   // CR4.PAE
    AsmWriteCr0 (AsmReadCr0 () | BIT31);  // CR0.PG
  }
}

/**
  Get the base address of the current AP's stack.

  This function is called in the AP's context and assumes that the whole call
  stack (up to and including this function) consumed by the AP's wakeup
  procedure does not exceed 4KB.

  PcdCpuApStackSize must be configured with a value that takes the Guard page
  into account.

  @param[in,out] Buffer  The pointer to private data buffer.

**/
VOID
EFIAPI
GetStackBase (
  IN OUT VOID *Buffer
  )
{
  EFI_PHYSICAL_ADDRESS    StackBase;

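  //
  // Take the address of a local variable as a point inside the current stack,
  // round it up to the next 4KB boundary (valid because the wakeup procedure
  // uses less than 4KB of stack, per the assumption above), then subtract the
  // full stack size to reach the stack base.
  //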
  StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
  StackBase += BASE_4KB;
  StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
  StackBase -= PcdGet32(PcdCpuApStackSize);

  *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
}

/**
  Set up a stack Guard page at the stack base of each processor. The BSP and
  the APs use different ways to get their stack base addresses.

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS        Hob;
  EFI_PHYSICAL_ADDRESS        StackBase;
  UINTN                       NumberOfProcessors;
  UINTN                       Bsp;
  UINTN                       Index;

  //
  // One extra page at the bottom of the stack is needed for the Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
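      //
      // The BSP stack is described by a memory allocation HOB tagged with
      // gEfiHobMemoryAllocStackGuid; search the HOB list for it.
      //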
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask the AP to return its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set the Guard page at the stack base address.
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of the page table.
  //
  CpuFlushTlb ();
}

/**
  Enable/set up stack guard for each processor if PcdCpuStackGuard is set to TRUE.

  Doing this in the memory-discovered callback makes sure that the Stack Guard
  feature covers as much PEI code as possible.

  @param[in] PeiServices          General purpose services available to every PEIM.
  @param[in] NotifyDescriptor     The notification structure this PEIM registered on install.
  @param[in] Ppi                  The memory discovered PPI.  Not used.

  @retval EFI_SUCCESS             The function completed successfully.
  @retval others                  There was an error in MP initialization.
**/
EFI_STATUS
EFIAPI
MemoryDiscoveredPpiNotifyCallback (
  IN EFI_PEI_SERVICES           **PeiServices,
  IN EFI_PEI_NOTIFY_DESCRIPTOR  *NotifyDescriptor,
  IN VOID                       *Ppi
  )
{
  EFI_STATUS      Status;
  BOOLEAN         InitStackGuard;

  //
  // Paging must be set up first. Otherwise the exception TSS set up later
  // during MP initialization will not contain paging information, and the
  // task switch (used for stack switching) will fail.
  //
  InitStackGuard = FALSE;
  if (IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard)) {
    EnablePaging ();
    InitStackGuard = TRUE;
  }

  Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
  ASSERT_EFI_ERROR (Status);

  if (InitStackGuard) {
    SetupStackGuardPage ();
  }

  return Status;
}