/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

//
// Current page index.
//
UINTN                     mPFPageIndex;

//
// Pool for dynamically creating page tables in the page fault handler.
//
UINT64                    mPFPageBuffer;

//
// Store the uplink information for each page being used.
//
UINT64                    *mPFPageUplink[MAX_PF_PAGE_COUNT];
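//
// The pool is consumed round-robin: once all MAX_PF_PAGE_COUNT pages have
// been handed out, AcquirePage () recycles the oldest page, clearing its
// stale uplink entry first so no page-directory entry is left pointing at
// a reused page table.
//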

/**
  Create SMM page table for S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;

  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (FALSE);

  //
  // Fill Page-Table-Level4 (PML4) entry
  //
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  ASSERT (PTEntry != NULL);
  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
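  //
  // Only PML4[0] is populated: it maps the low 512GB region through the
  // 4GB page tables built above. The remaining entries stay zero, i.e.
  // not present, which is all the S3 resume path needs.
  //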

  //
  // Return the address of PML4 (to set CR3)
  //
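  // SmmS3Cr3 is a UINT32; AllocatePageTableMemory () returns SMRAM, which
  // resides below 4GB, so the cast below cannot truncate the address.
  //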
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;
}

/**
  Allocate pages for creating 4KB pages out of a 2MB page when a page fault happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID          *Address;

  //
  // Pre-allocate memory for the page fault handler
  //
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN)Address;
  mPFPageIndex  = 0;
  ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
}

/**
  Allocate one page for creating 4KB pages out of a 2MB page.

  @param  Uplink   The address of the Page-Directory entry that will point
                   to the new page table.

**/
VOID
AcquirePage (
  UINT64          *Uplink
  )
{
  UINT64          Address;

  //
  // Get the next buffer page
  //
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);

  //
  // Cut the previous uplink if it exists and wasn't overwritten
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }
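  //
  // When the ring has wrapped, the page being handed out may still be
  // referenced by the page table from an earlier 2MB split; the check above
  // unlinks that stale reference before the page is reused.
  //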

  //
  // Link & record the current uplink
  //
  *Uplink = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}

/**
  Update the page table to map the memory correctly so that the instruction
  which caused the page fault can execute successfully. It also saves the
  original page table entry so that it can be restored in the single-step
  exception handler.

  @param  PageTable           The page table address.
  @param  PFAddress           The memory address which caused the page fault exception.
  @param  CpuIndex            The index of the processor.
  @param  ErrorCode           The error code of the exception.
  @param  IsValidPFAddress    Set to TRUE when the fault address falls in a
                              protected range, in which case the access is not
                              added to the SMM profile data.

**/
VOID
RestorePageTableAbove4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode,
  BOOLEAN       *IsValidPFAddress
  )
{
  UINTN         PTIndex;
  UINT64        Address;
  BOOLEAN       Nx;
  BOOLEAN       Existed;
  UINTN         Index;
  UINTN         PFIndex;
  IA32_CR4      Cr4;
  BOOLEAN       Enable5LevelPaging;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
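  //
  // Linear-address bits used as the index at each paging level:
  // PML5E: 56:48, PML4E: 47:39, PDPTE: 38:30, PDE: 29:21, PTE: 20:12.
  // With 5-level paging enabled (CR4.LA57 == 1) the walk starts at the
  // PML5 table; otherwise it starts at the PML4 table.
  //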

  //
  // The page fault address is above 4GB.
  //

  //
  // Check whether the page fault address already exists in the page table.
  // If it exists but a page fault was still generated, there are two possible
  // reasons: 1. the present flag is set to 0; 2. an instruction fetch occurred
  // in a protected memory range.
  //
  Existed = FALSE;
  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex = 0;
  if (Enable5LevelPaging) {
    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
  }
  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
    // PML5E
    if (Enable5LevelPaging) {
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PML4E
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
        // PDPTE
        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
        // PD
        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
          //
          // 2MB page
          //
          Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
            Existed = TRUE;
          }
        } else {
          //
          // 4KB page
          //
          PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (PageTable != NULL) {
            //
            // When there is a valid entry mapping a 4KB page, there is no need
            // to create a new entry mapping 2MB.
            //
            PTIndex = BitFieldRead64 (PFAddress, 12, 20);
            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
              Existed = TRUE;
            }
          }
        }
      }
    }
  }

  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {

    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // The page fault address above 4GB is in a protected range but still
      // caused a page fault exception. Create a page entry for it below,
      // marked present/read-write with execution disabled; this access is
      // not saved into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in the page table for the page fault address.
    //
    SmiDefaultPFHandler ();
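    //
    // CR2 still holds the faulting linear address. Re-read CR3 and CR2 and
    // walk the table again, since SmiDefaultPFHandler () may have installed
    // new intermediate paging structures.
    //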
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML5E
    if (Enable5LevelPaging) {
      PTIndex = BitFieldRead64 (PFAddress, 48, 56);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }
    // PML4E
    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
    //
    // Check whether the 2MB-page entry needs to be split into 4KB-page entries.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          //
          // The (INTN)(INT32) cast sign-extends the inverted mask to 64 bits,
          // so only the low attribute bits are cleared and the upper bits of
          // the entry (including NX) are preserved.
          //
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }
        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }
        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }
        Address += SIZE_4KB;
      } // end for PT
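      //
      // After the split, PTIndex indexes the 4KB PTE that covers PFAddress,
      // so the recording code below saves the correct entry.
      //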
    } else {
      //
      // Update the 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove the present and read/write flags.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }
      //
      // Set XD bit to 1
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record the old entries with non-present status.
  // Old entries cover both the memory the faulting instruction resides in and
  // the memory it accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }
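  //
  // The saved entries are restored (made non-present again) by the
  // single-step exception handler after the faulting instruction executes.
  //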

  //
  // Add the present flag or clear the XD flag to make the page fault handler succeed.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If the page fault was caused by an instruction fetch, clear the XD bit
    // in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }
}

/**
  Clear TF in FLAGS.

  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  //
  // BIT8 of RFLAGS is the Trap Flag (TF).
  //
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}