1 /** @file
2 Enable SMM profile.
3 
4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6 
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8 
9 **/
10 
11 #include "PiSmmCpuDxeSmm.h"
12 #include "SmmProfileInternal.h"
13 
14 UINT32                    mSmmProfileCr3;
15 
16 SMM_PROFILE_HEADER        *mSmmProfileBase;
17 MSR_DS_AREA_STRUCT        *mMsrDsAreaBase;
18 //
19 // The buffer to store SMM profile data.
20 //
21 UINTN                     mSmmProfileSize;
22 
23 //
24 // The buffer to enable branch trace store.
25 //
26 UINTN                     mMsrDsAreaSize   = SMM_PROFILE_DTS_SIZE;
27 
28 //
29 // The flag indicates if execute-disable is supported by processor.
30 //
31 BOOLEAN                   mXdSupported     = TRUE;
32 
33 //
34 // The flag indicates if execute-disable is enabled on processor.
35 //
36 BOOLEAN                   mXdEnabled       = FALSE;
37 
38 //
39 // The flag indicates if BTS is supported by processor.
40 //
41 BOOLEAN                   mBtsSupported     = TRUE;
42 
43 //
44 // The flag indicates if SMM profile starts to record data.
45 //
46 BOOLEAN                   mSmmProfileStart = FALSE;
47 
48 //
49 // The flag indicates if #DB will be setup in #PF handler.
50 //
51 BOOLEAN                   mSetupDebugTrap = FALSE;
52 
53 //
54 // Record the page fault exception count for one instruction execution.
55 //
56 UINTN                     *mPFEntryCount;
57 
58 UINT64                    (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
59 UINT64                    *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
60 
61 MSR_DS_AREA_STRUCT        **mMsrDsArea;
62 BRANCH_TRACE_RECORD       **mMsrBTSRecord;
63 UINTN                     mBTSRecordNumber;
64 PEBS_RECORD               **mMsrPEBSRecord;
65 
66 //
// These memory ranges are always present. They do not generate access-type page fault
// exceptions, but they may generate instruction-fetch-type page fault exceptions.
69 //
70 MEMORY_PROTECTION_RANGE   *mProtectionMemRange     = NULL;
71 UINTN                     mProtectionMemRangeCount = 0;
72 
73 //
74 // Some predefined memory ranges.
75 //
MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed at runtime).
  // It is always present and instruction fetches are allowed.
  //
  {{0x00000000, 0x00000000},TRUE,FALSE},

  //
  // SMM profile data range (to be fixed at runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {{0x00000000, 0x00000000},TRUE,TRUE},

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed at runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended ranges could be added here.
  //

  //
  // PCI MMIO ranges (to be added at runtime).
  // They are always present and instruction fetches are not allowed.
  //
};
104 
105 //
106 // These memory ranges are mapped by 4KB-page instead of 2MB-page.
107 //
108 MEMORY_RANGE              *mSplitMemRange          = NULL;
109 UINTN                     mSplitMemRangeCount      = 0;
110 
111 //
112 // SMI command port.
113 //
114 UINT32                    mSmiCommandPort;
115 
116 /**
117   Disable branch trace store.
118 
119 **/
120 VOID
DisableBTS (
122   VOID
123   )
124 {
125   AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
126 }
127 
128 /**
129   Enable branch trace store.
130 
131 **/
132 VOID
EnableBTS (
134   VOID
135   )
136 {
137   AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
138 }
139 
/**
  Get the CPU index from the local APIC ID of the current processor.

  @return The index of the CPU whose ProcessorId matches the current APIC ID.

**/
144 UINTN
GetCpuIndex (
146   VOID
147   )
148 {
149   UINTN     Index;
150   UINT32    ApicId;
151 
152   ApicId = GetApicId ();
153 
154   for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
155     if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
156       return Index;
157     }
158   }
159   ASSERT (FALSE);
160   return 0;
161 }
162 
/**
  Get, from the BTS records, the source IP of the branch whose destination triggered
  the execute-disable exception.

  @param  CpuIndex        The index of the CPU.
  @param  DestinationIP   The destination address.

  @return The source address of the branch, or 0 if no matching BTS record is found.

**/
170 UINT64
GetSourceFromDestinationOnBts (
172   UINTN  CpuIndex,
173   UINT64 DestinationIP
174   )
175 {
176   BRANCH_TRACE_RECORD  *CurrentBTSRecord;
177   UINTN                Index;
178   BOOLEAN              FirstMatch;
179 
180   FirstMatch = FALSE;
181 
182   CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
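  //
  // Walk the BTS records backwards from the current BTS index. The BTS buffer is
  // configured as a circular buffer (BTINT is cleared in ActivateBTS()), so wrap
  // around to the last record in the buffer when the walk underflows the buffer base.
  //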
183   for (Index = 0; Index < mBTSRecordNumber; Index++) {
184     if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
185       //
186       // Underflow
187       //
188       CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
189       CurrentBTSRecord --;
190     }
191     if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
192       //
      // A match is found: locate the first one, then the second one.
194       //
195       if (!FirstMatch) {
196         //
        // The first match comes from the #DB exception itself.
198         //
199         FirstMatch = TRUE;
200       } else {
201         //
        // Found the proper record.
203         //
204         return CurrentBTSRecord->LastBranchFrom;
205       }
206     }
207     CurrentBTSRecord--;
208   }
209 
210   return 0;
211 }
212 
213 /**
214   SMM profile specific INT 1 (single-step) exception handler.
215 
216   @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is processor architecture specific.
218   @param  SystemContext    A pointer to the processor context when
219                            the interrupt occurred on the processor.
220 **/
221 VOID
222 EFIAPI
DebugExceptionHandler (
224     IN EFI_EXCEPTION_TYPE   InterruptType,
225     IN EFI_SYSTEM_CONTEXT   SystemContext
226   )
227 {
228   UINTN  CpuIndex;
229   UINTN  PFEntry;
230 
231   if (!mSmmProfileStart &&
232       !HEAP_GUARD_NONSTOP_MODE &&
233       !NULL_DETECTION_NONSTOP_MODE) {
234     return;
235   }
236   CpuIndex = GetCpuIndex ();
237 
238   //
239   // Clear last PF entries
240   //
241   for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
242     *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
243   }
244 
245   //
246   // Reset page fault exception count for next page fault.
247   //
248   mPFEntryCount[CpuIndex] = 0;
249 
250   //
251   // Flush TLB
252   //
253   CpuFlushTlb ();
254 
255   //
256   // Clear TF in EFLAGS
257   //
258   ClearTrapFlag (SystemContext);
259 }
260 
261 /**
262   Check if the input address is in SMM ranges.
263 
264   @param[in]  Address       The input address.
265 
266   @retval TRUE     The input address is in SMM.
267   @retval FALSE    The input address is not in SMM.
268 **/
269 BOOLEAN
IsInSmmRanges (
271   IN EFI_PHYSICAL_ADDRESS   Address
272   )
273 {
274   UINTN  Index;
275 
276   if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
277     return TRUE;
278   }
279   for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
280     if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
281         Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
282       return TRUE;
283     }
284   }
285   return FALSE;
286 }
287 
/**
  Check if the memory address is present according to the protected memory ranges,
  and report whether it is execute-disable.

  @param  Address  The address of Memory.
  @param  Nx       The flag indicates if the memory is execute-disable.

  @return TRUE if the memory address is present; FALSE otherwise.

**/
295 BOOLEAN
IsAddressValid (
297   IN EFI_PHYSICAL_ADDRESS   Address,
298   IN BOOLEAN                *Nx
299   )
300 {
301   UINTN  Index;
302 
303   if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
304     //
305     // Check configuration
306     //
307     for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
308       if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
309         *Nx = mProtectionMemRange[Index].Nx;
310         return mProtectionMemRange[Index].Present;
311       }
312     }
313     *Nx = TRUE;
314     return FALSE;
315 
316   } else {
317     *Nx = TRUE;
318     if (IsInSmmRanges (Address)) {
319       *Nx = FALSE;
320     }
321     return TRUE;
322   }
323 }
324 
/**
  Check if the memory address will be mapped by 4KB-page.

  @param  Address  The address of Memory.

  @return TRUE if the 2MB page containing Address should be split into 4KB pages.

**/
331 BOOLEAN
IsAddressSplit (
333   IN EFI_PHYSICAL_ADDRESS   Address
334   )
335 {
336   UINTN  Index;
337 
338   if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
339     //
340     // Check configuration
341     //
342     for (Index = 0; Index < mSplitMemRangeCount; Index++) {
343       if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
344         return TRUE;
345       }
346     }
347   } else {
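    //
    // When the SMM profile feature is disabled, only the 2MB pages that straddle a
    // non-2MB-aligned SMRR base or SMRR top need to be split into 4KB pages.
    //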
348     if (Address < mCpuHotPlugData.SmrrBase) {
349       if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
350         return TRUE;
351       }
352     } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB))  {
353       if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
354         return TRUE;
355       }
356     }
357   }
358   //
359   // Return default
360   //
361   return FALSE;
362 }
363 
364 /**
365   Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
366 
367 **/
368 VOID
InitProtectedMemRange (
370   VOID
371   )
372 {
373   UINTN                            Index;
374   UINTN                            NumberOfDescriptors;
375   UINTN                            NumberOfAddedDescriptors;
376   UINTN                            NumberOfProtectRange;
377   UINTN                            NumberOfSpliteRange;
378   EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
379   UINTN                            TotalSize;
380   EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
381   EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
382   EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
383   EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
384   UINT64                           High4KBPageSize;
385   UINT64                           Low4KBPageSize;
386 
387   NumberOfDescriptors      = 0;
388   NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
389   NumberOfSpliteRange      = 0;
390   MemorySpaceMap           = NULL;
391 
392   //
393   // Get MMIO ranges from GCD and add them into protected memory ranges.
394   //
395   gDS->GetMemorySpaceMap (
396        &NumberOfDescriptors,
397        &MemorySpaceMap
398        );
399   for (Index = 0; Index < NumberOfDescriptors; Index++) {
400     if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
401       NumberOfAddedDescriptors++;
402     }
403   }
404 
405   if (NumberOfAddedDescriptors != 0) {
406     TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
407     mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
408     ASSERT (mProtectionMemRange != NULL);
409     mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);
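    //
    // Note: this count is an upper bound; it is trimmed to the number of ranges
    // actually populated at the end of this block.
    //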
410 
411     //
412     // Copy existing ranges.
413     //
414     CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));
415 
416     //
417     // Create split ranges which come from protected ranges.
418     //
419     TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
420     mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
421     ASSERT (mSplitMemRange != NULL);
422 
423     //
424     // Create SMM ranges which are set to present and execution-enable.
425     //
426     NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
427     for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
428       if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
429           mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
430         //
        // If the address range has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
432         //
433         break;
434       }
435       mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
436       mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
437       mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
438       mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
439       NumberOfProtectRange++;
440     }
441 
442     //
443     // Create MMIO ranges which are set to present and execution-disable.
444     //
445     for (Index = 0; Index < NumberOfDescriptors; Index++) {
446       if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
447         continue;
448       }
449       mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
450       mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
451       mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
452       mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
453       NumberOfProtectRange++;
454     }
455 
456     //
    // Check and update the actual protected memory range count.
458     //
459     ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
460     mProtectionMemRangeCount = NumberOfProtectRange;
461   }
462 
463   //
  // According to the protected ranges, create the ranges which will be mapped by 4KB pages.
465   //
466   NumberOfSpliteRange  = 0;
467   NumberOfProtectRange = mProtectionMemRangeCount;
468   for (Index = 0; Index < NumberOfProtectRange; Index++) {
469     //
    // If the range is not 2MB-aligned, record 2MB-aligned split ranges so that 4KB pages can be created in the page table.
471     //
472     ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
473     ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
474     if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress  & (SIZE_2MB - 1)) != 0)) {
475       //
476       // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
477       // A mix of 4KB and 2MB page could save SMRAM space.
478       //
479       Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
480       Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
481       if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
482           ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
483         //
        // There is a range which could be mapped by 2MB pages.
485         //
486         High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
487         Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
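        //
        // High4KBPageSize is non-zero only when ProtectEndAddress is not 2MB-aligned, and
        // Low4KBPageSize is non-zero only when ProtectBaseAddress is not 2MB-aligned. In
        // either case the whole 2MB region containing that unaligned boundary is mapped
        // with 4KB pages, while the 2MB-aligned middle of the range keeps 2MB pages.
        //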
488         if (High4KBPageSize != 0) {
489           //
          // Add the non-2MB-aligned tail range, to be mapped by 4KB pages.
491           //
492           mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
493           mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
494           NumberOfSpliteRange++;
495         }
496         if (Low4KBPageSize != 0) {
497           //
          // Add the non-2MB-aligned head range, to be mapped by 4KB pages.
499           //
500           mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
501           mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
502           NumberOfSpliteRange++;
503         }
504       } else {
505         //
        // The range can only be mapped by 4KB pages.
507         //
508         mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
509         mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
510         NumberOfSpliteRange++;
511       }
512     }
513   }
514 
515   mSplitMemRangeCount = NumberOfSpliteRange;
516 
517   DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
518   for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
519     DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
520     DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top  = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
521   }
522   for (Index = 0; Index < mSplitMemRangeCount; Index++) {
523     DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
524     DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top  = %lx\n", Index, mSplitMemRange[Index].Top));
525   }
526 }
527 
528 /**
529   Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
530 
531 **/
532 VOID
InitPaging (
534   VOID
535   )
536 {
537   UINT64                            Pml5Entry;
538   UINT64                            Pml4Entry;
539   UINT64                            *Pml5;
540   UINT64                            *Pml4;
541   UINT64                            *Pdpt;
542   UINT64                            *Pd;
543   UINT64                            *Pt;
544   UINTN                             Address;
545   UINTN                             Pml5Index;
546   UINTN                             Pml4Index;
547   UINTN                             PdptIndex;
548   UINTN                             PdIndex;
549   UINTN                             PtIndex;
550   UINTN                             NumberOfPdptEntries;
551   UINTN                             NumberOfPml4Entries;
552   UINTN                             NumberOfPml5Entries;
553   UINTN                             SizeOfMemorySpace;
554   BOOLEAN                           Nx;
555   IA32_CR4                          Cr4;
556   BOOLEAN                           Enable5LevelPaging;
557 
558   Cr4.UintN = AsmReadCr4 ();
559   Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
560 
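  //
  // mSmmProfileCr3 points to the top level of the SMM profile page tables. When a
  // paging level is not in use (5-level paging disabled, or 32-bit PAE paging), fake
  // a single present upper-level entry on the stack so the walk below stays uniform.
  //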
561   if (sizeof (UINTN) == sizeof (UINT64)) {
562     if (!Enable5LevelPaging) {
563       Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
564       Pml5 = &Pml5Entry;
565     } else {
566       Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
567     }
568     SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
569     //
570     // Calculate the table entries of PML4E and PDPTE.
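    // Each PML5 entry covers 2^48 bytes, each PML4 entry covers 2^39 bytes, and each
    // PDPT entry covers 2^30 bytes, so the entry counts follow from the size of the
    // physical address space reported by gPhyMask.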
571     //
572     NumberOfPml5Entries = 1;
573     if (SizeOfMemorySpace > 48) {
574       NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
575       SizeOfMemorySpace = 48;
576     }
577 
578     NumberOfPml4Entries = 1;
579     if (SizeOfMemorySpace > 39) {
580       NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
581       SizeOfMemorySpace = 39;
582     }
583 
584     NumberOfPdptEntries = 1;
585     ASSERT (SizeOfMemorySpace > 30);
586     NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30);
587   } else {
588     Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
589     Pml4 = &Pml4Entry;
590     Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
591     Pml5 = &Pml5Entry;
592     NumberOfPml5Entries  = 1;
593     NumberOfPml4Entries  = 1;
594     NumberOfPdptEntries  = 4;
595   }
596 
597   //
598   // Go through page table and change 2MB-page into 4KB-page.
599   //
600   for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
601     if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
602       //
603       // If PML5 entry does not exist, skip it
604       //
605       continue;
606     }
607     Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
608     for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
609       if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
610         //
611         // If PML4 entry does not exist, skip it
612         //
613         continue;
614       }
615       Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
616       for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
617         if ((*Pdpt & IA32_PG_P) == 0) {
618           //
619           // If PDPT entry does not exist, skip it
620           //
621           continue;
622         }
623         if ((*Pdpt & IA32_PG_PS) != 0) {
624           //
          // This is a 1GB entry; skip it.
626           //
627           continue;
628         }
629         Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
630         if (Pd == 0) {
631           continue;
632         }
633         for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
634           if ((*Pd & IA32_PG_P) == 0) {
635             //
636             // If PD entry does not exist, skip it
637             //
638             continue;
639           }
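          //
          // Reconstruct the 2MB-aligned address mapped by this PD entry:
          //   Address = (((Pml5Index * 512 + Pml4Index) * 512 + PdptIndex) * 512 + PdIndex) * 2MB
          //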
640           Address = (UINTN) LShiftU64 (
641                               LShiftU64 (
642                                 LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
643                                 9
644                                 ) + PdIndex,
645                                 21
646                               );
647 
648           //
649           // If it is 2M page, check IsAddressSplit()
650           //
651           if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
652             //
653             // Based on current page table, create 4KB page table for split area.
654             //
655             ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
656 
657             Pt = AllocatePageTableMemory (1);
658             ASSERT (Pt != NULL);
659 
660             *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P;
661 
662             // Split it
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
              Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
665             } // end for PT
666             *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
667           } // end if IsAddressSplit
668         } // end for PD
669       } // end for PDPT
670     } // end for PML4
671   } // end for PML5
672 
673   //
674   // Go through page table and set several page table entries to absent or execute-disable.
675   //
676   DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
677   for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
678     if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
679       //
680       // If PML5 entry does not exist, skip it
681       //
682       continue;
683     }
684     Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
685     for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
686       if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
687         //
688         // If PML4 entry does not exist, skip it
689         //
690         continue;
691       }
692       Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
693       for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
694         if ((*Pdpt & IA32_PG_P) == 0) {
695           //
696           // If PDPT entry does not exist, skip it
697           //
698           continue;
699         }
700         if ((*Pdpt & IA32_PG_PS) != 0) {
701           //
          // This is a 1GB entry; set the NX bit and skip it.
703           //
704           if (mXdSupported) {
705             *Pdpt = *Pdpt | IA32_PG_NX;
706           }
707           continue;
708         }
709         Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
710         if (Pd == 0) {
711           continue;
712         }
713         for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
714           if ((*Pd & IA32_PG_P) == 0) {
715             //
716             // If PD entry does not exist, skip it
717             //
718             continue;
719           }
720           Address = (UINTN) LShiftU64 (
721                               LShiftU64 (
722                                 LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
723                                 9
724                                 ) + PdIndex,
725                                 21
726                               );
727 
728           if ((*Pd & IA32_PG_PS) != 0) {
729             // 2MB page
730 
731             if (!IsAddressValid (Address, &Nx)) {
732               //
733               // Patch to remove Present flag and RW flag
734               //
735               *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
736             }
737             if (Nx && mXdSupported) {
738               *Pd = *Pd | IA32_PG_NX;
739             }
740           } else {
741             // 4KB page
742             Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
743             if (Pt == 0) {
744               continue;
745             }
746             for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
747               if (!IsAddressValid (Address, &Nx)) {
748                 *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
749               }
750               if (Nx && mXdSupported) {
751                 *Pt = *Pt | IA32_PG_NX;
752               }
753               Address += SIZE_4KB;
754             } // end for PT
755           } // end if PS
756         } // end for PD
757       } // end for PDPT
758     } // end for PML4
759   } // end for PML5
760 
761   //
762   // Flush TLB
763   //
764   CpuFlushTlb ();
765   DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
766   //
767   // Set execute-disable flag
768   //
769   mXdEnabled = TRUE;
770 
771   return ;
772 }
773 
774 /**
  Get the system port address of the SMI Command Port from the FADT table.
776 
777 **/
778 VOID
GetSmiCommandPort (
780   VOID
781   )
782 {
783   EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
784 
785   Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
786                                                          EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
787                                                          );
788   ASSERT (Fadt != NULL);
789 
790   mSmiCommandPort = Fadt->SmiCmd;
791   DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
792 }
793 
794 /**
  Mark the SMM profile as started, so that the page fault handler and the debug
  exception handler begin to record SMM profile data. The page table itself is
  updated separately by InitPaging().
798 
799 **/
800 VOID
SmmProfileStart (
802   VOID
803   )
804 {
805   //
806   // The flag indicates SMM profile starts to work.
807   //
808   mSmmProfileStart = TRUE;
809 }
810 
811 /**
812   Initialize SMM profile in SmmReadyToLock protocol callback function.
813 
814   @param  Protocol   Points to the protocol's unique identifier.
815   @param  Interface  Points to the interface instance.
816   @param  Handle     The handle on which the interface was installed.
817 
818   @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
819 **/
820 EFI_STATUS
821 EFIAPI
InitSmmProfileCallBack (
823   IN CONST EFI_GUID  *Protocol,
824   IN VOID            *Interface,
825   IN EFI_HANDLE      Handle
826   )
827 {
828   //
  // Save the base address to a UEFI variable so that the SMM profile data can be found.
830   //
831   gRT->SetVariable (
832          SMM_PROFILE_NAME,
833          &gEfiCallerIdGuid,
834          EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
835          sizeof(mSmmProfileBase),
836          &mSmmProfileBase
837          );
838 
839   //
840   // Get Software SMI from FADT
841   //
842   GetSmiCommandPort ();
843 
844   //
845   // Initialize protected memory range for patching page table later.
846   //
847   InitProtectedMemRange ();
848 
849   return EFI_SUCCESS;
850 }
851 
852 /**
853   Initialize SMM profile data structures.
854 
855 **/
856 VOID
InitSmmProfileInternal (
858   VOID
859   )
860 {
861   EFI_STATUS                 Status;
862   EFI_PHYSICAL_ADDRESS       Base;
863   VOID                       *Registration;
864   UINTN                      Index;
865   UINTN                      MsrDsAreaSizePerCpu;
866   UINTN                      TotalSize;
867 
868   mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
869   ASSERT (mPFEntryCount != NULL);
870   mLastPFEntryValue = (UINT64  (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
871                                                          sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
872   ASSERT (mLastPFEntryValue != NULL);
873   mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
874                                                            sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
875   ASSERT (mLastPFEntryPointer != NULL);
876 
877   //
878   // Allocate memory for SmmProfile below 4GB.
  // The base address is saved to a UEFI variable in InitSmmProfileCallBack() so that the profile data can be located.
880   //
881   mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
882   ASSERT ((mSmmProfileSize & 0xFFF) == 0);
883 
884   if (mBtsSupported) {
885     TotalSize = mSmmProfileSize + mMsrDsAreaSize;
886   } else {
887     TotalSize = mSmmProfileSize;
888   }
889 
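  //
  // AllocateMaxAddress treats Base as an upper limit, so starting from 0xFFFFFFFF
  // keeps the reserved buffer below 4GB.
  //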
890   Base = 0xFFFFFFFF;
891   Status = gBS->AllocatePages (
892                   AllocateMaxAddress,
893                   EfiReservedMemoryType,
894                   EFI_SIZE_TO_PAGES (TotalSize),
895                   &Base
896                   );
897   ASSERT_EFI_ERROR (Status);
898   ZeroMem ((VOID *)(UINTN)Base, TotalSize);
899   mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;
900 
901   //
902   // Initialize SMM profile data header.
903   //
904   mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
905   mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
906   mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
907   mSmmProfileBase->CurDataEntries = 0;
908   mSmmProfileBase->CurDataSize    = 0;
909   mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
910   mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
911   mSmmProfileBase->NumSmis        = 0;
912   mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
913 
914   if (mBtsSupported) {
915     mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
916     ASSERT (mMsrDsArea != NULL);
917     mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
918     ASSERT (mMsrBTSRecord != NULL);
919     mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
920     ASSERT (mMsrPEBSRecord != NULL);
921 
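    //
    // Per-CPU DS save area layout (MsrDsAreaSizePerCpu bytes each): the MSR_DS_AREA_STRUCT
    // header comes first, followed by mBTSRecordNumber branch trace records, with
    // PEBS_RECORD_NUMBER PEBS records placed at the end of the area.
    //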
922     mMsrDsAreaBase  = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
923     MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
924     mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
925     for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
926       mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
927       mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
928       mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);
929 
930       mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
931       mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
932       mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
933       mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;
934 
935       mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
936       mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
937       mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
938       mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
939     }
940   }
941 
942   mProtectionMemRange      = mProtectionMemRangeTemplate;
943   mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
944 
945   //
946   // Update TSeg entry.
947   //
948   mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
949   mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;
950 
951   //
952   // Update SMM profile entry.
953   //
954   mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
955   mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;
956 
957   //
958   // Allocate memory reserved for creating 4KB pages.
959   //
960   InitPagesForPFHandler ();
961 
962   //
963   // Start SMM profile when SmmReadyToLock protocol is installed.
964   //
965   Status = gSmst->SmmRegisterProtocolNotify (
966                     &gEfiSmmReadyToLockProtocolGuid,
967                     InitSmmProfileCallBack,
968                     &Registration
969                     );
970   ASSERT_EFI_ERROR (Status);
971 
972   return ;
973 }
974 
975 /**
  Check if the XD, BTS and CET features are supported by the processor, and clear the corresponding support flags if they are not.
977 
978 **/
979 VOID
CheckFeatureSupported (
981   VOID
982   )
983 {
984   UINT32                         RegEax;
985   UINT32                         RegEcx;
986   UINT32                         RegEdx;
987   MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;
988 
989   if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
990     AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
991     if (RegEax <= CPUID_EXTENDED_FUNCTION) {
992       mCetSupported = FALSE;
993       PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
994     }
995     AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
996     if ((RegEcx & CPUID_CET_SS) == 0) {
997       mCetSupported = FALSE;
998       PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
999     }
1000   }
1001 
1002   if (mXdSupported) {
1003     AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
1004     if (RegEax <= CPUID_EXTENDED_FUNCTION) {
1005       //
1006       // Extended CPUID functions are not supported on this processor.
1007       //
1008       mXdSupported = FALSE;
1009       PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
1010     }
1011 
1012     AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
1013     if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
1014       //
1015       // Execute Disable Bit feature is not supported on this processor.
1016       //
1017       mXdSupported = FALSE;
1018       PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
1019     }
1020   }
1021 
1022   if (mBtsSupported) {
1023     AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
1024     if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
1025       //
1026       // Per IA32 manuals:
1027       // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
1028       // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
1029       //    availability of the BTS facilities, including the ability to set the BTS and
1030       //    BTINT bits in the MSR_DEBUGCTLA MSR.
1031       // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
1032       //
1033       MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
1034       if (MiscEnableMsr.Bits.BTS == 1) {
1035         //
        // The BTS facilities are not available if the MSR_IA32_MISC_ENABLE.BTS (BTS_UNAVAILABLE) bit is set.
1037         //
1038         mBtsSupported = FALSE;
1039       }
1040     }
1041   }
1042 }
1043 
1044 /**
1045   Enable single step.
1046 
1047 **/
1048 VOID
ActivateSingleStepDB (
1050   VOID
1051   )
1052 {
1053   UINTN    Dr6;
1054 
1055   Dr6 = AsmReadDr6 ();
1056   if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1057     return;
1058   }
1059   Dr6 |= DR6_SINGLE_STEP;
1060   AsmWriteDr6 (Dr6);
1061 }
1062 
1063 /**
1064   Enable last branch.
1065 
1066 **/
1067 VOID
ActivateLBR (
1069   VOID
1070   )
1071 {
1072   UINT64  DebugCtl;
1073 
1074   DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1075   if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1076     return ;
1077   }
1078   DebugCtl |= MSR_DEBUG_CTL_LBR;
1079   AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1080 }
1081 
1082 /**
1083   Enable branch trace store.
1084 
1085   @param  CpuIndex  The index of the processor.
1086 
1087 **/
1088 VOID
ActivateBTS (
1090   IN      UINTN                     CpuIndex
1091   )
1092 {
1093   UINT64  DebugCtl;
1094 
1095   DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1096   if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1097     return ;
1098   }
1099 
1100   AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1101   DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
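  //
  // Clear BTINT so the BTS buffer operates in circular mode and branch tracing does
  // not generate an interrupt when the buffer becomes full.
  //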
1102   DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1103   AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1104 }
1105 
1106 /**
  Increment the SMI count in the SMM profile header on each SMI entry.
1108 
1109 **/
1110 VOID
SmmProfileRecordSmiNum (
1112   VOID
1113   )
1114 {
1115   if (mSmmProfileStart) {
1116     mSmmProfileBase->NumSmis++;
1117   }
1118 }
1119 
1120 /**
1121   Initialize processor environment for SMM profile.
1122 
1123   @param  CpuIndex  The index of the processor.
1124 
1125 **/
1126 VOID
ActivateSmmProfile (
1128   IN UINTN CpuIndex
1129   )
1130 {
1131   //
1132   // Enable Single Step DB#
1133   //
1134   ActivateSingleStepDB ();
1135 
1136   if (mBtsSupported) {
1137     //
1138     // We can not get useful information from LER, so we have to use BTS.
1139     //
1140     ActivateLBR ();
1141 
1142     //
1143     // Enable BTS
1144     //
1145     ActivateBTS (CpuIndex);
1146   }
1147 }
1148 
1149 /**
1150   Initialize SMM profile in SMM CPU entry point.
1151 
1152   @param[in] Cr3  The base address of the page tables to use in SMM.
1153 
1154 **/
1155 VOID
InitSmmProfile (
1157   UINT32  Cr3
1158   )
1159 {
1160   //
1161   // Save Cr3
1162   //
1163   mSmmProfileCr3 = Cr3;
1164 
1165   //
1166   // Skip SMM profile initialization if feature is disabled
1167   //
1168   if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
1169       !HEAP_GUARD_NONSTOP_MODE &&
1170       !NULL_DETECTION_NONSTOP_MODE) {
1171     return;
1172   }
1173 
1174   //
1175   // Initialize SmmProfile here
1176   //
1177   InitSmmProfileInternal ();
1178 
1179   //
1180   // Initialize profile IDT.
1181   //
1182   InitIdtr ();
1183 
1184   //
1185   // Tell #PF handler to prepare a #DB subsequently.
1186   //
1187   mSetupDebugTrap = TRUE;
1188 }
1189 
1190 /**
  Update the page table to map the memory correctly, so that the instruction which
  caused the page fault can execute successfully. It also saves the original page
  table entries so that they can be restored in the single-step (#DB) exception handler.
1194 
1195   @param  PageTable           PageTable Address.
1196   @param  PFAddress           The memory address which caused page fault exception.
1197   @param  CpuIndex            The index of the processor.
1198   @param  ErrorCode           The Error code of exception.
1199 
1200 **/
1201 VOID
RestorePageTableBelow4G (
1203   UINT64        *PageTable,
1204   UINT64        PFAddress,
1205   UINTN         CpuIndex,
1206   UINTN         ErrorCode
1207   )
1208 {
1209   UINTN         PTIndex;
1210   UINTN         PFIndex;
1211   IA32_CR4      Cr4;
1212   BOOLEAN       Enable5LevelPaging;
1213 
1214   Cr4.UintN = AsmReadCr4 ();
1215   Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
1216 
1217   //
1218   // PML5
1219   //
1220   if (Enable5LevelPaging) {
1221     PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
1222     ASSERT (PageTable[PTIndex] != 0);
1223     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1224   }
1225 
1226   //
1227   // PML4
1228   //
1229   if (sizeof(UINT64) == sizeof(UINTN)) {
1230     PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
1231     ASSERT (PageTable[PTIndex] != 0);
1232     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1233   }
1234 
1235   //
1236   // PDPTE
1237   //
1238   PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
1239   ASSERT (PageTable[PTIndex] != 0);
1240   PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1241 
1242   //
1243   // PD
1244   //
1245   PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
1246   if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
1247     //
1248     // Large page
1249     //
1250 
1251     //
    // Record the old (non-present) entries so they can be restored later.
    // The old entries cover both the page the faulting instruction resides in and the page it accesses.
1254     //
1255     //
1256     ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1257     if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1258       PFIndex = mPFEntryCount[CpuIndex];
1259       mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
1260       mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1261       mPFEntryCount[CpuIndex]++;
1262     }
1263 
1264     //
1265     // Set new entry
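    // Map the 2MB page that contains PFAddress as present and writable. If the fault
    // was an instruction fetch (IA32_PF_EC_ID set), clear NX as well so the fetch can
    // complete; the #DB handler restores the original entry afterwards.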
1266     //
1267     PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
1268     PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
1269     PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1270     if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1271       PageTable[PTIndex] &= ~IA32_PG_NX;
1272     }
1273   } else {
1274     //
1275     // Small page
1276     //
1277     ASSERT (PageTable[PTIndex] != 0);
1278     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1279 
1280     //
1281     // 4K PTE
1282     //
1283     PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
1284 
1285     //
    // Record the old (non-present) entries so they can be restored later.
    // The old entries cover both the page the faulting instruction resides in and the page it accesses.
1288     //
1289     //
1290     ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1291     if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1292       PFIndex = mPFEntryCount[CpuIndex];
1293       mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
1294       mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1295       mPFEntryCount[CpuIndex]++;
1296     }
1297 
1298     //
1299     // Set new entry
1300     //
1301     PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
1302     PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1303     if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1304       PageTable[PTIndex] &= ~IA32_PG_NX;
1305     }
1306   }
1307 }
1308 
1309 /**
1310   Handler for Page Fault triggered by Guard page.
1311 
1312   @param  ErrorCode  The Error code of exception.
1313 
1314 **/
1315 VOID
GuardPagePFHandler (
1317   UINTN ErrorCode
1318   )
1319 {
1320   UINT64                *PageTable;
1321   UINT64                PFAddress;
1322   UINT64                RestoreAddress;
1323   UINTN                 RestorePageNumber;
1324   UINTN                 CpuIndex;
1325 
1326   PageTable         = (UINT64 *)AsmReadCr3 ();
1327   PFAddress         = AsmReadCr2 ();
1328   CpuIndex          = GetCpuIndex ();
1329 
1330   //
  // A memory operation that crosses pages, such as a "rep mov" instruction, would
  // cause an infinite loop between this handler and the debug trap handler. We have
  // to make sure that the current page and the page that follows are both PRESENT.
1334   //
1335   RestorePageNumber = 2;
1336   RestoreAddress = PFAddress;
1337   while (RestorePageNumber > 0) {
1338     RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1339     RestoreAddress += EFI_PAGE_SIZE;
1340     RestorePageNumber--;
1341   }
1342 
1343   //
1344   // Flush TLB
1345   //
1346   CpuFlushTlb ();
1347 }
1348 
1349 /**
1350   The Page fault handler to save SMM profile data.
1351 
1352   @param  Rip        The RIP when exception happens.
1353   @param  ErrorCode  The Error code of exception.
1354 
1355 **/
1356 VOID
SmmProfilePFHandler (
1358   UINTN Rip,
1359   UINTN ErrorCode
1360   )
1361 {
1362   UINT64                *PageTable;
1363   UINT64                PFAddress;
1364   UINT64                RestoreAddress;
1365   UINTN                 RestorePageNumber;
1366   UINTN                 CpuIndex;
1367   UINTN                 Index;
1368   UINT64                InstructionAddress;
1369   UINTN                 MaxEntryNumber;
1370   UINTN                 CurrentEntryNumber;
1371   BOOLEAN               IsValidPFAddress;
1372   SMM_PROFILE_ENTRY     *SmmProfileEntry;
1373   UINT64                SmiCommand;
1374   EFI_STATUS            Status;
1375   UINT8                 SoftSmiValue;
1376   EFI_SMM_SAVE_STATE_IO_INFO    IoInfo;
1377 
1378   if (!mSmmProfileStart) {
1379     //
1380     // If SMM profile does not start, call original page fault handler.
1381     //
1382     SmiDefaultPFHandler ();
1383     return;
1384   }
1385 
1386   if (mBtsSupported) {
1387     DisableBTS ();
1388   }
1389 
1390   IsValidPFAddress  = FALSE;
1391   PageTable         = (UINT64 *)AsmReadCr3 ();
1392   PFAddress         = AsmReadCr2 ();
1393   CpuIndex          = GetCpuIndex ();
1394 
1395   //
  // A memory operation that crosses pages, such as a "rep mov" instruction, would
  // cause an infinite loop between this handler and the debug trap handler. We have
  // to make sure that the current page and the page that follows are both PRESENT.
1399   //
1400   RestorePageNumber = 2;
1401   RestoreAddress = PFAddress;
1402   while (RestorePageNumber > 0) {
1403     if (RestoreAddress <= 0xFFFFFFFF) {
1404       RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1405     } else {
1406       RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
1407     }
1408     RestoreAddress += EFI_PAGE_SIZE;
1409     RestorePageNumber--;
1410   }
1411 
1412   if (!IsValidPFAddress) {
1413     InstructionAddress = Rip;
1414     if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
1415       //
1416       // If it is instruction fetch failure, get the correct IP from BTS.
1417       //
1418       InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
1419       if (InstructionAddress == 0) {
1420         //
        // No matching BTS record was found, which indicates the instruction that caused the
        // page fault is not a branch target; use the page fault address as the instruction address.
1423         //
1424         InstructionAddress = PFAddress;
1425       }
1426     }
1427 
1428     //
1429     // Indicate it is not software SMI
1430     //
1431     SmiCommand    = 0xFFFFFFFFFFFFFFFFULL;
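    //
    // Scan every CPU's SMM save state for an I/O access to the SMI command port. If one
    // is found, this SMI was a software SMI and the value read back from the SMI command
    // port is recorded as the SMI command.
    //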
1432     for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1433       Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
1434       if (EFI_ERROR (Status)) {
1435         continue;
1436       }
1437       if (IoInfo.IoPort == mSmiCommandPort) {
1438         //
1439         // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1440         //
1441         SoftSmiValue = IoRead8 (mSmiCommandPort);
1442         SmiCommand = (UINT64)SoftSmiValue;
1443         break;
1444       }
1445     }
1446 
1447     SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
1448     //
1449     // Check if there is already a same entry in profile data.
1450     //
1451     for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
1452       if ((SmmProfileEntry[Index].ErrorCode   == (UINT64)ErrorCode) &&
1453           (SmmProfileEntry[Index].Address     == PFAddress) &&
1454           (SmmProfileEntry[Index].CpuNum      == (UINT64)CpuIndex) &&
1455           (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
1456           (SmmProfileEntry[Index].SmiCmd      == SmiCommand)) {
1457         //
        // The same record already exists; there is no need to save it again.
1459         //
1460         break;
1461       }
1462     }
1463     if (Index == mSmmProfileBase->CurDataEntries) {
1464       CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
1465       MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
1466       if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
1467         CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
1468       }
1469       if (CurrentEntryNumber < MaxEntryNumber) {
1470         //
1471         // Log the new entry
1472         //
1473         SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
1474         SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
1475         SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
1476         SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
1477         SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
1478         SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
1479         SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
1480         //
1481         // Update current entry index and data size in the header.
1482         //
1483         mSmmProfileBase->CurDataEntries++;
1484         mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
1485       }
1486     }
1487   }
1488   //
1489   // Flush TLB
1490   //
1491   CpuFlushTlb ();
1492 
1493   if (mBtsSupported) {
1494     EnableBTS ();
1495   }
1496 }
1497 
1498 /**
  Replace the INT1 (#DB) exception handler with one that restores the page table to its
  absent/execute-disable state, so that a page fault is triggered again and SMM profile
  data can be saved.
1501 
1502 **/
1503 VOID
InitIdtr (
1505   VOID
1506   )
1507 {
1508   EFI_STATUS                        Status;
1509 
1510   Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1511   ASSERT_EFI_ERROR (Status);
1512 }
1513