1 /** @file
2 Enable SMM profile.
3 
4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>
6 
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8 
9 **/
10 
11 #include "PiSmmCpuDxeSmm.h"
12 #include "SmmProfileInternal.h"
13 
14 UINT32                    mSmmProfileCr3;
15 
16 SMM_PROFILE_HEADER        *mSmmProfileBase;
17 MSR_DS_AREA_STRUCT        *mMsrDsAreaBase;
18 //
// The size of the buffer to store SMM profile data.
20 //
21 UINTN                     mSmmProfileSize;
22 
23 //
// The size of the buffer used for branch trace store (DS save area).
25 //
26 UINTN                     mMsrDsAreaSize   = SMM_PROFILE_DTS_SIZE;
27 
28 //
29 // The flag indicates if execute-disable is supported by processor.
30 //
31 BOOLEAN                   mXdSupported     = TRUE;
32 
33 //
34 // The flag indicates if execute-disable is enabled on processor.
35 //
36 BOOLEAN                   mXdEnabled       = FALSE;
37 
38 //
39 // The flag indicates if BTS is supported by processor.
40 //
41 BOOLEAN                   mBtsSupported     = TRUE;
42 
43 //
44 // The flag indicates if SMM profile starts to record data.
45 //
46 BOOLEAN                   mSmmProfileStart = FALSE;
47 
48 //
// The flag indicates if #DB will be set up in the #PF handler.
50 //
51 BOOLEAN                   mSetupDebugTrap = FALSE;
52 
53 //
54 // Record the page fault exception count for one instruction execution.
55 //
56 UINTN                     *mPFEntryCount;
57 
58 UINT64                    (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
59 UINT64                    *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
60 
61 MSR_DS_AREA_STRUCT        **mMsrDsArea;
62 BRANCH_TRACE_RECORD       **mMsrBTSRecord;
63 UINTN                     mBTSRecordNumber;
64 PEBS_RECORD               **mMsrPEBSRecord;
65 
66 //
// These memory ranges are always present. They do not generate access-type page fault exceptions,
// but they may generate instruction-fetch-type page fault exceptions.
69 //
70 MEMORY_PROTECTION_RANGE   *mProtectionMemRange     = NULL;
71 UINTN                     mProtectionMemRangeCount = 0;
72 
73 //
74 // Some predefined memory ranges.
75 //
76 MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
77   //
78   // SMRAM range (to be fixed in runtime).
79   // It is always present and instruction fetches are allowed.
80   //
81   {{0x00000000, 0x00000000},TRUE,FALSE},
82 
83   //
  // SMM profile data range (to be fixed in runtime).
85   // It is always present and instruction fetches are not allowed.
86   //
87   {{0x00000000, 0x00000000},TRUE,TRUE},
88 
89   //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
91   // It is always present and instruction fetches are allowed.
92   // {{0x00000000, 0x00000000},TRUE,FALSE},
93   //
94 
95   //
96   // Future extended range could be added here.
97   //
98 
99   //
100   // PCI MMIO ranges (to be added in runtime).
101   // They are always present and instruction fetches are not allowed.
102   //
103 };
104 
105 //
106 // These memory ranges are mapped by 4KB-page instead of 2MB-page.
107 //
108 MEMORY_RANGE              *mSplitMemRange          = NULL;
109 UINTN                     mSplitMemRangeCount      = 0;
110 
111 //
112 // SMI command port.
113 //
114 UINT32                    mSmiCommandPort;
115 
116 /**
117   Disable branch trace store.
118 
119 **/
120 VOID
121 DisableBTS (
122   VOID
123   )
124 {
125   AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
126 }
127 
128 /**
129   Enable branch trace store.
130 
131 **/
132 VOID
133 EnableBTS (
134   VOID
135   )
136 {
137   AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
138 }
139 
140 /**
  Get the CPU index from the current processor's APIC ID.

  @return The index of the processor.

143 **/
144 UINTN
145 GetCpuIndex (
146   VOID
147   )
148 {
149   UINTN     Index;
150   UINT32    ApicId;
151 
152   ApicId = GetApicId ();
153 
154   for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
155     if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
156       return Index;
157     }
158   }
159   ASSERT (FALSE);
160   return 0;
161 }
162 
163 /**
  Get the source IP of the branch that targeted DestinationIP, using the BTS records,
  after an execute-disable exception is triggered.

  @param  CpuIndex        The index of the CPU.
  @param  DestinationIP   The destination address (the faulting IP).

  @return The source address of the branch, or 0 if no matching BTS record is found.

169 **/
170 UINT64
171 GetSourceFromDestinationOnBts (
172   UINTN  CpuIndex,
173   UINT64 DestinationIP
174   )
175 {
176   BRANCH_TRACE_RECORD  *CurrentBTSRecord;
177   UINTN                Index;
178   BOOLEAN              FirstMatch;
179 
180   FirstMatch = FALSE;
181 
182   CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
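  //
  // Walk the BTS records backwards from the current BTS index, wrapping to the end
  // of the buffer on underflow.
  //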
183   for (Index = 0; Index < mBTSRecordNumber; Index++) {
184     if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
185       //
186       // Underflow
187       //
188       CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
189       CurrentBTSRecord --;
190     }
191     if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
192       //
      // Found a record that branched to the destination; find the first match, then the second.
194       //
195       if (!FirstMatch) {
196         //
        // The first match corresponds to the #DB (debug) exception.
198         //
199         FirstMatch = TRUE;
200       } else {
201         //
        // The second match is the branch we want; return its source address.
203         //
204         return CurrentBTSRecord->LastBranchFrom;
205       }
206     }
207     CurrentBTSRecord--;
208   }
209 
210   return 0;
211 }
212 
213 /**
214   SMM profile specific INT 1 (single-step) exception handler.
215 
  @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is processor architecture specific.
218   @param  SystemContext    A pointer to the processor context when
219                            the interrupt occurred on the processor.
220 **/
221 VOID
222 EFIAPI
223 DebugExceptionHandler (
224     IN EFI_EXCEPTION_TYPE   InterruptType,
225     IN EFI_SYSTEM_CONTEXT   SystemContext
226   )
227 {
228   UINTN  CpuIndex;
229   UINTN  PFEntry;
230 
231   if (!mSmmProfileStart &&
232       !HEAP_GUARD_NONSTOP_MODE &&
233       !NULL_DETECTION_NONSTOP_MODE) {
234     return;
235   }
236   CpuIndex = GetCpuIndex ();
237 
238   //
239   // Clear last PF entries
240   //
241   for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
242     *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
243   }
244 
245   //
246   // Reset page fault exception count for next page fault.
247   //
248   mPFEntryCount[CpuIndex] = 0;
249 
250   //
251   // Flush TLB
252   //
253   CpuFlushTlb ();
254 
255   //
256   // Clear TF in EFLAGS
257   //
258   ClearTrapFlag (SystemContext);
259 }
260 
261 /**
262   Check if the input address is in SMM ranges.
263 
264   @param[in]  Address       The input address.
265 
266   @retval TRUE     The input address is in SMM.
267   @retval FALSE    The input address is not in SMM.
268 **/
269 BOOLEAN
270 IsInSmmRanges (
271   IN EFI_PHYSICAL_ADDRESS   Address
272   )
273 {
274   UINTN  Index;
275 
276   if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
277     return TRUE;
278   }
279   for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
280     if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
281         Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
282       return TRUE;
283     }
284   }
285   return FALSE;
286 }
287 
288 /**
  Check if the memory address is valid (should be marked present in the page table),
  and whether it must be mapped as execute-disable.

  @param  Address  The address of the memory.
  @param  Nx       On return, indicates whether the memory is execute-disable.

  @retval TRUE     The address should be mapped as present.
  @retval FALSE    The address should be mapped as not present.
294 **/
295 BOOLEAN
296 IsAddressValid (
297   IN EFI_PHYSICAL_ADDRESS   Address,
298   IN BOOLEAN                *Nx
299   )
300 {
301   UINTN  Index;
302 
303   if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
304     //
305     // Check configuration
306     //
307     for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
308       if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
309         *Nx = mProtectionMemRange[Index].Nx;
310         return mProtectionMemRange[Index].Present;
311       }
312     }
313     *Nx = TRUE;
314     return FALSE;
315 
316   } else {
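    //
    // When SMM profile is disabled, all addresses are present; only SMRAM is
    // allowed to execute code.
    //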
317     *Nx = TRUE;
318     if (IsInSmmRanges (Address)) {
319       *Nx = FALSE;
320     }
321     return TRUE;
322   }
323 }
324 
325 /**
326   Check if the memory address will be mapped by 4KB-page.
327 
  @param  Address  The address of the memory.

  @retval TRUE     The address falls in a range that will be mapped by 4KB pages.
  @retval FALSE    The address will be mapped by a 2MB page.
330 **/
331 BOOLEAN
332 IsAddressSplit (
333   IN EFI_PHYSICAL_ADDRESS   Address
334   )
335 {
336   UINTN  Index;
337 
338   if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
339     //
340     // Check configuration
341     //
342     for (Index = 0; Index < mSplitMemRangeCount; Index++) {
343       if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
344         return TRUE;
345       }
346     }
347   } else {
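    //
    // When SMM profile is disabled, only the memory within 2MB of the bottom and
    // top of the SMRR range is mapped by 4KB pages.
    //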
348     if (Address < mCpuHotPlugData.SmrrBase) {
349       if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
350         return TRUE;
351       }
352     } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB))  {
353       if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
354         return TRUE;
355       }
356     }
357   }
358   //
359   // Return default
360   //
361   return FALSE;
362 }
363 
364 /**
365   Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
366 
367 **/
368 VOID
369 InitProtectedMemRange (
370   VOID
371   )
372 {
373   UINTN                            Index;
374   UINTN                            NumberOfDescriptors;
375   UINTN                            NumberOfAddedDescriptors;
376   UINTN                            NumberOfProtectRange;
377   UINTN                            NumberOfSpliteRange;
378   EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
379   UINTN                            TotalSize;
380   EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
381   EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
382   EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
383   EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
384   UINT64                           High4KBPageSize;
385   UINT64                           Low4KBPageSize;
386 
387   NumberOfDescriptors      = 0;
388   NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
389   NumberOfSpliteRange      = 0;
390   MemorySpaceMap           = NULL;
391 
392   //
393   // Get MMIO ranges from GCD and add them into protected memory ranges.
394   //
395   gDS->GetMemorySpaceMap (
396        &NumberOfDescriptors,
397        &MemorySpaceMap
398        );
399   for (Index = 0; Index < NumberOfDescriptors; Index++) {
400     if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
401       NumberOfAddedDescriptors++;
402     }
403   }
404 
405   if (NumberOfAddedDescriptors != 0) {
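    //
    // Reserve room for the template ranges plus one range per SMRAM descriptor and
    // one per MMIO descriptor counted above.
    //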
406     TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
407     mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
408     ASSERT (mProtectionMemRange != NULL);
409     mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);
410 
411     //
412     // Copy existing ranges.
413     //
414     CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));
415 
416     //
417     // Create split ranges which come from protected ranges.
418     //
419     TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
420     mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
421     ASSERT (mSplitMemRange != NULL);
422 
423     //
424     // Create SMM ranges which are set to present and execution-enable.
425     //
426     NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
427     for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
428       if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
429           mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
430         //
        // If the address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
432         //
433         break;
434       }
435       mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
436       mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
437       mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
438       mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
439       NumberOfProtectRange++;
440     }
441 
442     //
443     // Create MMIO ranges which are set to present and execution-disable.
444     //
445     for (Index = 0; Index < NumberOfDescriptors; Index++) {
446       if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
447         continue;
448       }
449       mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
450       mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
451       mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
452       mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
453       NumberOfProtectRange++;
454     }
455 
456     //
    // Check and update the actual protected memory range count.
458     //
459     ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
460     mProtectionMemRangeCount = NumberOfProtectRange;
461   }
462 
463   //
  // According to the protected ranges, create the ranges which will be mapped by 4KB pages.
465   //
466   NumberOfSpliteRange  = 0;
467   NumberOfProtectRange = mProtectionMemRangeCount;
468   for (Index = 0; Index < NumberOfProtectRange; Index++) {
469     //
    // If the range is not 2MB-aligned, round it out to 2MB alignment so that 4KB pages can be created in the page table.
471     //
472     ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
473     ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
474     if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress  & (SIZE_2MB - 1)) != 0)) {
475       //
      // Check if it is possible to map the non-2MB-aligned head and tail with 4KB pages
      // and the 2MB-aligned middle with 2MB pages.
      // A mix of 4KB and 2MB pages could save SMRAM space.
478       //
479       Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
480       Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
481       if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
482           ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
483         //
        // There is a range which can be mapped by 2MB pages.
485         //
486         High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
487         Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
488         if (High4KBPageSize != 0) {
489           //
          // Add the non-2MB-aligned tail to be mapped by 4KB pages.
491           //
492           mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
493           mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
494           NumberOfSpliteRange++;
495         }
496         if (Low4KBPageSize != 0) {
497           //
          // Add the non-2MB-aligned head to be mapped by 4KB pages.
499           //
500           mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
501           mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
502           NumberOfSpliteRange++;
503         }
504       } else {
505         //
506         // The range could only be mapped by 4KB-page.
507         //
508         mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
509         mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
510         NumberOfSpliteRange++;
511       }
512     }
513   }
514 
515   mSplitMemRangeCount = NumberOfSpliteRange;
516 
517   DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
518   for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
519     DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
520     DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top  = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
521   }
522   for (Index = 0; Index < mSplitMemRangeCount; Index++) {
523     DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
524     DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top  = %lx\n", Index, mSplitMemRange[Index].Top));
525   }
526 }
527 
528 /**
529   Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
530 
531 **/
532 VOID
533 InitPaging (
534   VOID
535   )
536 {
537   UINT64                            Pml5Entry;
538   UINT64                            Pml4Entry;
539   UINT64                            *Pml5;
540   UINT64                            *Pml4;
541   UINT64                            *Pdpt;
542   UINT64                            *Pd;
543   UINT64                            *Pt;
544   UINTN                             Address;
545   UINTN                             Pml5Index;
546   UINTN                             Pml4Index;
547   UINTN                             PdptIndex;
548   UINTN                             PdIndex;
549   UINTN                             PtIndex;
550   UINTN                             NumberOfPdptEntries;
551   UINTN                             NumberOfPml4Entries;
552   UINTN                             NumberOfPml5Entries;
553   UINTN                             SizeOfMemorySpace;
554   BOOLEAN                           Nx;
555   IA32_CR4                          Cr4;
556   BOOLEAN                           Enable5LevelPaging;
557 
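  //
  // Detect whether 5-level paging is enabled by checking CR4.LA57.
  //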
558   Cr4.UintN = AsmReadCr4 ();
559   Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
560 
561   if (sizeof (UINTN) == sizeof (UINT64)) {
562     if (!Enable5LevelPaging) {
563       Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
564       Pml5 = &Pml5Entry;
565     } else {
566       Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
567     }
568     SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
569     //
570     // Calculate the table entries of PML4E and PDPTE.
571     //
572     NumberOfPml5Entries = 1;
573     if (SizeOfMemorySpace > 48) {
574       NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
575       SizeOfMemorySpace = 48;
576     }
577 
578     NumberOfPml4Entries = 1;
579     if (SizeOfMemorySpace > 39) {
580       NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
581       SizeOfMemorySpace = 39;
582     }
583 
584     NumberOfPdptEntries = 1;
585     ASSERT (SizeOfMemorySpace > 30);
586     NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30);
587   } else {
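    //
    // In the 32-bit (PAE) case, CR3 points to the 4-entry PDPT. Fabricate one-entry
    // PML4 and PML5 tables on the stack so that the page table walk below is uniform.
    //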
588     Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
589     Pml4 = &Pml4Entry;
590     Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
591     Pml5 = &Pml5Entry;
592     NumberOfPml5Entries  = 1;
593     NumberOfPml4Entries  = 1;
594     NumberOfPdptEntries  = 4;
595   }
596 
597   //
598   // Go through page table and change 2MB-page into 4KB-page.
599   //
600   for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
601     if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
602       //
603       // If PML5 entry does not exist, skip it
604       //
605       continue;
606     }
607     Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
608     for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
609       if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
610         //
611         // If PML4 entry does not exist, skip it
612         //
613         continue;
614       }
615       Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
616       for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
617         if ((*Pdpt & IA32_PG_P) == 0) {
618           //
619           // If PDPT entry does not exist, skip it
620           //
621           continue;
622         }
623         if ((*Pdpt & IA32_PG_PS) != 0) {
624           //
625           // This is 1G entry, skip it
626           //
627           continue;
628         }
629         Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
630         if (Pd == 0) {
631           continue;
632         }
633         for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
634           if ((*Pd & IA32_PG_P) == 0) {
635             //
636             // If PD entry does not exist, skip it
637             //
638             continue;
639           }
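          //
          // Compute the linear address mapped by this PD entry from the PML5, PML4,
          // PDPT and PD indexes (each level contributes 9 bits; a PD entry covers 2MB).
          //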
640           Address = (UINTN) LShiftU64 (
641                               LShiftU64 (
642                                 LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
643                                 9
644                                 ) + PdIndex,
645                                 21
646                               );
647 
648           //
649           // If it is 2M page, check IsAddressSplit()
650           //
651           if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
652             //
653             // Based on current page table, create 4KB page table for split area.
654             //
655             ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
656 
657             Pt = AllocatePageTableMemory (1);
658             ASSERT (Pt != NULL);
659 
660             // Split it
661             for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
662               Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
663             } // end for PT
664             *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
665           } // end if IsAddressSplit
666         } // end for PD
667       } // end for PDPT
668     } // end for PML4
669   } // end for PML5
670 
671   //
672   // Go through page table and set several page table entries to absent or execute-disable.
673   //
674   DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
675   for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
676     if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
677       //
678       // If PML5 entry does not exist, skip it
679       //
680       continue;
681     }
682     Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
683     for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
684       if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
685         //
686         // If PML4 entry does not exist, skip it
687         //
688         continue;
689       }
690       Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
691       for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
692         if ((*Pdpt & IA32_PG_P) == 0) {
693           //
694           // If PDPT entry does not exist, skip it
695           //
696           continue;
697         }
698         if ((*Pdpt & IA32_PG_PS) != 0) {
699           //
700           // This is 1G entry, set NX bit and skip it
701           //
702           if (mXdSupported) {
703             *Pdpt = *Pdpt | IA32_PG_NX;
704           }
705           continue;
706         }
707         Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
708         if (Pd == 0) {
709           continue;
710         }
711         for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
712           if ((*Pd & IA32_PG_P) == 0) {
713             //
714             // If PD entry does not exist, skip it
715             //
716             continue;
717           }
718           Address = (UINTN) LShiftU64 (
719                               LShiftU64 (
720                                 LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
721                                 9
722                                 ) + PdIndex,
723                                 21
724                               );
725 
726           if ((*Pd & IA32_PG_PS) != 0) {
727             // 2MB page
728 
729             if (!IsAddressValid (Address, &Nx)) {
730               //
731               // Patch to remove Present flag and RW flag
732               //
733               *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
734             }
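            //
            // If the range is execute-disable and XD is supported, set the NX bit.
            //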
735             if (Nx && mXdSupported) {
736               *Pd = *Pd | IA32_PG_NX;
737             }
738           } else {
739             // 4KB page
740             Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
741             if (Pt == 0) {
742               continue;
743             }
744             for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
745               if (!IsAddressValid (Address, &Nx)) {
746                 *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
747               }
748               if (Nx && mXdSupported) {
749                 *Pt = *Pt | IA32_PG_NX;
750               }
751               Address += SIZE_4KB;
752             } // end for PT
753           } // end if PS
754         } // end for PD
755       } // end for PDPT
756     } // end for PML4
757   } // end for PML5
758 
759   //
760   // Flush TLB
761   //
762   CpuFlushTlb ();
763   DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
764   //
765   // Set execute-disable flag
766   //
767   mXdEnabled = TRUE;
768 
769   return ;
770 }
771 
772 /**
  Get the system I/O port address of the SMI Command Port from the FADT table.
774 
775 **/
776 VOID
777 GetSmiCommandPort (
778   VOID
779   )
780 {
781   EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
782 
783   Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
784                                                          EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
785                                                          );
786   ASSERT (Fadt != NULL);
787 
788   mSmiCommandPort = Fadt->SmiCmd;
789   DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
790 }
791 
792 /**
  Updates the page table to make some memory ranges (like system memory) absent
  and some memory ranges (like MMIO) present and execute-disable. It also
  updates 2MB pages to 4KB pages for some memory ranges.
796 
797 **/
798 VOID
799 SmmProfileStart (
800   VOID
801   )
802 {
803   //
804   // The flag indicates SMM profile starts to work.
805   //
806   mSmmProfileStart = TRUE;
807 }
808 
809 /**
810   Initialize SMM profile in SmmReadyToLock protocol callback function.
811 
812   @param  Protocol   Points to the protocol's unique identifier.
813   @param  Interface  Points to the interface instance.
814   @param  Handle     The handle on which the interface was installed.
815 
816   @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
817 **/
818 EFI_STATUS
819 EFIAPI
820 InitSmmProfileCallBack (
821   IN CONST EFI_GUID  *Protocol,
822   IN VOID            *Interface,
823   IN EFI_HANDLE      Handle
824   )
825 {
826   //
  // Save the SMM profile base address to a UEFI variable so that the SMM profile data can be found.
828   //
829   gRT->SetVariable (
830          SMM_PROFILE_NAME,
831          &gEfiCallerIdGuid,
832          EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
833          sizeof(mSmmProfileBase),
834          &mSmmProfileBase
835          );
836 
837   //
838   // Get Software SMI from FADT
839   //
840   GetSmiCommandPort ();
841 
842   //
843   // Initialize protected memory range for patching page table later.
844   //
845   InitProtectedMemRange ();
846 
847   return EFI_SUCCESS;
848 }
849 
850 /**
851   Initialize SMM profile data structures.
852 
853 **/
854 VOID
855 InitSmmProfileInternal (
856   VOID
857   )
858 {
859   EFI_STATUS                 Status;
860   EFI_PHYSICAL_ADDRESS       Base;
861   VOID                       *Registration;
862   UINTN                      Index;
863   UINTN                      MsrDsAreaSizePerCpu;
864   UINTN                      TotalSize;
865 
866   mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
867   ASSERT (mPFEntryCount != NULL);
868   mLastPFEntryValue = (UINT64  (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
869                                                          sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
870   ASSERT (mLastPFEntryValue != NULL);
871   mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
872                                                            sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
873   ASSERT (mLastPFEntryPointer != NULL);
874 
875   //
  // Allocate memory for the SMM profile data below 4GB.
  // The base address holds the SMM profile header and data; when BTS is supported,
  // the DS save area is placed immediately after the profile data.
878   //
879   mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
880   ASSERT ((mSmmProfileSize & 0xFFF) == 0);
881 
882   if (mBtsSupported) {
883     TotalSize = mSmmProfileSize + mMsrDsAreaSize;
884   } else {
885     TotalSize = mSmmProfileSize;
886   }
887 
888   Base = 0xFFFFFFFF;
889   Status = gBS->AllocatePages (
890                   AllocateMaxAddress,
891                   EfiReservedMemoryType,
892                   EFI_SIZE_TO_PAGES (TotalSize),
893                   &Base
894                   );
895   ASSERT_EFI_ERROR (Status);
896   ZeroMem ((VOID *)(UINTN)Base, TotalSize);
897   mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;
898 
899   //
900   // Initialize SMM profile data header.
901   //
902   mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
903   mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
904   mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
905   mSmmProfileBase->CurDataEntries = 0;
906   mSmmProfileBase->CurDataSize    = 0;
907   mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
908   mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
909   mSmmProfileBase->NumSmis        = 0;
910   mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
911 
912   if (mBtsSupported) {
913     mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
914     ASSERT (mMsrDsArea != NULL);
915     mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
916     ASSERT (mMsrBTSRecord != NULL);
917     mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
918     ASSERT (mMsrPEBSRecord != NULL);
919 
920     mMsrDsAreaBase  = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
921     MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
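    //
    // Each CPU's DS save area holds the DS management fields, PEBS_RECORD_NUMBER PEBS
    // records at its top, and as many BTS records as fit in the remaining space.
    //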
922     mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
923     for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
924       mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
925       mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
926       mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);
927 
928       mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
929       mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
930       mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
931       mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;
932 
933       mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
934       mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
935       mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
936       mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
937     }
938   }
939 
940   mProtectionMemRange      = mProtectionMemRangeTemplate;
941   mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
942 
943   //
944   // Update TSeg entry.
945   //
946   mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
947   mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;
948 
949   //
950   // Update SMM profile entry.
951   //
952   mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
953   mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;
954 
955   //
956   // Allocate memory reserved for creating 4KB pages.
957   //
958   InitPagesForPFHandler ();
959 
960   //
961   // Start SMM profile when SmmReadyToLock protocol is installed.
962   //
963   Status = gSmst->SmmRegisterProtocolNotify (
964                     &gEfiSmmReadyToLockProtocolGuid,
965                     InitSmmProfileCallBack,
966                     &Registration
967                     );
968   ASSERT_EFI_ERROR (Status);
969 
970   return ;
971 }
972 
973 /**
  Check if the XD, BTS, and CET features are supported by the processor.
975 
976 **/
977 VOID
978 CheckFeatureSupported (
979   VOID
980   )
981 {
982   UINT32                         RegEax;
983   UINT32                         RegEcx;
984   UINT32                         RegEdx;
985   MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;
986 
987   if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
988     AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
989     if (RegEax <= CPUID_EXTENDED_FUNCTION) {
990       mCetSupported = FALSE;
991       PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
992     }
993     AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
994     if ((RegEcx & CPUID_CET_SS) == 0) {
995       mCetSupported = FALSE;
996       PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
997     }
998   }
999 
1000   if (mXdSupported) {
1001     AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
1002     if (RegEax <= CPUID_EXTENDED_FUNCTION) {
1003       //
1004       // Extended CPUID functions are not supported on this processor.
1005       //
1006       mXdSupported = FALSE;
1007       PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
1008     }
1009 
1010     AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
1011     if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
1012       //
1013       // Execute Disable Bit feature is not supported on this processor.
1014       //
1015       mXdSupported = FALSE;
1016       PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
1017     }
1018 
1019     if (StandardSignatureIsAuthenticAMD ()) {
1020       //
1021       // AMD processors do not support MSR_IA32_MISC_ENABLE
1022       //
1023       PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
1024     }
1025   }
1026 
1027   if (mBtsSupported) {
1028     AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
1029     if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
1030       //
1031       // Per IA32 manuals:
1032       // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
1033       // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
1034       //    availability of the BTS facilities, including the ability to set the BTS and
1035       //    BTINT bits in the MSR_DEBUGCTLA MSR.
1036       // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
1037       //
1038       MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
1039       if (MiscEnableMsr.Bits.BTS == 1) {
1040         //
        // BTS facilities are not supported if the MSR_IA32_MISC_ENABLE.BTS bit is set.
1042         //
1043         mBtsSupported = FALSE;
1044       }
1045     }
1046   }
1047 }
1048 
1049 /**
1050   Enable single step.
1051 
1052 **/
1053 VOID
1054 ActivateSingleStepDB (
1055   VOID
1056   )
1057 {
1058   UINTN    Dr6;
1059 
1060   Dr6 = AsmReadDr6 ();
1061   if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1062     return;
1063   }
1064   Dr6 |= DR6_SINGLE_STEP;
1065   AsmWriteDr6 (Dr6);
1066 }
1067 
1068 /**
1069   Enable last branch.
1070 
1071 **/
1072 VOID
1073 ActivateLBR (
1074   VOID
1075   )
1076 {
1077   UINT64  DebugCtl;
1078 
1079   DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1080   if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1081     return ;
1082   }
1083   DebugCtl |= MSR_DEBUG_CTL_LBR;
1084   AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1085 }
1086 
1087 /**
1088   Enable branch trace store.
1089 
1090   @param  CpuIndex  The index of the processor.
1091 
1092 **/
1093 VOID
1094 ActivateBTS (
1095   IN      UINTN                     CpuIndex
1096   )
1097 {
1098   UINT64  DebugCtl;
1099 
1100   DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1101   if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1102     return ;
1103   }
1104 
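  //
  // Point IA32_DS_AREA at this CPU's DS save area, then enable BTS and TR with BTINT
  // cleared so the BTS buffer behaves as a circular buffer (no interrupt on overflow).
  //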
1105   AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1106   DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1107   DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1108   AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1109 }
1110 
1111 /**
  Increment the SMI count recorded in the SMM profile header on each SMI entry.
1113 
1114 **/
1115 VOID
1116 SmmProfileRecordSmiNum (
1117   VOID
1118   )
1119 {
1120   if (mSmmProfileStart) {
1121     mSmmProfileBase->NumSmis++;
1122   }
1123 }
1124 
1125 /**
1126   Initialize processor environment for SMM profile.
1127 
1128   @param  CpuIndex  The index of the processor.
1129 
1130 **/
1131 VOID
1132 ActivateSmmProfile (
1133   IN UINTN CpuIndex
1134   )
1135 {
1136   //
  // Enable single-step #DB exception.
1138   //
1139   ActivateSingleStepDB ();
1140 
1141   if (mBtsSupported) {
1142     //
    // We cannot get useful information from LER (Last Exception Record), so we have to use BTS.
1144     //
1145     ActivateLBR ();
1146 
1147     //
1148     // Enable BTS
1149     //
1150     ActivateBTS (CpuIndex);
1151   }
1152 }
1153 
1154 /**
1155   Initialize SMM profile in SMM CPU entry point.
1156 
1157   @param[in] Cr3  The base address of the page tables to use in SMM.
1158 
1159 **/
1160 VOID
1161 InitSmmProfile (
1162   UINT32  Cr3
1163   )
1164 {
1165   //
1166   // Save Cr3
1167   //
1168   mSmmProfileCr3 = Cr3;
1169 
1170   //
1171   // Skip SMM profile initialization if feature is disabled
1172   //
1173   if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
1174       !HEAP_GUARD_NONSTOP_MODE &&
1175       !NULL_DETECTION_NONSTOP_MODE) {
1176     return;
1177   }
1178 
1179   //
1180   // Initialize SmmProfile here
1181   //
1182   InitSmmProfileInternal ();
1183 
1184   //
1185   // Initialize profile IDT.
1186   //
1187   InitIdtr ();
1188 
1189   //
1190   // Tell #PF handler to prepare a #DB subsequently.
1191   //
1192   mSetupDebugTrap = TRUE;
1193 }
1194 
1195 /**
  Update the page table to map the memory correctly so that the instruction which
  caused the page fault can execute successfully. It also saves the original page
  table entries so they can be restored in the single-step (#DB) exception handler.
1199 
1200   @param  PageTable           PageTable Address.
1201   @param  PFAddress           The memory address which caused page fault exception.
1202   @param  CpuIndex            The index of the processor.
1203   @param  ErrorCode           The Error code of exception.
1204 
1205 **/
1206 VOID
1207 RestorePageTableBelow4G (
1208   UINT64        *PageTable,
1209   UINT64        PFAddress,
1210   UINTN         CpuIndex,
1211   UINTN         ErrorCode
1212   )
1213 {
1214   UINTN         PTIndex;
1215   UINTN         PFIndex;
1216   IA32_CR4      Cr4;
1217   BOOLEAN       Enable5LevelPaging;
1218 
1219   Cr4.UintN = AsmReadCr4 ();
1220   Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
1221 
1222   //
1223   // PML5
1224   //
1225   if (Enable5LevelPaging) {
1226     PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
1227     ASSERT (PageTable[PTIndex] != 0);
1228     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1229   }
1230 
1231   //
1232   // PML4
1233   //
1234   if (sizeof(UINT64) == sizeof(UINTN)) {
1235     PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
1236     ASSERT (PageTable[PTIndex] != 0);
1237     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1238   }
1239 
1240   //
1241   // PDPTE
1242   //
1243   PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
1244   ASSERT (PageTable[PTIndex] != 0);
1245   PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1246 
1247   //
1248   // PD
1249   //
1250   PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
1251   if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
1252     //
1253     // Large page
1254     //
1255 
1256     //
    // Record the old (non-present) entries so they can be restored after single-stepping.
    // Old entries include the page the faulting instruction is on and the page the
    // instruction accesses.
    //
1261     ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1262     if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1263       PFIndex = mPFEntryCount[CpuIndex];
1264       mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
1265       mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1266       mPFEntryCount[CpuIndex]++;
1267     }
1268 
1269     //
1270     // Set new entry
1271     //
1272     PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
1273     PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
1274     PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
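    //
    // If the fault was an instruction fetch (the ID bit is set in the error code),
    // clear the NX bit so that code can execute from this page.
    //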
1275     if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1276       PageTable[PTIndex] &= ~IA32_PG_NX;
1277     }
1278   } else {
1279     //
1280     // Small page
1281     //
1282     ASSERT (PageTable[PTIndex] != 0);
1283     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1284 
1285     //
1286     // 4K PTE
1287     //
1288     PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
1289 
1290     //
    // Record the old (non-present) entries so they can be restored after single-stepping.
    // Old entries include the page the faulting instruction is on and the page the
    // instruction accesses.
    //
1295     ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1296     if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1297       PFIndex = mPFEntryCount[CpuIndex];
1298       mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
1299       mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1300       mPFEntryCount[CpuIndex]++;
1301     }
1302 
1303     //
1304     // Set new entry
1305     //
1306     PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
1307     PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
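    //
    // As above, allow execution from this page if the fault was an instruction fetch.
    //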
1308     if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1309       PageTable[PTIndex] &= ~IA32_PG_NX;
1310     }
1311   }
1312 }
1313 
1314 /**
1315   Handler for Page Fault triggered by Guard page.
1316 
1317   @param  ErrorCode  The Error code of exception.
1318 
1319 **/
1320 VOID
1321 GuardPagePFHandler (
1322   UINTN ErrorCode
1323   )
1324 {
1325   UINT64                *PageTable;
1326   UINT64                PFAddress;
1327   UINT64                RestoreAddress;
1328   UINTN                 RestorePageNumber;
1329   UINTN                 CpuIndex;
1330 
1331   PageTable         = (UINT64 *)AsmReadCr3 ();
1332   PFAddress         = AsmReadCr2 ();
1333   CpuIndex          = GetCpuIndex ();
1334 
1335   //
  // A memory operation that crosses pages, like a "rep mov" instruction, would cause
  // an infinite loop between this handler and the Debug Trap handler. We have to make
  // sure that the current page and the following page are both in the PRESENT state.
1339   //
1340   RestorePageNumber = 2;
1341   RestoreAddress = PFAddress;
1342   while (RestorePageNumber > 0) {
1343     RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1344     RestoreAddress += EFI_PAGE_SIZE;
1345     RestorePageNumber--;
1346   }
1347 
1348   //
1349   // Flush TLB
1350   //
1351   CpuFlushTlb ();
1352 }
1353 
1354 /**
1355   The Page fault handler to save SMM profile data.
1356 
1357   @param  Rip        The RIP when exception happens.
1358   @param  ErrorCode  The Error code of exception.
1359 
1360 **/
1361 VOID
1362 SmmProfilePFHandler (
1363   UINTN Rip,
1364   UINTN ErrorCode
1365   )
1366 {
1367   UINT64                *PageTable;
1368   UINT64                PFAddress;
1369   UINT64                RestoreAddress;
1370   UINTN                 RestorePageNumber;
1371   UINTN                 CpuIndex;
1372   UINTN                 Index;
1373   UINT64                InstructionAddress;
1374   UINTN                 MaxEntryNumber;
1375   UINTN                 CurrentEntryNumber;
1376   BOOLEAN               IsValidPFAddress;
1377   SMM_PROFILE_ENTRY     *SmmProfileEntry;
1378   UINT64                SmiCommand;
1379   EFI_STATUS            Status;
1380   UINT8                 SoftSmiValue;
1381   EFI_SMM_SAVE_STATE_IO_INFO    IoInfo;
1382 
1383   if (!mSmmProfileStart) {
1384     //
    // If SMM profile has not started, call the default page fault handler.
1386     //
1387     SmiDefaultPFHandler ();
1388     return;
1389   }
1390 
1391   if (mBtsSupported) {
1392     DisableBTS ();
1393   }
1394 
1395   IsValidPFAddress  = FALSE;
1396   PageTable         = (UINT64 *)AsmReadCr3 ();
1397   PFAddress         = AsmReadCr2 ();
1398   CpuIndex          = GetCpuIndex ();
1399 
1400   //
  // A memory operation that crosses pages, like a "rep mov" instruction, would cause
  // an infinite loop between this handler and the Debug Trap handler. We have to make
  // sure that the current page and the following page are both in the PRESENT state.
1404   //
1405   RestorePageNumber = 2;
1406   RestoreAddress = PFAddress;
1407   while (RestorePageNumber > 0) {
1408     if (RestoreAddress <= 0xFFFFFFFF) {
1409       RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1410     } else {
1411       RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
1412     }
1413     RestoreAddress += EFI_PAGE_SIZE;
1414     RestorePageNumber--;
1415   }
1416 
1417   if (!IsValidPFAddress) {
1418     InstructionAddress = Rip;
1419     if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
1420       //
1421       // If it is instruction fetch failure, get the correct IP from BTS.
1422       //
1423       InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
1424       if (InstructionAddress == 0) {
1425         //
        // The instruction which caused the page fault is not a branch instruction,
        // so set the instruction address to the page fault address.
1428         //
1429         InstructionAddress = PFAddress;
1430       }
1431     }
1432 
1433     //
1434     // Indicate it is not software SMI
1435     //
1436     SmiCommand    = 0xFFFFFFFFFFFFFFFFULL;
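    //
    // Check each CPU's save state for an I/O access to the SMI command port; if one
    // is found, this SMI was a software SMI, so read the command value from the port.
    //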
1437     for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1438       Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
1439       if (EFI_ERROR (Status)) {
1440         continue;
1441       }
1442       if (IoInfo.IoPort == mSmiCommandPort) {
1443         //
1444         // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1445         //
1446         SoftSmiValue = IoRead8 (mSmiCommandPort);
1447         SmiCommand = (UINT64)SoftSmiValue;
1448         break;
1449       }
1450     }
1451 
1452     SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
1453     //
1454     // Check if there is already a same entry in profile data.
1455     //
1456     for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
1457       if ((SmmProfileEntry[Index].ErrorCode   == (UINT64)ErrorCode) &&
1458           (SmmProfileEntry[Index].Address     == PFAddress) &&
1459           (SmmProfileEntry[Index].CpuNum      == (UINT64)CpuIndex) &&
1460           (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
1461           (SmmProfileEntry[Index].SmiCmd      == SmiCommand)) {
1462         //
        // The same record already exists; there is no need to save it again.
1464         //
1465         break;
1466       }
1467     }
1468     if (Index == mSmmProfileBase->CurDataEntries) {
1469       CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
1470       MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
1471       if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
1472         CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
1473       }
1474       if (CurrentEntryNumber < MaxEntryNumber) {
1475         //
1476         // Log the new entry
1477         //
1478         SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
1479         SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
1480         SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
1481         SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
1482         SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
1483         SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
1484         SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
1485         //
1486         // Update current entry index and data size in the header.
1487         //
1488         mSmmProfileBase->CurDataEntries++;
1489         mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
1490       }
1491     }
1492   }
1493   //
1494   // Flush TLB
1495   //
1496   CpuFlushTlb ();
1497 
1498   if (mBtsSupported) {
1499     EnableBTS ();
1500   }
1501 }
1502 
1503 /**
  Replace the INT1 (#DB) exception handler to restore the page table to the
  absent/execute-disable state, so that a page fault is triggered again and SMM
  profile data can be saved.
1506 
1507 **/
1508 VOID
1509 InitIdtr (
1510   VOID
1511   )
1512 {
1513   EFI_STATUS                        Status;
1514 
1515   Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1516   ASSERT_EFI_ERROR (Status);
1517 }
1518