1 /** @file
2   Data type, macros and function prototypes of heap guard feature.
3 
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
6 
7 **/
8 
9 #ifndef _HEAPGUARD_H_
10 #define _HEAPGUARD_H_
11 
12 //
13 // Following macros are used to define and access the guarded memory bitmap
14 // table.
15 //
16 // To simplify the access and reduce the memory used for this table, the
17 // table is constructed in the similar way as page table structure but in
18 // reverse direction, i.e. from bottom growing up to top.
19 //
20 //    - 1-bit tracks 1 page (4KB)
21 //    - 1-UINT64 map entry tracks 256KB memory
22 //    - 1K-UINT64 map table tracks 256MB memory
23 //    - Five levels of tables can track any address of memory of 64-bit
24 //      system, like below.
25 //
26 //       512   *   512   *   512   *   512    *    1K   *  64b *     4K
27 //    111111111 111111111 111111111 111111111 1111111111 111111 111111111111
28 //    63        54        45        36        27         17     11         0
29 //       9b        9b        9b        9b         10b      6b       12b
30 //       L0   ->   L1   ->   L2   ->   L3   ->    L4   -> bits  ->  page
31 //      1FF       1FF       1FF       1FF         3FF      3F       FFF
32 //
33 // L4 table has 1K * sizeof(UINT64) = 8K (2-page), which can track 256MB
34 // memory. Each table of L0-L3 will be allocated when its memory address
35 // range is to be tracked. Only 1-page will be allocated each time. This
// can save the memory used to establish this map table.
37 //
38 // For a normal configuration of system with 4G memory, two levels of tables
39 // can track the whole memory, because two levels (L3+L4) of map tables have
// already covered 37 bits of memory address. And for a normal UEFI BIOS,
41 // less than 128M memory would be consumed during boot. That means we just
42 // need
43 //
44 //          1-page (L3) + 2-page (L4)
45 //
46 // memory (3 pages) to track the memory allocation works. In this case,
47 // there's no need to setup L0-L2 tables.
48 //
49 
50 //
51 // Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
52 // bits in address. (512 = 1 << 9)
53 //
54 #define BYTE_LENGTH_SHIFT                   3             // (8 = 1 << 3)
55 
56 #define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT  \
57         (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)
58 
59 #define GUARDED_HEAP_MAP_TABLE_DEPTH        5
60 
61 // Use UINT64_index + bit_index_of_UINT64 to locate the bit in may
62 #define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT    6             // (64 = 1 << 6)
63 
64 #define GUARDED_HEAP_MAP_ENTRY_BITS         \
65         (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)
66 
67 #define GUARDED_HEAP_MAP_ENTRY_BYTES        \
68         (GUARDED_HEAP_MAP_ENTRY_BITS / 8)
69 
70 // L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
71 #define GUARDED_HEAP_MAP_ENTRY_SHIFT              \
72         (GUARDED_HEAP_MAP_ENTRY_BITS              \
73          - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
74          - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT       \
75          - EFI_PAGE_SHIFT)
76 
77 // L4 table address mask: (1 << 10 - 1) = 0x3FF
78 #define GUARDED_HEAP_MAP_ENTRY_MASK               \
79         ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)
80 
81 // Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
82 #define GUARDED_HEAP_MAP_SIZE                     \
83         ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)
84 
85 // Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
86 #define GUARDED_HEAP_MAP_UNIT_SIZE                \
87         (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)
88 
89 // L4 table entry number: 8KB / 8 = 1024
90 #define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT         \
91         (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)
92 
93 // L4 table entry indexing
94 #define GUARDED_HEAP_MAP_ENTRY_INDEX(Address)                       \
95         (RShiftU64 (Address, EFI_PAGE_SHIFT                         \
96                              + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)    \
97          & GUARDED_HEAP_MAP_ENTRY_MASK)
98 
99 // L4 table entry bit indexing
100 #define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address)       \
101         (RShiftU64 (Address, EFI_PAGE_SHIFT)            \
102          & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))
103 
104 //
105 // Total bits (pages) tracked by one L4 table (65536-bit)
106 //
107 #define GUARDED_HEAP_MAP_BITS                               \
108         (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT                 \
109                + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))
110 
111 //
112 // Bit indexing inside the whole L4 table (0 - 65535)
113 //
114 #define GUARDED_HEAP_MAP_BIT_INDEX(Address)                     \
115         (RShiftU64 (Address, EFI_PAGE_SHIFT)                    \
116          & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT                 \
117                    + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))
118 
119 //
120 // Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
121 //
122 #define GUARDED_HEAP_MAP_TABLE_SHIFT                                      \
123         (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT  \
124          + EFI_PAGE_SHIFT)
125 
126 //
127 // Macro used to initialize the local array variable for map table traversing
128 // {55, 46, 37, 28, 18}
129 //
130 #define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS                                 \
131   {                                                                         \
132     GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3,  \
133     GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2,  \
134     GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT,      \
135     GUARDED_HEAP_MAP_TABLE_SHIFT,                                           \
136     EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT                       \
137   }
138 
139 //
140 // Masks used to extract address range of each level of table
141 // {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
142 //
143 #define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS                                  \
144   {                                                                         \
145     (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
146     (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
147     (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
148     (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
149     (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1                                 \
150   }
151 
152 //
153 // Memory type to guard (matching the related PCD definition)
154 //
155 #define GUARD_HEAP_TYPE_PAGE        BIT0
156 #define GUARD_HEAP_TYPE_POOL        BIT1
157 #define GUARD_HEAP_TYPE_FREED       BIT4
158 #define GUARD_HEAP_TYPE_ALL         \
159         (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_FREED)
160 
161 //
162 // Debug message level
163 //
164 #define HEAP_GUARD_DEBUG_LEVEL  (DEBUG_POOL|DEBUG_PAGE)
165 
166 typedef struct {
167   UINT32                TailMark;
168   UINT32                HeadMark;
169   EFI_PHYSICAL_ADDRESS  Address;
170   LIST_ENTRY            Link;
171 } HEAP_GUARD_NODE;
172 
173 /**
174   Internal function.  Converts a memory range to the specified type.
175   The range must exist in the memory map.
176 
177   @param  Start                  The first address of the range Must be page
178                                  aligned.
179   @param  NumberOfPages          The number of pages to convert.
180   @param  NewType                The new type for the memory range.
181 
182   @retval EFI_INVALID_PARAMETER  Invalid parameter.
183   @retval EFI_NOT_FOUND          Could not find a descriptor cover the specified
184                                  range or convertion not allowed.
185   @retval EFI_SUCCESS            Successfully converts the memory range to the
186                                  specified type.
187 
188 **/
189 EFI_STATUS
190 CoreConvertPages (
191   IN UINT64           Start,
192   IN UINT64           NumberOfPages,
193   IN EFI_MEMORY_TYPE  NewType
194   );
195 
196 /**
197   Allocate or free guarded memory.
198 
199   @param[in]  Start           Start address of memory to allocate or free.
200   @param[in]  NumberOfPages   Memory size in pages.
201   @param[in]  NewType         Memory type to convert to.
202 
203   @return VOID.
204 **/
205 EFI_STATUS
206 CoreConvertPagesWithGuard (
207   IN UINT64           Start,
208   IN UINTN            NumberOfPages,
209   IN EFI_MEMORY_TYPE  NewType
210   );
211 
212 /**
213   Set head Guard and tail Guard for the given memory range.
214 
215   @param[in]  Memory          Base address of memory to set guard for.
216   @param[in]  NumberOfPages   Memory size in pages.
217 
218   @return VOID.
219 **/
220 VOID
221 SetGuardForMemory (
222   IN EFI_PHYSICAL_ADDRESS   Memory,
223   IN UINTN                  NumberOfPages
224   );
225 
226 /**
227   Unset head Guard and tail Guard for the given memory range.
228 
229   @param[in]  Memory          Base address of memory to unset guard for.
230   @param[in]  NumberOfPages   Memory size in pages.
231 
232   @return VOID.
233 **/
234 VOID
235 UnsetGuardForMemory (
236   IN EFI_PHYSICAL_ADDRESS   Memory,
237   IN UINTN                  NumberOfPages
238   );
239 
240 /**
241   Adjust the base and number of pages to really allocate according to Guard.
242 
243   @param[in,out]  Memory          Base address of free memory.
244   @param[in,out]  NumberOfPages   Size of memory to allocate.
245 
246   @return VOID.
247 **/
248 VOID
249 AdjustMemoryA (
250   IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
251   IN OUT UINTN                   *NumberOfPages
252   );
253 
254 /**
255   Adjust the start address and number of pages to free according to Guard.
256 
257   The purpose of this function is to keep the shared Guard page with adjacent
258   memory block if it's still in guard, or free it if no more sharing. Another
259   is to reserve pages as Guard pages in partial page free situation.
260 
261   @param[in,out]  Memory          Base address of memory to free.
262   @param[in,out]  NumberOfPages   Size of memory to free.
263 
264   @return VOID.
265 **/
266 VOID
267 AdjustMemoryF (
268   IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
269   IN OUT UINTN                   *NumberOfPages
270   );
271 
272 /**
273   Adjust address of free memory according to existing and/or required Guard.
274 
275   This function will check if there're existing Guard pages of adjacent
276   memory blocks, and try to use it as the Guard page of the memory to be
277   allocated.
278 
279   @param[in]  Start           Start address of free memory block.
280   @param[in]  Size            Size of free memory block.
281   @param[in]  SizeRequested   Size of memory to allocate.
282 
283   @return The end address of memory block found.
284   @return 0 if no enough space for the required size of memory and its Guard.
285 **/
286 UINT64
287 AdjustMemoryS (
288   IN UINT64                  Start,
289   IN UINT64                  Size,
290   IN UINT64                  SizeRequested
291   );
292 
293 /**
294   Check to see if the pool at the given address should be guarded or not.
295 
296   @param[in]  MemoryType      Pool type to check.
297 
298 
299   @return TRUE  The given type of pool should be guarded.
300   @return FALSE The given type of pool should not be guarded.
301 **/
302 BOOLEAN
303 IsPoolTypeToGuard (
304   IN EFI_MEMORY_TYPE        MemoryType
305   );
306 
307 /**
308   Check to see if the page at the given address should be guarded or not.
309 
310   @param[in]  MemoryType      Page type to check.
311   @param[in]  AllocateType    Allocation type to check.
312 
313   @return TRUE  The given type of page should be guarded.
314   @return FALSE The given type of page should not be guarded.
315 **/
316 BOOLEAN
317 IsPageTypeToGuard (
318   IN EFI_MEMORY_TYPE        MemoryType,
319   IN EFI_ALLOCATE_TYPE      AllocateType
320   );
321 
322 /**
323   Check to see if the page at the given address is guarded or not.
324 
325   @param[in]  Address     The address to check for.
326 
327   @return TRUE  The page at Address is guarded.
328   @return FALSE The page at Address is not guarded.
329 **/
330 BOOLEAN
331 EFIAPI
332 IsMemoryGuarded (
333   IN EFI_PHYSICAL_ADDRESS    Address
334   );
335 
336 /**
337   Check to see if the page at the given address is a Guard page or not.
338 
339   @param[in]  Address     The address to check for.
340 
341   @return TRUE  The page at Address is a Guard page.
342   @return FALSE The page at Address is not a Guard page.
343 **/
344 BOOLEAN
345 EFIAPI
346 IsGuardPage (
347   IN EFI_PHYSICAL_ADDRESS    Address
348   );
349 
350 /**
351   Dump the guarded memory bit map.
352 **/
353 VOID
354 EFIAPI
355 DumpGuardedMemoryBitmap (
356   VOID
357   );
358 
359 /**
360   Adjust the pool head position to make sure the Guard page is adjavent to
361   pool tail or pool head.
362 
363   @param[in]  Memory    Base address of memory allocated.
364   @param[in]  NoPages   Number of pages actually allocated.
365   @param[in]  Size      Size of memory requested.
366                         (plus pool head/tail overhead)
367 
368   @return Address of pool head.
369 **/
370 VOID *
371 AdjustPoolHeadA (
372   IN EFI_PHYSICAL_ADDRESS    Memory,
373   IN UINTN                   NoPages,
374   IN UINTN                   Size
375   );
376 
377 /**
378   Get the page base address according to pool head address.
379 
380   @param[in]  Memory    Head address of pool to free.
381 
382   @return Address of pool head.
383 **/
384 VOID *
385 AdjustPoolHeadF (
386   IN EFI_PHYSICAL_ADDRESS    Memory
387   );
388 
389 /**
390   Check to see if the heap guard is enabled for page and/or pool allocation.
391 
392   @param[in]  GuardType   Specify the sub-type(s) of Heap Guard.
393 
394   @return TRUE/FALSE.
395 **/
396 BOOLEAN
397 IsHeapGuardEnabled (
398   UINT8           GuardType
399   );
400 
401 /**
402   Notify function used to set all Guard pages after CPU Arch Protocol installed.
403 **/
404 VOID
405 HeapGuardCpuArchProtocolNotify (
406   VOID
407   );
408 
409 /**
410   This function checks to see if the given memory map descriptor in a memory map
411   can be merged with any guarded free pages.
412 
413   @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
414   @param  MaxAddress        Maximum address to stop the merge.
415 
416   @return VOID
417 
418 **/
419 VOID
420 MergeGuardPages (
421   IN EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry,
422   IN EFI_PHYSICAL_ADDRESS       MaxAddress
423   );
424 
425 /**
426   Record freed pages as well as mark them as not-present, if enabled.
427 
428   @param[in]  BaseAddress   Base address of just freed pages.
429   @param[in]  Pages         Number of freed pages.
430 
431   @return VOID.
432 **/
433 VOID
434 EFIAPI
435 GuardFreedPagesChecked (
436   IN  EFI_PHYSICAL_ADDRESS    BaseAddress,
437   IN  UINTN                   Pages
438   );
439 
440 /**
441   Put part (at most 64 pages a time) guarded free pages back to free page pool.
442 
443   Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
444   makes use of 'Used then throw away' way to detect any illegal access to freed
445   memory. The thrown-away memory will be marked as not-present so that any access
446   to those memory (after free) will be caught by page-fault exception.
447 
448   The problem is that this will consume lots of memory space. Once no memory
449   left in pool to allocate, we have to restore part of the freed pages to their
450   normal function. Otherwise the whole system will stop functioning.
451 
452   @param  StartAddress    Start address of promoted memory.
453   @param  EndAddress      End address of promoted memory.
454 
455   @return TRUE    Succeeded to promote memory.
456   @return FALSE   No free memory found.
457 
458 **/
459 BOOLEAN
460 PromoteGuardedFreePages (
461   OUT EFI_PHYSICAL_ADDRESS      *StartAddress,
462   OUT EFI_PHYSICAL_ADDRESS      *EndAddress
463   );
464 
465 extern BOOLEAN mOnGuarding;
466 
467 #endif
468