1 #ifndef _G_MEM_DESC_NVOC_H_
2 #define _G_MEM_DESC_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_mem_desc_nvoc.h"
33 
34 #ifndef _MEMDESC_H_
35 #define _MEMDESC_H_
36 
37 #include "core/prelude.h"
38 #include "poolalloc.h"
39 
40 
41 struct OBJVASPACE;
42 
43 #ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__
44 #define __NVOC_CLASS_OBJVASPACE_TYPEDEF__
45 typedef struct OBJVASPACE OBJVASPACE;
46 #endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */
47 
48 #ifndef __nvoc_class_id_OBJVASPACE
49 #define __nvoc_class_id_OBJVASPACE 0x6c347f
50 #endif /* __nvoc_class_id_OBJVASPACE */
51 
52 
53 struct OBJGPU;
54 
55 #ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
56 #define __NVOC_CLASS_OBJGPU_TYPEDEF__
57 typedef struct OBJGPU OBJGPU;
58 #endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
59 
60 #ifndef __nvoc_class_id_OBJGPU
61 #define __nvoc_class_id_OBJGPU 0x7ef3cb
62 #endif /* __nvoc_class_id_OBJGPU */
63 
64 
65 struct Heap;
66 
67 #ifndef __NVOC_CLASS_Heap_TYPEDEF__
68 #define __NVOC_CLASS_Heap_TYPEDEF__
69 typedef struct Heap Heap;
70 #endif /* __NVOC_CLASS_Heap_TYPEDEF__ */
71 
72 #ifndef __nvoc_class_id_Heap
73 #define __nvoc_class_id_Heap 0x556e9a
74 #endif /* __nvoc_class_id_Heap */
75 
76 
77 struct MEMORY_DESCRIPTOR;
78 
79 typedef struct CTX_BUF_POOL_INFO CTX_BUF_POOL_INFO;
80 typedef struct COMPR_INFO COMPR_INFO;
81 
82 //
83 // Address space identifiers.
84 // Note: This should match the NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_* defines
85 //       in ctrl2080gr.h
86 //
87 typedef NvU32      NV_ADDRESS_SPACE;
88 #define ADDR_UNKNOWN    0         // Address space is unknown
89 #define ADDR_SYSMEM     1         // System memory (PCI)
90 #define ADDR_FBMEM      2         // Frame buffer memory space
91 #define ADDR_REGMEM     3         // NV register memory space
92 #define ADDR_VIRTUAL    4         // Virtual address space only
93 #define ADDR_FABRIC_V2  6         // Fabric address space for the FLA based addressing. Will replace ADDR_FABRIC.
94 #define ADDR_EGM        7         // Extended GPU Memory (EGM)
95 #define ADDR_FABRIC_MC  8         // Multicast fabric address space (MCFLA)
96 
97 //
98 // Address translation identifiers:
99 //
// Memory descriptors are used to describe physical block(s) of memory.
// That memory can be described at various levels of address translation
// using the address translation (AT) enumerants. The supported levels of
// translation are illustrated below.
104 //
105 // The diagram is drawn for system memory with SR-IOV but the translations
106 // are similar for video memory (replace IOMMU with VMMU). VGPU pre-SR-IOV
107 // is also different.
108 //
109 // +-------------------+           +-------------------+
110 // |       CPU         |           |     GPU Engine    |
111 // +-------------------+           +-------------------+
112 //          |                               |
113 //          |                               | GPU VA
114 //          |                               V
115 //          |                      +-------------------+
116 //          | CPU VA               |       GMMU        |
117 //          |                      +-------------------+
118 //          |                               |
119 //          |                               | GPU GPA (AT_GPU)
120 //          v                               v
121 // +-------------------+           +-------------------+
// |  MMU (1st level)  |           | IOMMU (1st level) |
123 // +-------------------+           +-------------------+
124 //          |                               |
125 //          | CPU GPA (AT_CPU)              |                   <---- AT_PA for VGPU guest
126 //          v                               v
127 // +-------------------+           +-------------------+
128 // |  MMU (2nd level)  |           | IOMMU (2nd level) |
129 // +-------------------+           +-------------------+
130 //          |                               |
131 //          | SPA                           | SPA               <---- AT_PA for bare metal
132 //          v                               v                         or VGPU host
133 // +---------------------------------------------------+
134 // |                System Memory                      |
135 // +---------------------------------------------------+
136 //
137 //
138 // Descriptions for *physical* address translation levels:
139 //
140 // AT_CPU - CPU physical address or guest physical address (GPA)
141 // AT_GPU - GPU physical address or guest physical address (GPA)
142 // AT_PA  - When running in host RM or bare metal this is the system physical address. When
143 //          running inside a VGPU guest environment, this is the last level of translation
144 //          visible to the OS context that RM is running in.
145 //
146 // AT_CPU should typically == AT_PA, but there might be cases such as IBM P9 where vidmem
147 // might be 0-based on GPU but exposed elsewhere in the CPU address space.
148 //
149 // Descriptions for *virtual* address translation levels:
150 //
151 // AT_GPU_VA - Memory descriptors can also describe virtual memory allocations. AT_GPU_VA
152 //             represents a GMMU virtual address.
153 //
154 #define AT_CPU      AT_VARIANT(0)
155 #define AT_GPU      AT_VARIANT(1)
156 #define AT_PA       AT_VARIANT(2)
157 
158 #define AT_GPU_VA   AT_VARIANT(3)
159 
160 //
// TODO - switch to using numeric values for AT_XYZ. Pointers are used for
// type safety after the initial split from using class IDs/mmuContext.
163 //
164 typedef struct ADDRESS_TRANSLATION_ *ADDRESS_TRANSLATION;
#define AT_VARIANT(x)  ((struct ADDRESS_TRANSLATION_ *)(x))
166 #define AT_VALUE(x)    ((NvU64)(NvUPtr)(x))
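
//
// Illustrative sketch (not part of the original header): the AT_* variants are
// small integers cast to an opaque pointer type purely for type safety, and
// AT_VALUE() recovers the underlying number, e.g. for logging:
//
//     ADDRESS_TRANSLATION at  = AT_GPU;        // expands to AT_VARIANT(1)
//     NvU64               raw = AT_VALUE(at);  // raw == 1
//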
167 
168 //
// RM-defined memdesc surface names. The names are sent to MODS to enable feature verification.
170 //
171 #define NV_RM_SURF_NAME_INSTANCE_BLOCK                      "rm_instance_block_surface"
172 #define NV_RM_SURF_NAME_PAGE_TABLE                          "rm_page_table_surface"
173 #define NV_RM_SURF_NAME_NONREPLAYABLE_FAULT_BUFFER          "rm_non_replayable_fault_buffer_surface"
174 #define NV_RM_SURF_NAME_REPLAYABLE_FAULT_BUFFER             "rm_replayable_fault_buffer_surface"
175 #define NV_RM_SURF_NAME_CE_FAULT_METHOD_BUFFER              "rm_ce_fault_method_buffer_surface"
176 #define NV_RM_SURF_NAME_ACCESS_COUNTER_BUFFER               "rm_access_counter_buffer_surface"
177 #define NV_RM_SURF_NAME_VAB                                 "rm_vab_surface"
178 #define NV_RM_SURF_NAME_GR_CIRCULAR_BUFFER                  "rm_gr_ctx_circular_buffer_surface"
179 
180 //
181 // Tagging wrapper macro for memdescAlloc
182 //
#define memdescTagAlloc(stat, tag, pMemdesc)                        do {(pMemdesc)->allocTag = (tag); (stat) = memdescAlloc(pMemdesc);} while (0)
#define memdescTagAllocList(stat, tag, pMemdesc, pList)             do {(pMemdesc)->allocTag = (tag); (stat) = memdescAllocList(pMemdesc, pList);} while (0)
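
//
// Minimal usage sketch (hypothetical caller; error handling elided): the
// wrapper records the owner tag on the memdesc before allocating, so internal
// FB allocations can be attributed:
//
//     NV_STATUS status;
//     memdescTagAlloc(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_PMU_SURFACE, pMemDesc);
//     if (status != NV_OK)
//         return status;
//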
185 
186 //
// RM internal allocation owner tags.
// A total of 200 tags are defined; some have already been
// replaced with known verbose strings.
190 //
191 typedef enum
192 {
193     NV_FB_ALLOC_RM_INTERNAL_OWNER__MIN                  = 10U,
194     NV_FB_ALLOC_RM_INTERNAL_OWNER_COMPBIT_STORE         = 11U,
195     NV_FB_ALLOC_RM_INTERNAL_OWNER_CONTEXT_BUFFER        = 12U,
196     NV_FB_ALLOC_RM_INTERNAL_OWNER_ATTR_BUFFER           = 13U,
197     NV_FB_ALLOC_RM_INTERNAL_OWNER_PMU_SURFACE           = 14U,
198     NV_FB_ALLOC_RM_INTERNAL_OWNER_CIRCULAR_BUFFER       = 15U,
199     NV_FB_ALLOC_RM_INTERNAL_OWNER_PAGE_POOL             = 16U,
200     NV_FB_ALLOC_RM_INTERNAL_OWNER_ACCESS_MAP            = 17U,
201     NV_FB_ALLOC_RM_INTERNAL_OWNER_WPR_METADATA          = 18U,
202     NV_FB_ALLOC_RM_INTERNAL_OWNER_LIBOS_ARGS            = 19U,
203     NV_FB_ALLOC_RM_INTERNAL_OWNER_BOOTLOADER_ARGS       = 20U,
204     NV_FB_ALLOC_RM_INTERNAL_OWNER_SR_METADATA           = 21U,
205     NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_SETUP             = 22U,
206     NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_SHADOW            = 23U,
207     NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_BACKUP            = 24U,
208     NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_BINARY            = 25U,
209     NV_FB_ALLOC_RM_INTERNAL_OWNER_VBIOS_FRTS            = 26U,
210     NV_FB_ALLOC_RM_INTERNAL_OWNER_USERD_BUFFER          = 27U,
211     NV_FB_ALLOC_RM_INTERNAL_OWNER_RUNLIST_ENTRIES       = 28U,
212     NV_FB_ALLOC_RM_INTERNAL_OWNER_PAGE_PTE              = 29U,
213     NV_FB_ALLOC_RM_INTERNAL_OWNER_MMU_FAULT_BUFFER      = 30U,
214     NV_FB_ALLOC_RM_INTERNAL_OWNER_FAULT_METHOD          = 31U,
215     NV_FB_ALLOC_RM_INTERNAL_OWNER_WAR_PT                = 32U,
216     NV_FB_ALLOC_RM_INTERNAL_OWNER_WAR_PD                = 33U,
217     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_1         = 34U,
218     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_2         = 35U,
219     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_3         = 36U,
220     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_4         = 37U,
221     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_5         = 38U,
222     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_6         = 39U,
223     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_7         = 40U,
224     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_8         = 41U,
225     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_9         = 42U,
226     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_10        = 43U,
227     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_11        = 44U,
228     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_12        = 45U,
229     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_13        = 46U,
230     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_14        = 47U,
231     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_15        = 48U,
232     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_16        = 49U,
233     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_17        = 50U,
234     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_18        = 51U,
235     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_19        = 52U,
236     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_20        = 53U,
237     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_21        = 54U,
238     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_22        = 55U,
239     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_23        = 56U,
240     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_24        = 57U,
241     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_25        = 58U,
242     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_26        = 59U,
243     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_27        = 60U,
244     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_28        = 61U,
245     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_29        = 62U,
246     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_30        = 63U,
247     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_31        = 64U,
248     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_32        = 65U,
249     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_33        = 66U,
250     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_34        = 67U,
251     NV_FB_ALLOC_RM_INTERNAL_OWNER_RUSD_BUFFER           = 68U,
252     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_36        = 69U,
253     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_37        = 70U,
254     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_38        = 71U,
255     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_39        = 72U,
256     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_40        = 73U,
257     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_41        = 74U,
258     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_42        = 75U,
259     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_43        = 76U,
260     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_44        = 77U,
261     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_45        = 78U,
262     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_46        = 79U,
263     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_47        = 80U,
264     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_48        = 81U,
265     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_49        = 82U,
266     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_50        = 83U,
267     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_51        = 84U,
268     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_52        = 85U,
269     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_53        = 86U,
270     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_54        = 87U,
271     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_55        = 88U,
272     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_56        = 89U,
273     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_57        = 90U,
274     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_58        = 91U,
275     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_59        = 92U,
276     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_60        = 93U,
277     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_61        = 94U,
278     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_62        = 95U,
279     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_63        = 96U,
280     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_64        = 97U,
281     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_65        = 98U,
282     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_66        = 99U,
283     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_67        = 100U,
284     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_68        = 101U,
285     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_69        = 102U,
286     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_70        = 103U,
287     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_71        = 104U,
288     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_72        = 105U,
289     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_73        = 106U,
290     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_74        = 107U,
291     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_75        = 108U,
292     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_76        = 109U,
293     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_77        = 110U,
294     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_78        = 111U,
295     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_79        = 112U,
296     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_80        = 113U,
297     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_81        = 114U,
298     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_82        = 115U,
299     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_83        = 116U,
300     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_84        = 117U,
301     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_85        = 118U,
302     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_86        = 119U,
303     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_87        = 120U,
304     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_88        = 121U,
305     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_89        = 122U,
306     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_90        = 123U,
307     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_91        = 124U,
308     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_92        = 125U,
309     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_93        = 126U,
310     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_94        = 127U,
311     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_95        = 128U,
312     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_96        = 129U,
313     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_97        = 130U,
314     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_98        = 131U,
315     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_99        = 132U,
316     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_100       = 133U,
317     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_101       = 134U,
318     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_102       = 135U,
319     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_103       = 136U,
320     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_104       = 137U,
321     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_105       = 138U,
322     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_106       = 139U,
323     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_107       = 140U,
324     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_108       = 141U,
325     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_109       = 142U,
326     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_110       = 143U,
327     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_111       = 144U,
328     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_112       = 145U,
329     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_113       = 146U,
330     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_114       = 147U,
331     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_115       = 148U,
332     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_116       = 149U,
333     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_117       = 150U,
334     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_118       = 151U,
335     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_119       = 152U,
336     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_120       = 153U,
337     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_121       = 154U,
338     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_122       = 155U,
339     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_123       = 156U,
340     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_124       = 157U,
341     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_125       = 158U,
342     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_126       = 159U,
343     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_127       = 160U,
344     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_128       = 161U,
345     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_129       = 162U,
346     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_130       = 163U,
347     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_131       = 164U,
348     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_132       = 165U,
349     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_133       = 166U,
350     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_134       = 167U,
351     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_135       = 168U,
352     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_136       = 169U,
353     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_137       = 170U,
354     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_138       = 171U,
355     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_139       = 172U,
356     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_140       = 173U,
357     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_141       = 174U,
358     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_142       = 175U,
359     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_143       = 176U,
360     NV_FB_ALLOC_RM_INTERNAL_OWNER_GSP_NOTIFY_OP_SURFACE = 177U,
361 
    //
    // Unused tags from here on; any new use case must replace one of
    // the tags below with a known verbose string.
    //
365     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_145       = 178U,
366     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_146       = 179U,
367     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_147       = 180U,
368     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_148       = 181U,
369     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_149       = 182U,
370     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_150       = 183U,
371     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_151       = 184U,
372     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_152       = 185U,
373     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_153       = 186U,
374     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_154       = 187U,
375     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_155       = 188U,
376     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_156       = 189U,
377     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_157       = 190U,
378     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_158       = 191U,
379     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_159       = 192U,
380     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_160       = 193U,
381     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_161       = 194U,
382     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_162       = 195U,
383     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_163       = 196U,
384     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_164       = 197U,
385     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_165       = 198U,
386     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_166       = 199U,
387     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_167       = 200U,
388     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_168       = 201U,
389     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_169       = 202U,
390     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_170       = 203U,
391     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_171       = 204U,
392     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_172       = 205U,
393     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_173       = 206U,
394     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_174       = 207U,
395     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_175       = 208U,
396     NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_176       = 209U,
397     NV_FB_ALLOC_RM_INTERNAL_OWNER__MAX                  = 210U,
398 } NV_FB_ALLOC_RM_INTERNAL_OWNER;
399 
400 //
// Overrides address translation in SR-IOV enabled use cases
//
// On SR-IOV systems, an access from a guest has to go through the following
// translations:
//
// GVA -> GPA -> SPA
//
// Because the host manages channel/memory management for the guest, certain
// code paths expect VA -> GPA translations and some may need GPA -> SPA
// translations. We use the address translation to differentiate between these
// cases.
//
// We use AT_PA to force GPA -> SPA translation for vidmem. On non-SR-IOV
// systems, using IO_VASPACE_A will fall back to FERMI_VASPACE_A or the default
// context.
415 //
416 #define FORCE_VMMU_TRANSLATION(pMemDesc, curAddressTranslation) \
417     ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ? AT_PA : curAddressTranslation)
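
//
// Usage sketch (assumed caller, not from this header): a path that must see
// the SPA for vidmem filters its current translation through the macro before
// querying a physical address:
//
//     RmPhysAddr pa = memdescGetPhysAddr(pMemDesc,
//                         FORCE_VMMU_TRANSLATION(pMemDesc, AT_GPU), offset);
//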
418 
419 typedef struct _memdescDestroyCallback MEM_DESC_DESTROY_CALLBACK;
420 
421 typedef void (MEM_DATA_RELEASE_CALL_BACK)(struct MEMORY_DESCRIPTOR *);
422 
423 //
424 // A memory descriptor is an object that describes and can be used to manipulate
425 // a block of memory.  The memory can be video or system memory; it can be
426 // contiguous or noncontiguous; it can be tiled, block linear, etc.  However,
427 // regardless of what type of memory it is, clients can use a standard set of
428 // APIs to manipulate it.
429 //
430 DECLARE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST);
431 
432 typedef struct MEMORY_DESCRIPTOR
433 {
434     // The GPU that this memory belongs to
435     OBJGPU *pGpu;
436 
437     // Flags field for optional behavior
438     NvU64 _flags;
439 
440     // Size of mapping used for this allocation.  Multiple mappings on Fermi must always use the same page size.
441     NvU64 _pageSize;
442 
443     // Size of the memory allocation in pages
444     NvU64 PageCount;
445 
    // Alignment of the memory allocation, in bytes
447     // XXX: would 32b work here?
448     NvU64 Alignment;
449 
450     // Size of the memory allocation requested in bytes
451     NvU64 Size;
452 
453     // Actual size of memory allocated to satisfy alignment.
454     // We report the requested size, not the actual size. A number of callers
455     // depend on this.
456     NvU64 ActualSize;
457 
458     // The information returned from osAllocPages
459     NvP64 _address;
460     void *_pMemData;
461     MEM_DATA_RELEASE_CALL_BACK *_pMemDataReleaseCallback;
462 
463     // When memory is allocated by a guest Virtual Machine (VM)
464     // it is aliased by the host RM. We store a unique guest ID
465     // for each piece of aliased memory to facilitate host RM mappings
    // to these pages (only in the case of system memory).
467     // XXX: would 32b work here?
468     NvU64 _guestId;
469 
470     // To keep track of the offset from parent memdesc
471     NvU64 subMemOffset;
472 
473     //
474     // The byte offset at which the memory allocation begins within the first
475     // PTE.  To locate the physical address of the byte at offset i in the memory
476     // allocation, use the following logic:
477     //   i += PteAdjust;
478     //   if (PhysicallyContiguous)
479     //       PhysAddr = PteArray[0] + i;
480     //   else
481     //       PhysAddr = PteArray[i >> RM_PAGE_SHIFT] + (i & RM_PAGE_MASK);
482     //
483     NvU32 PteAdjust;
484 
485     // Has the memory been allocated yet?
486     NvBool Allocated;
487 
488     //
    // Marks that deallocation was requested on this memdesc while it held multiple references.
    // NV_TRUE denotes that the memory will be freed when the refcount reaches 0.
491     //
492     NvBool bDeferredFree;
493 
494     // Does this use SUBALLOCATOR?
495     NvBool bUsingSuballocator;
496 
497     // Where does the memory live?  Video, system, other
498     NV_ADDRESS_SPACE _addressSpace;
499 
500     // Attributes reflecting GPU caching of this memory.
501     NvU32 _gpuCacheAttrib;
502 
503     // Peer vid mem cacheability
504     NvU32 _gpuP2PCacheAttrib;
505 
506     // One of NV_MEMORY_CACHED, NV_MEMORY_UNCACHED, NV_MEMORY_WRITECOMBINED
507     NvU32 _cpuCacheAttrib;
508 
509     // The page kind of this memory
510     NvU32 _pteKind;
511     NvU32 _pteKindCompressed;
512 
513     //
514     // Scale memory allocation by this value
515     //
516     NvU32 _subDeviceAllocCount;
517 
518     //
519     // Reference count for the object.
520     //
521     NvU32 RefCount;
522 
523     // Reference count for duplication of memory object via RmDupObject.
524     NvU32 DupCount;
525 
526     //
527     // The HwResId is used by the device dependent HAL to keep track of
528     // resources attached to the memory (e.g.: compression tags, zcull).
529     //
530     NvU32 _hwResId;
531 
532     //
533     // alloc tag for tracking internal allocations @ref NV_FB_ALLOC_RM_INTERNAL_OWNER
534     //
535     NV_FB_ALLOC_RM_INTERNAL_OWNER allocTag;
536 
537     //
538     // Keep track which heap is actually used for this allocation
539     //
540     struct Heap *pHeap;
541 
542     //
543     // GFID that this memory allocation belongs to
544     //
545     NvU32    gfid;
546 
547     //
548     // Keep track of the PMA_ALLOC_INFO data.
549     //
550     struct PMA_ALLOC_INFO *pPmaAllocInfo;
551 
    // Serves as the head node in a list of page handles
553     PoolPageHandleList *pPageHandleList;
554 
555     //
556     // List of callbacks to call when destroying memory descriptor
557     //
558     MEM_DESC_DESTROY_CALLBACK *_pMemDestroyCallbackList;
559 
    // Pointer to the descriptor that was used to subset the current descriptor
561     struct MEMORY_DESCRIPTOR *_pParentDescriptor;
562 
563     // Count used for sanity check
564     NvU32 childDescriptorCnt;
565 
566     // Next memory descriptor in subdevice list
567     struct MEMORY_DESCRIPTOR *_pNext;
568 
    // Pointer to a system memory descriptor used to back some FB content across S3/S4.
570     struct MEMORY_DESCRIPTOR *_pStandbyBuffer;
571 
    // Serves as the head node in a list of submemdescs
573     MEMORY_DESCRIPTOR_LIST *pSubMemDescList;
574 
575     // Reserved for RM exclusive use
576     NvBool bRmExclusiveUse;
577 
    // If strung in an intrusive linked list
579     ListNode   node;
580 
581     //
582     // Pointer to IOVA mappings used to back the IOMMU VAs for different IOVA spaces
    // Submemory descriptors only have one mapping, but the root descriptor will have
584     // one per IOVA space that the memory is mapped into.
585     //
586     struct IOVAMAPPING *_pIommuMappings;
587 
588     // Kernel mapping of the memory
589     NvP64 _kernelMapping;
590     NvP64 _kernelMappingPriv;
591 
592     // Internal mapping
593     void *_pInternalMapping;
594     void *_pInternalMappingPriv;
595     NvU32 _internalMappingRefCount;
596 
    // Array to hold SPA addresses when the memdesc is allocated from GPA. Valid only for SR-IOV cases.
598     RmPhysAddr *pPteSpaMappings;
599 
600     //
    // Context buffer pool from which this memdesc is to be allocated.
    // This is controlled by PDB_PROP_GPU_MOVE_RM_BUFFERS_TO_PMA, which is
    // enabled only for SMC today.
604     //
605     CTX_BUF_POOL_INFO *pCtxBufPool;
606 
    // Max physical address width to be overridden
608     NvU32 _overridenAddressWidth;
609 
    // Set when we have verified that the memdesc is safe to map with large pages
611     NvBool bForceHugePages;
612 
613     // Memory handle that libos 3+ returns for dynamically mapped sysmem
614     NvU32 libosRegionHandle;
615     NvU64 baseVirtualAddress;
616 
617     // Indicates granularity of mapping. Will be used to implement dynamic page sizes.
618     NvU32 pageArrayGranularity;
619 
620     // NUMA node ID from which memory should be allocated
621     NvS32 numaNode;
622 
623     // Array to hold EGM addresses when EGM is enabled
624     RmPhysAddr *pPteEgmMappings;
625 
626     //
627     // If PhysicallyContiguous is NV_TRUE, this array consists of one element.
628     // If PhysicallyContiguous is NV_FALSE, this array is actually larger and has
629     // one entry for each physical page in the memory allocation.  As a result,
630     // this structure must be allocated from the heap.
631     // If the AddressSpace is ADDR_FBMEM, each entry is an FB offset.
632     // Otherwise, each entry is a physical address on the system bus.
633     // TBD: for now, the array will be sized at one entry for every 4KB, but
634     // we probably want to optimize this later to support 64KB pages.
635     //
636     RmPhysAddr _pteArray[1];
    // !!! Place nothing after _pteArray !!!
638 } MEMORY_DESCRIPTOR, *PMEMORY_DESCRIPTOR;
639 
640 MAKE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST, MEMORY_DESCRIPTOR, node);
641 
642 //
643 // Common address space lists
644 //
645 extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[];
646 extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[];
647 extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[];
648 extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[];
649 
650 NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist);
651 const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index);
652 
NV_STATUS _memdescUpdateSpaArray(PMEMORY_DESCRIPTOR pMemDesc);

654 // Create a memory descriptor data structure (without allocating any physical
655 // storage).
656 NV_STATUS memdescCreate(MEMORY_DESCRIPTOR **ppMemDesc, OBJGPU *pGpu, NvU64 Size,
657                         NvU64 alignment, NvBool PhysicallyContiguous,
658                         NV_ADDRESS_SPACE AddressSpace, NvU32 CpuCacheAttrib, NvU64 Flags);
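
//
// Minimal lifecycle sketch (hypothetical; error handling abbreviated): create
// the descriptor, allocate backing storage with an owner tag, then free and
// destroy in reverse order:
//
//     MEMORY_DESCRIPTOR *pMemDesc = NULL;
//     NV_STATUS status = memdescCreate(&pMemDesc, pGpu, 0x1000, 0,
//                                      NV_TRUE,             // contiguous
//                                      ADDR_SYSMEM,
//                                      NV_MEMORY_UNCACHED,
//                                      MEMDESC_FLAGS_NONE);
//     if (status == NV_OK)
//     {
//         memdescTagAlloc(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_1,
//                         pMemDesc);
//         if (status == NV_OK)
//             memdescFree(pMemDesc);
//         memdescDestroy(pMemDesc);
//     }
//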
659 
660 #define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addressSpace) \
    ((((pGpu) != NULL) && gpumgrGetBcEnabledStatus(pGpu) && ((addressSpace) == ADDR_FBMEM)) ? MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE : MEMDESC_FLAGS_NONE)
662 
// Initialize a caller-supplied memory descriptor for use with memdescDescribe()
664 void memdescCreateExisting(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU64 Size,
665                            NV_ADDRESS_SPACE AddressSpace,
666                            NvU32 CpuCacheAttrib, NvU64 Flags);
667 
668 // Increment reference count
669 void memdescAddRef(MEMORY_DESCRIPTOR *pMemDesc);
670 
671 // Decrement reference count
672 void memdescRemoveRef(MEMORY_DESCRIPTOR *pMemDesc);
673 
674 // Decrement reference count and reclaim any resources when possible
675 void memdescDestroy(MEMORY_DESCRIPTOR *pMemDesc);
676 
677 //
678 // The destroy callback is called when the memory descriptor is
679 // destroyed with memdescDestroy().
680 //
681 // The caller is responsible for managing the memory used
682 // containing the callback.
683 //
684 typedef void (MemDescDestroyCallBack)(OBJGPU *, void *pObject, MEMORY_DESCRIPTOR *);
685 struct _memdescDestroyCallback
686 {
687     MemDescDestroyCallBack *destroyCallback;
688     void *pObject;
689     MEM_DESC_DESTROY_CALLBACK *pNext;
690 };
691 void memdescAddDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *);
692 void memdescRemoveDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *);
693 
694 // Allocate physical storage for a memory descriptor and fill in its PteArray
695 NV_STATUS memdescAlloc(MEMORY_DESCRIPTOR *pMemDesc);
696 
697 // Allocate memory from one of the possible locations specified in pList.
698 NV_STATUS memdescAllocList(MEMORY_DESCRIPTOR *pMemDesc, const NV_ADDRESS_SPACE *pList);
699 
700 // Free physical storage for a memory descriptor
701 void memdescFree(MEMORY_DESCRIPTOR *pMemDesc);
702 
703 // Lock the paged virtual memory
704 NV_STATUS memdescLock(MEMORY_DESCRIPTOR *pMemDesc);
705 
706 // Unlock the paged virtual memory
707 NV_STATUS memdescUnlock(MEMORY_DESCRIPTOR *pMemDesc);
708 
709 // Allocate a CPU mapping of an arbitrary subrange of the memory.
// 64-bit clean (Mac can have a 32-bit kernel pointer and 64-bit client pointers)
711 NV_STATUS memdescMap(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size,
712                      NvBool Kernel, NvU32 Protect, NvP64 *pAddress, NvP64 *pPriv);
713 
714 // Free a CPU mapping of an arbitrary subrange of the memory.
715 void memdescUnmap(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel, NvU32 ProcessId,
716                   NvP64 Address, NvP64 Priv);
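
//
// Paired usage sketch (hypothetical kernel-mode mapping of the whole surface;
// NV_PROTECT_READ_WRITE and osGetCurrentProcess() are assumed from elsewhere
// in RM):
//
//     NvP64 va = NvP64_NULL, priv = NvP64_NULL;
//     NV_STATUS status = memdescMap(pMemDesc, 0, memdescGetSize(pMemDesc),
//                                   NV_TRUE, NV_PROTECT_READ_WRITE,
//                                   &va, &priv);
//     if (status == NV_OK)
//     {
//         // ... access the mapping ...
//         memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), va, priv);
//     }
//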
717 
718 // Allocate a CPU mapping of an arbitrary subrange of the memory.
// Fails unless Kernel == NV_TRUE.
720 NV_STATUS memdescMapOld(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size,
721                         NvBool Kernel, NvU32 Protect, void **pAddress, void **pPriv);
722 
723 // Free a CPU mapping of an arbitrary subrange of the memory.
724 void memdescUnmapOld(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel, NvU32 ProcessId,
725                      void *Address, void *Priv);
726 
727 // Fill in a MEMORY_DESCRIPTOR with a description of a preexisting contiguous
728 // memory allocation.  It should already be initialized with
729 // memdescCreate*().
730 void memdescDescribe(MEMORY_DESCRIPTOR *pMemDesc,
731                      NV_ADDRESS_SPACE AddressSpace,
732                      RmPhysAddr Base, NvU64 Size);
733 
734 // Fill in a MEMORY_DESCRIPTOR with the physical page addresses returned by PMA.
735 // It should already be initialized with memdescCreate*().
736 void memdescFillPages(MEMORY_DESCRIPTOR *pMemDesc, NvU32 offset,
737                       NvU64 *pPages, NvU32 pageCount, NvU64 pageSize);
738 
739 // Create a MEMORY_DESCRIPTOR for a subset of an existing memory allocation.
740 // The new MEMORY_DESCRIPTOR must be freed with memdescDestroy.
741 NV_STATUS memdescCreateSubMem(MEMORY_DESCRIPTOR **ppMemDescNew,
742                               MEMORY_DESCRIPTOR *pMemDesc,
743                               OBJGPU *pGpu, NvU64 Offset, NvU64 Size);
744 
745 // Compute the physical address of a byte within a MEMORY_DESCRIPTOR
746 RmPhysAddr memdescGetPhysAddr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 offset);
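
//
// Example (hypothetical): fetch the physical/bus address of the first byte as
// seen by the GPU:
//
//     RmPhysAddr pa = memdescGetPhysAddr(pMemDesc, AT_GPU, 0);
//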
747 
// Compute 'count' physical addresses within a MEMORY_DESCRIPTOR, starting at
// the given offset and advancing by 'stride' for each consecutive address.
750 void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc,
751                          ADDRESS_TRANSLATION addressTranslation,
752                          NvU64 offset,
753                          NvU64 stride,
754                          NvU64 count,
755                          RmPhysAddr *pAddresses);
756 
// Compute 'count' physical addresses within a MEMORY_DESCRIPTOR for a specific
// GPU, starting at the given offset and advancing by 'stride' for each
// consecutive address.
760 void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc,
761                                OBJGPU *pGpu,
762                                ADDRESS_TRANSLATION addressTranslation,
763                                NvU64 offset,
764                                NvU64 stride,
765                                NvU64 count,
766                                RmPhysAddr *pAddresses);
767 
768 // Obtains one of the PTEs from the MEMORY_DESCRIPTOR.  Assumes 4KB pages,
769 // and works for either contiguous or noncontiguous descriptors.
770 RmPhysAddr memdescGetPte(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex);
771 
772 void memdescSetPte(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex, RmPhysAddr PhysAddr);
773 
774 // Obtains the PteArray from the MEMORY_DESCRIPTOR for the specified GPU.
775 RmPhysAddr * memdescGetPteArrayForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, ADDRESS_TRANSLATION addressTranslation);
776 
777 /*!
778  *  @brief Obtains the PteArray from the MEMORY_DESCRIPTOR.
779  *
780  *  @param[in]  pMemDesc           Memory descriptor to use
781  *  @param[in]  addressTranslation Address translation identifier
782  *
 *  @returns Pointer to the PTE array
784  */
785 static inline RmPhysAddr *
786 memdescGetPteArray(PMEMORY_DESCRIPTOR  pMemDesc, ADDRESS_TRANSLATION addressTranslation)
787 {
788     return memdescGetPteArrayForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation);
789 }
790 
// Obtains the PteArray size from the MEMORY_DESCRIPTOR based on the address translation.
792 NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation);
793 
794 // Return the aperture of the NV_ADDRESS_SPACE as a null terminated string.
795 // Useful for print statements.
796 const char* memdescGetApertureString(NV_ADDRESS_SPACE addrSpace);
797 
// Return NV_TRUE if two MEMORY_DESCRIPTORs are equal
799 NvBool memdescDescIsEqual(MEMORY_DESCRIPTOR *pMemDescOne, MEMORY_DESCRIPTOR *pMemDescTwo);
800 
801 // Retrieve the per-GPU memory descriptor for a subdevice
802 MEMORY_DESCRIPTOR *memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst);
803 
804 // Retrieve the per-GPU memory descriptor for a GPU
805 MEMORY_DESCRIPTOR *memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu);
806 
807 // Retrieve the per-GPU memory descriptor at an index
808 MEMORY_DESCRIPTOR *memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index);
809 
810 // Print information on memory descriptor
811 void memdescPrintMemdesc(MEMORY_DESCRIPTOR *pMemDesc, NvBool bPrintIndividualPages, const char *pPrefixMessage);
812 
813 // Get the page offset for an arbitrary power of two page size
814 NvU64 memdescGetPageOffset(MEMORY_DESCRIPTOR *pMemDesc, NvU64 pageSize);
815 
816 //
817 // Internal APIs for the IOVASPACE to manage IOMMU mappings in a memdesc.
818 //
819 // Note that the external APIs are memdescMapIommu(),
820 // memdescUnmapIommu() and memdescGetIommuMap().
821 //
822 NV_STATUS memdescAddIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap);
823 void memdescRemoveIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap);
824 
825 //
826 // Map and unmap IOMMU for the specified VA space
827 //
828 // Each memdescUnmapIommu() call has to be paired with a previous successful
829 // memdescMapIommu() call for the same VA space. The calls are refcounted for
830 // each VA space and only the last Unmap will remove the mappings.
831 //
832 // The caller has to guarantee that before the VA space is destroyed, either the
833 // mapping is explicitly unmapped with memdescUnmapIommu() or the memdesc is
834 // freed (or destroyed for memdescs that are not memdescFree()d).
835 //
836 NV_STATUS memdescMapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId);
837 void memdescUnmapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId);
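
//
// Refcounted pairing sketch (hypothetical IOVA space id): every successful map
// is matched by an unmap, and only the last unmap removes the mapping:
//
//     NV_STATUS status = memdescMapIommu(pMemDesc, iovaspaceId);
//     if (status == NV_OK)
//     {
//         // ... program the device with the IOVA ...
//         memdescUnmapIommu(pMemDesc, iovaspaceId);
//     }
//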
838 
839 // Returns the IOVA mapping created by memdescMapIommu().
840 struct IOVAMAPPING *memdescGetIommuMap(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId);
841 
842 //
843 // Check subdevice consistency functions
844 //
845 void memdescCheckSubDevicePageSizeConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS,
846                                               NvU64 pageSize, NvU64 pageOffset);
847 void memdescCheckSubDeviceMemContiguityConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS,
848                                                    NvBool bIsMemContiguous);
849 NV_STATUS memdescCheckSubDeviceKindComprConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS,
850                                                     NvU32 kind, COMPR_INFO *pComprInfo);
851 
852 //
853 // Accessor functions
854 //
855 void memdescSetHeapOffset(MEMORY_DESCRIPTOR *pMemDesc, RmPhysAddr fbOffset);
856 void memdescSetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 cpuCacheAttrib);
857 void memdescSetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib);
858 NvU32 memdescGetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc);
859 void memdescSetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib);
860 NvU32 memdescGetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu);
861 void  memdescSetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU32 pteKind);
862 NvU32 memdescGetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc);
863 void  memdescSetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc, NvU32 pteKindCmpr);
864 NvP64 memdescGetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc);
865 void  memdescSetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMapping);
866 NvP64 memdescGetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc);
867 void  memdescSetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMappingPriv);
868 MEMORY_DESCRIPTOR *memdescGetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc);
869 void memdescSetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc, MEMORY_DESCRIPTOR *pStandbyBuffer);
870 void memdescSetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *pCb);
871 NvU64 memdescGetGuestId(MEMORY_DESCRIPTOR *pMemDesc);
872 void memdescSetGuestId(MEMORY_DESCRIPTOR *pMemDesc, NvU64 guestId);
873 NvBool memdescGetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag);
874 void memdescSetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag, NvBool bValue);
875 NvP64 memdescGetAddress(MEMORY_DESCRIPTOR *pMemDesc);
876 void memdescSetAddress(MEMORY_DESCRIPTOR *pMemDesc, NvP64 pAddress);
877 void *memdescGetMemData(MEMORY_DESCRIPTOR *pMemDesc);
878 void memdescSetMemData(MEMORY_DESCRIPTOR *pMemDesc, void *pMemData, MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback);
879 NvBool memdescGetVolatility(MEMORY_DESCRIPTOR *pMemDesc);
880 NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation);
881 void memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous);
882 NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation);
883 NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc);
884 NvU64 memdescGetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation);
885 void  memdescSetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 pageSize);
886 PMEMORY_DESCRIPTOR memdescGetRootMemDesc(PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pRootOffset);
887 void memdescSetCustomHeap(PMEMORY_DESCRIPTOR);
888 NvBool memdescGetCustomHeap(PMEMORY_DESCRIPTOR);
889 NV_STATUS memdescSetPageArrayGranularity(MEMORY_DESCRIPTOR *pMemDesc, NvU64 pageArrayGranularity);
890 NvBool memdescAcquireRmExclusiveUse(MEMORY_DESCRIPTOR *pMemDesc);
891 NV_STATUS memdescFillMemdescForPhysAttr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation,
                                        NvU64 *pOffset, NvU32 *pMemAperture, NvU32 *pMemKind, NvU32 *pZCullId,
893                                         NvU32 *pGpuCacheAttr, NvU32 *pGpuP2PCacheAttr, NvU64 *contigSegmentSize);
894 
895 NvBool memdescIsEgm(MEMORY_DESCRIPTOR *pMemDesc);
896 /*!
897  *  @brief Get PTE kind
898  *
899  *  @param[in]  pMemDesc           Memory descriptor pointer
901  *
902  *  @returns Current PTE kind value.
903  */
904 static inline NvU32
905 memdescGetPteKind(PMEMORY_DESCRIPTOR pMemDesc)
906 {
907     return memdescGetPteKindForGpu(pMemDesc,  pMemDesc->pGpu);
908 }
909 
910 /*!
911  *  @brief Set PTE kind.
912  *
913  *  @param[in]  pMemDesc           Memory descriptor pointer
914  *  @param[in]  pteKind            New PTE kind
915  *
916  *  @returns nothing
917  */
918 static inline void
919 memdescSetPteKind(PMEMORY_DESCRIPTOR pMemDesc, NvU32 pteKind)
920 {
921     memdescSetPteKindForGpu(pMemDesc, pMemDesc->pGpu, pteKind);
922 }
923 
924 /*!
925  *  @brief Get HW resource identifier (HwResId)
926  *
927  *  TODO: Need to ensure this is checked per subdevice only.
928  *
929  *  @param[in]  pMemDesc           Memory descriptor pointer
930  *
931  *  @returns Current HW resource identifier
932  */
933 static inline NvU32
934 memdescGetHwResId(PMEMORY_DESCRIPTOR pMemDesc)
935 {
936     return pMemDesc->_hwResId;
937 }
938 
939 /*!
940  *  @brief Set HW resource identifier (HwResId)
941  *
942  *  @param[in]  pMemDesc           Memory descriptor pointer
943  *  @param[in]  hwResId            New HW resource identifier
944  *
945  *  @returns nothing
946  */
947 static inline void
948 memdescSetHwResId(PMEMORY_DESCRIPTOR pMemDesc, NvU32 hwResId)
949 {
950     pMemDesc->_hwResId = hwResId;
951 }
952 
953 /*!
954  *  @brief Get mem destroy callback list pointer
955  *
956  *  @param[in]  pMemDesc    Memory descriptor pointer
957  *
958  *  @returns Pointer to mem destroy callback list
959  */
960 static inline MEM_DESC_DESTROY_CALLBACK *
961 memdescGetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc)
962 {
963     return pMemDesc->_pMemDestroyCallbackList;
964 }
965 
966 /*!
967  *  @brief Get the byte offset relative to the root memory descriptor.
968  *
 *  The root memory descriptor is the top-level memory descriptor (one with no
 *  parent) from which this memory descriptor was derived.
 *
 *  @param[in]  pMemDesc  Memory descriptor pointer.
 *
 *  @returns the byte offset relative to the root memory descriptor.
975  */
976 static inline NvU64
977 memdescGetRootOffset(PMEMORY_DESCRIPTOR pMemDesc)
978 {
979     NvU64 rootOffset = 0;
980     (void)memdescGetRootMemDesc(pMemDesc, &rootOffset);
981     return rootOffset;
982 }
983 
984 /*!
985  *  @brief Get CPU cache attributes
986  *
987  *  @param[in]  pMemDesc    Memory descriptor pointer
988  *
989  *  @returns Current CPU cache attributes
990  */
991 static inline NvU32
992 memdescGetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc)
993 {
994     return pMemDesc->_cpuCacheAttrib;
995 }
996 
997 /*!
998  *  @brief Get GPU cache attributes
999  *
1000  *  @param[in]  pMemDesc    Memory descriptor pointer
1001  *
1002  *  @returns Current GPU cache attributes
1003  */
1004 static inline NvU32
1005 memdescGetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc)
1006 {
1007     return pMemDesc->_gpuCacheAttrib;
1008 }
1009 
1010 /*!
1011  *  @brief Return pte adjust
1012  *
 *  PteAdjust is zero whenever the memory is allocated by RM, since allocations
 *  are always page-size aligned. However, memory descriptors can be created on
 *  pre-allocated addresses plus an offset that is not page aligned; PteAdjust
 *  is non-zero in such cases. We do not allow the memdescDescribe operation
 *  (i.e. memory descriptors created on pre-allocated addresses) for subdevice
 *  memdescs, hence the top-level memdesc is always used to access the PteAdjust.
1019  *
1020  *  @param[in]  pMemDesc   Memory descriptor to use
1021  *
1022  *  @returns PteAdjust
1023  */
1024 static inline NvU32
1025 memdescGetPteAdjust(PMEMORY_DESCRIPTOR pMemDesc)
1026 {
1027     return pMemDesc->PteAdjust;
1028 }
1029 
1030 /*!
1031  *  @brief Get subdevice allocation count.
1032  *
1033  *  @param[in]  pMemDesc    Memory descriptor pointer
1034  *
1035  *  @returns Current subdevice allocation count value.
1036  */
1037 static inline NvU32
1038 memdescGetSubDeviceAllocCount (MEMORY_DESCRIPTOR *pMemDesc)
1039 {
1040     return pMemDesc->_subDeviceAllocCount;
1041 }
1042 
1043 /*!
1044  *  @brief Get memory descriptor of parent
1045  *
1046  *  @param[in]  pMemDesc    Memory descriptor pointer
1047  *
1048  *  @returns Memory descriptor of parent
1049  */
1050 static inline MEMORY_DESCRIPTOR *
1051 memdescGetParentDescriptor(MEMORY_DESCRIPTOR *pMemDesc)
1052 {
1053     return pMemDesc->_pParentDescriptor;
1054 }
1055 
1056 /*!
1057  *  @brief Set the address space of the memory descriptor
1058  *
1059  *  @param[in]  pMemDesc           Memory descriptor used
1060  *  @param[in]  addressTranslation Address translation identifier
1061  *  @param[in]  addressSpace       Address Space
1062  *
1063  *  @returns nothing
1064  */
1065 static inline void
1066 memdescSetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc, NV_ADDRESS_SPACE addressSpace)
1067 {
1068     pMemDesc->_addressSpace = addressSpace;
1069 }
1070 
1071 /*!
1072  *  @brief Return size
1073  *
1074  *  @param[in]  pMemDesc   Memory descriptor to use
1075  *
1076  *  @returns Size
1077  */
1078 static inline NvU64
1079 memdescGetSize(PMEMORY_DESCRIPTOR pMemDesc)
1080 {
1081     return pMemDesc->Size;
1082 }
1083 
1084 /*!
1085  *  @brief Set CPU NUMA node to allocate memory from
1086  *
1087  *  @param[in]  pMemDesc    Memory Descriptor to use
1088  *  @param[in]  numaNode    NUMA node to allocate memory from
1089  */
1090 static NV_INLINE void
1091 memdescSetNumaNode(MEMORY_DESCRIPTOR *pMemDesc, NvS32 numaNode)
1092 {
1093     pMemDesc->numaNode = numaNode;
1094 }
1095 
1096 /*!
1097  *  @brief Get CPU NUMA node to allocate memory from
1098  *
1099  *  @param[in]  pMemDesc    Memory Descriptor to use
1100  *
1101  *  @returns    NUMA node to allocate memory from
1102  */
1103 static NV_INLINE NvS32
1104 memdescGetNumaNode(MEMORY_DESCRIPTOR *pMemDesc)
1105 {
1106     return pMemDesc->numaNode;
1107 }
1108 
1109 /*!
1110  *  @brief Checks if subdevice memory descriptors are present
1111  *
1112  *  See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory
1113  *  descriptors
1114  *
1115  *  @param[in]  pMemDesc  Memory descriptor to query
1116  *
1117  *  @returns NV_TRUE if subdevice memory descriptors exist
1118  */
1119 static NV_INLINE NvBool
1120 memdescHasSubDeviceMemDescs(MEMORY_DESCRIPTOR *pMemDesc)
1121 {
1122     return (pMemDesc->_subDeviceAllocCount > 1);
1123 }
1124 
1125 /*!
 *  @brief Checks if this memory descriptor is a submemory descriptor
1127  *
1128  *  @param[in]  pMemDesc  Memory descriptor to query
1129  *
1130  *  @returns NV_TRUE if it is a submemory desc, NV_FALSE otherwise.
1131  */
1132 static NV_INLINE NvBool
1133 memdescIsSubMemoryMemDesc(MEMORY_DESCRIPTOR *pMemDesc)
1134 {
1135     return pMemDesc->_pParentDescriptor != NULL ? NV_TRUE : NV_FALSE;
1136 }
1137 
1138 NV_STATUS memdescGetNvLinkGpa(OBJGPU *pGpu, NvU64 pageCount, RmPhysAddr *pGpa);
1139 
1140 NV_STATUS memdescSetCtxBufPool(PMEMORY_DESCRIPTOR pMemDesc, CTX_BUF_POOL_INFO* pCtxBufPool);
1141 CTX_BUF_POOL_INFO* memdescGetCtxBufPool(PMEMORY_DESCRIPTOR pMemDesc);
1142 
1143 /*!
 * @brief Override the registry INST_LOC two-bit enum to an aperture (list) + CPU attribute.
 *
 * The loc parameter uses the NV_REG_STR_RM_INST_LOC defines.
 * The caller must set initial default values.
1148  */
1149 void memdescOverrideInstLoc(NvU32 loc, const char *name, NV_ADDRESS_SPACE *pAddrSpace, NvU32 *pCpuMappingAttr);
1150 void memdescOverrideInstLocList(NvU32 loc, const char *name, const NV_ADDRESS_SPACE **ppAllocList, NvU32 *pCpuMappingAttr);
1151 
1152 /*!
* @brief Override the physical system address limit.
*/
1156 void memdescOverridePhysicalAddressWidthWindowsWAR(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 addressWidth);
1157 
1158 /*!
1159 * @brief Register memory descriptor referenced by hMemory in CPU-RM to GSP
1160 *
1161 * @param[in]  pGpu          OBJGPU pointer
* @param[in]  hClient       client handle
* @param[in]  hParent       parent handle
1164 * @param[in]  hMemory       memory handle
1165 *
1166 * @returns NV_STATUS
1167 */
1168 NV_STATUS memdescRegisterToGSP(OBJGPU *pGpu, NvHandle hClient, NvHandle hParent, NvHandle hMemory);
1169 
1170 /*!
1171 * @brief Deregister memory descriptor referenced by hMemory in CPU-RM from GSP
1172 *
1173 * @param[in]  pGpu          OBJGPU pointer
* @param[in]  hClient       client handle
* @param[in]  hParent       parent handle
1176 * @param[in]  hMemory       memory handle
1177 *
1178 * @returns NV_STATUS
1179 */
1180 
1181 NV_STATUS memdescDeregisterFromGSP(OBJGPU *pGpu, NvHandle hClient, NvHandle hParent, NvHandle hMemory);
1182 
1183 /*!
1184 * @brief Send memory descriptor from CPU-RM to GSP
1185 *
* This function creates a MemoryList object with the MEMORY_DESCRIPTOR information on CPU-RM.
* It then uses the memdescRegisterToGSP API to create a corresponding MemoryList object on GSP-RM
* with the same handle as that on CPU-RM.
*
* This MemoryList object has the same MEMORY_DESCRIPTOR info as the input pMemDesc.
* The CPU-RM handle can be sent to GSP-RM and then used on the GSP end to retrieve the MemoryList
* object and, from it, the corresponding MEMORY_DESCRIPTOR.
1193 *
1194 * @param[in]  pGpu          OBJGPU pointer
1195 * @param[in]  pMemDesc      MemDesc pointer
1196 * @param[out] pHandle       Pointer to handle of MemoryList object
1197 *
1198 * @returns NV_STATUS
1199 */
1200 NV_STATUS memdescSendMemDescToGSP(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvHandle *pHandle);
1201 
1202 // cache maintenance functions
1203 void memdescFlushGpuCaches(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc);
1204 void memdescFlushCpuCaches(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc);
1205 
1206 // Map memory descriptor for RM internal access
1207 void* memdescMapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
1208 void memdescUnmapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
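
//
// Internal-mapping sketch (hypothetical; the TRANSFER_FLAGS_NONE flag value is
// an assumption, not defined in this header):
//
//     void *p = memdescMapInternal(pGpu, pMemDesc, TRANSFER_FLAGS_NONE);
//     if (p != NULL)
//     {
//         // ... RM-internal CPU access ...
//         memdescUnmapInternal(pGpu, pMemDesc, TRANSFER_FLAGS_NONE);
//     }
//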
1209 
1210 /*!
1211  * @brief Set the name of the surface.
1212  *
1213  * @param[in] pGpu     OBJGPU pointer.
1214  * @param[in] pMemDesc MEMORY_DESCRIPTOR pointer that the name is to be set for.
 * @param[in] name     const char pointer to the name to be set.
 * @param[in] suffix   const char pointer to the name suffix.
1216  */
1217 void memdescSetName(OBJGPU*, MEMORY_DESCRIPTOR *pMemDesc, const char *name, const char *suffix);
1218 
1219 //
1220 // External flags:
1221 //   ALLOC_PER_SUBDEVICE    Allocate independent system memory for each GPU
1222 //   LOST_ON_SUSPEND        PM code will skip this allocation during S/R
//   LOCKLESS_SYSMEM_ALLOC  System memory should be allocated unprotected by
//                          the RM lock
1225 //   GPU_PRIVILEGED         This memory will be marked as privileged in the GPU
1226 //                          page tables.  When set only GPU requestors who are
1227 //                          "privileged" are allowed to access this memory.
1228 //                          This can be used for mapping sensitive memory into
1229 //                          a user's GPU address space (like context buffers).
1230 //                          Note support for this in our GPUs is limited, so
1231 //                          only use it if you know the HW accessing the memory
1232 //                          makes privileged requests.
1233 //
1234 // Internal flags:
//   SET_KIND               Whether or not the kind was set to a different
//                          value than the default.
1237 //   PRE_ALLOCATED          Caller provided memory descriptor memory
1238 //   FIXED_ADDRESS_ALLOCATE Allocate from the heap with a fixed address
1239 //   ALLOCATED              Has the memory been allocated yet?
1240 //   GUEST_ALLOCATED        Is the memory allocated by a guest VM?
1241 //                          We make aliased memory descriptors to guest
1242 //                          allocated memory and mark it so, so that we know
1243 //                          how to deal with it in memdescMap() etc.
1244 //   KERNEL_MODE            Is the memory for a user or kernel context?
1245 //                          XXX This is lame, and it would be best if we could
1246 //                          get rid of it.  Memory *storage* isn't either user
1247 //                          or kernel -- only mappings are user or kernel.
1248 //                          Unfortunately, osAllocPages requires that we
1249 //                          provide this information.
1250 //  PHYSICALLY_CONTIGUOUS   Are the underlying physical pages of this memory
1251 //                          allocation contiguous?
1252 //  ENCRYPTED               TurboCipher allocations need a bit in the PTE to
1253 //                          indicate encrypted
1254 //  UNICAST                 Memory descriptor was created via UC path
1255 //  PAGED_SYSMEM            Allocate the memory from paged system memory. When
1256 //                          this flag is used, memdescLock() should be called
1257 //                          to lock the memory in physical pages before we
1258 //                          access this memory descriptor.
1259 //  CPU_ONLY                Allocate memory only accessed by CPU.
1260 //
#define MEMDESC_FLAGS_NONE                         ((NvU64)0x0)
#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE          NVBIT64(0)
#define MEMDESC_FLAGS_SET_KIND                     NVBIT64(1)
#define MEMDESC_FLAGS_LOST_ON_SUSPEND              NVBIT64(2)
#define MEMDESC_FLAGS_PRE_ALLOCATED                NVBIT64(3)
#define MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE       NVBIT64(4)
#define MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC        NVBIT64(5)
#define MEMDESC_FLAGS_GPU_IN_RESET                 NVBIT64(6)
#define MEMDESC_ALLOC_FLAGS_PROTECTED              NVBIT64(7)
#define MEMDESC_FLAGS_GUEST_ALLOCATED              NVBIT64(8)
#define MEMDESC_FLAGS_KERNEL_MODE                  NVBIT64(9)
#define MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS        NVBIT64(10)
#define MEMDESC_FLAGS_ENCRYPTED                    NVBIT64(11)
#define MEMDESC_FLAGS_PAGED_SYSMEM                 NVBIT64(12)
#define MEMDESC_FLAGS_GPU_PRIVILEGED               NVBIT64(13)
#define MEMDESC_FLAGS_PRESERVE_CONTENT_ON_SUSPEND  NVBIT64(14)
#define MEMDESC_FLAGS_DUMMY_TOPLEVEL               NVBIT64(15)

// Don't use the two flags below; they are for memdesc-internal use only.
// These flags will be removed when memory allocation is refactored in RM.
#define MEMDESC_FLAGS_PROVIDE_IOMMU_MAP            NVBIT64(16)
#define MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE        NVBIT64(17)

#define MEMDESC_FLAGS_CUSTOM_HEAP_ACR              NVBIT64(18)

// Allocate in "fast" or "slow" memory when there are multiple grades of memory (like mixed density)
#define MEMDESC_FLAGS_HIGH_PRIORITY                NVBIT64(19)
#define MEMDESC_FLAGS_LOW_PRIORITY                 NVBIT64(20)

// Flag to specify whether the requested size should be rounded to the page size
#define MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE       NVBIT64(21)

#define MEMDESC_FLAGS_CPU_ONLY                     NVBIT64(22)

// This flag is used for a special SYSMEM descriptor that points to a memory
// region allocated externally (e.g. malloc, kmalloc, etc.)
#define MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM           NVBIT64(23)

// Owned by the Physical Memory Allocator (PMA).
#define MEMDESC_FLAGS_ALLOC_PMA_OWNED              NVBIT64(24)

// This flag is part of the Sub-Allocator feature meant to be used by vGPU clients.
// Once vGPU clients allocate a large block of memory for their use, they carve out a small
// portion of it to be used for RM internal allocations originating from a given client. Each
// allocation can choose to use this carved-out memory owned by the client or be part of the
// global heap. Use this flag for an RM internal allocation only when that allocation is tied
// to the lifetime of the client and will be freed before the client is destroyed.
#define MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE      NVBIT64(25)

// This flag specifies that the pages are pinned by another kernel module or API.
// Currently, this flag is used for vGPU on KVM, where RM calls vfio APIs to pin and unpin
// pages instead of using os_lock_user_pages() and os_unlock_user_pages().
#define MEMDESC_FLAGS_FOREIGN_PAGE                 NVBIT64(26)

// These flags are used for SYSMEM descriptors that point to a physical BAR
// range and do not take the usual memory mapping paths. Currently, these are used for vGPU.
#define MEMDESC_FLAGS_BAR0_REFLECT                 NVBIT64(27)
#define MEMDESC_FLAGS_BAR1_REFLECT                 NVBIT64(28)

// This flag is used to create the shared memory required for vGPU operation.
// During RPC and all other shared memory allocations, VF RM sets this flag to instruct the
// MODS layer to create shared memory between the VF process and the PF process.
#define MEMDESC_FLAGS_MODS_SHARED_MEM              NVBIT64(29)

// This flag is set in memdescs that describe client-managed (currently MODS) VPR allocations.
#define MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED    NVBIT64(30)

// This flag is used for a special SYSMEM descriptor that points to the physical BAR
// range of a third-party device.
#define MEMDESC_FLAGS_PEER_IO_MEM                  NVBIT64(31)

// If this flag is set, the RM will only allow read-only CPU user-mappings
// of the descriptor.
#define MEMDESC_FLAGS_USER_READ_ONLY               NVBIT64(32)

// If this flag is set, the RM will only allow read-only DMA mappings
// of the descriptor.
#define MEMDESC_FLAGS_DEVICE_READ_ONLY             NVBIT64(33)

// This flag denotes a memory descriptor that is part of a larger memory descriptor,
// created using NV01_MEMORY_LIST_SYSTEM, NV01_MEMORY_LIST_FBMEM, or NV01_MEMORY_LIST_OBJECT.
#define MEMDESC_FLAGS_LIST_MEMORY                  NVBIT64(34)

// This flag denotes that this memdesc is allocated from a context buffer pool.
// When this flag is set, we expect a pointer to this context buffer pool to be
// cached in the memdesc.
#define MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL        NVBIT64(36)

//
// This flag is used to skip privilege checks for the ADDR_REGMEM mapping type.
// It is useful for cases like UserModeApi where we want to use this memory type
// in a non-privileged user context.
//
#define MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK       NVBIT64(37)

// This flag denotes a memory descriptor of type Display non-ISO.
#define MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO     NVBIT64(38)

// This flag is used to force mapping of coherent sysmem through
// the GMMU over BAR1. This is useful when we need some form
// of special translation of the SYSMEM_COH aperture by the GMMU.
#define MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1         NVBIT64(39)

// This flag is used to override the system memory limit so that the
// allocation falls within the overridden address width.
#define MEMDESC_FLAGS_OVERRIDE_SYSTEM_ADDRESS_LIMIT   NVBIT64(40)

//
// If this flag is set, Linux RM will ensure that the allocated memory is
// 32-bit addressable.
//
#define MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE      NVBIT64(41)

//
// If this flag is set, the memory is registered with GSP.
//
#define MEMDESC_FLAGS_REGISTERED_TO_GSP            NVBIT64(42)

//
// If this flag is set, it indicates that the memory associated with
// this descriptor was allocated from local EGM.
//
#define MEMDESC_FLAGS_ALLOC_FROM_EGM               NVBIT64(43)

//
// Indicates that this memdesc is tracking a client sysmem allocation, as
// opposed to an RM-internal sysmem allocation.
//
#define MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT        NVBIT64(44)

//
// Clients (including RM) should set this flag to request allocations in
// unprotected memory. This is required for Confidential Compute cases.
//
#define MEMDESC_FLAGS_ALLOC_IN_UNPROTECTED_MEMORY   NVBIT64(45)

//
// The following is a special use case for sharing memory between
// the GPU and a WSL client. There is no IOMMU-compliant support
// currently for this, so a WAR is required for r515. The intent
// is to remove this by r525.
//
#define MEMDESC_FLAGS_WSL_SHARED_MEMORY             NVBIT64(46)

//
// Skip IOMMU mapping creation during alloc for sysmem.
// A mapping might be requested later with custom parameters.
//
#define MEMDESC_FLAGS_SKIP_IOMMU_MAPPING            NVBIT64(47)

//
// Special case to allocate the runlists for guests from their GPA.
// In MODS, a VM's GPA is allocated from the subheap, so this flag forces
// the memdesc to be allocated from the subheap.
//
#define MEMDESC_FLAGS_FORCE_ALLOC_FROM_SUBHEAP      NVBIT64(48)

//
// Indicates whether the memdesc needs to restore the PTE kind in static
// BAR1 mode when it is freed.
//
#define MEMDESC_FLAGS_RESTORE_PTE_KIND_ON_FREE      NVBIT64(49)
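
/*
 * Usage sketch (illustrative only): combining MEMDESC_FLAGS_* values at
 * allocation time. The memdescCreate/memdescAlloc/memdescFree/memdescDestroy
 * calls are assumed from their declarations earlier in this header, and
 * RM_PAGE_SIZE/NV_MEMORY_CACHED from the surrounding codebase; the size,
 * alignment, cache attribute, and flag combination are arbitrary examples.
 *
 *     MEMORY_DESCRIPTOR *pMemDesc = NULL;
 *     NV_STATUS status = memdescCreate(&pMemDesc, pGpu,
 *                                      0x1000,            // size in bytes
 *                                      RM_PAGE_SIZE,      // alignment
 *                                      NV_TRUE,           // physically contiguous
 *                                      ADDR_SYSMEM,       // address space
 *                                      NV_MEMORY_CACHED,  // CPU cache attribute
 *                                      MEMDESC_FLAGS_LOST_ON_SUSPEND |
 *                                      MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC);
 *     if (status == NV_OK)
 *     {
 *         status = memdescAlloc(pMemDesc);
 *         // ... use the allocation ...
 *         memdescFree(pMemDesc);
 *         memdescDestroy(pMemDesc);
 *     }
 */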


#endif // _MEMDESC_H_

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_MEM_DESC_NVOC_H_