1 #ifndef _G_VIRT_MEM_ALLOCATOR_NVOC_H_
2 #define _G_VIRT_MEM_ALLOCATOR_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_virt_mem_allocator_nvoc.h"
33 
34 #ifndef VIRT_MEM_ALLOCATOR_H
35 #define VIRT_MEM_ALLOCATOR_H
36 
37 /**************** Resource Manager Defines and Structures ******************\
38 *                                                                          *
39 *       Defines and structures used for the VirtMemAllocator Object.       *
40 *                                                                          *
41 \***************************************************************************/
42 
43 #include "kernel/core/core.h"
44 #include "kernel/core/info_block.h"
45 #include "kernel/gpu/disp/disp_objs.h"
46 #include "kernel/gpu/eng_state.h"
47 #include "kernel/gpu/fifo/kernel_channel.h"
48 #include "kernel/gpu/gpu.h"
49 #include "kernel/gpu/mem_mgr/virt_mem_allocator_common.h"
50 #include "kernel/mem_mgr/vaspace.h"
51 #include "kernel/mem_mgr/virtual_mem.h"
52 #include "kernel/rmapi/control.h"
53 #include "kernel/rmapi/mapping_list.h"
54 
55 typedef struct DMA_PAGE_ARRAY DMA_PAGE_ARRAY;
56 
57 //
// DMA mapping calls can invalidate the TLB synchronously, which always leaves
// the TLB in a consistent state with the PTEs.  For performance reasons we
// sometimes defer the TLB invalidation when multiple mappings will be
// performed before any of them are used.  Please use deferred invalidates
// with care.
62 //
63 enum
64 {
65     DMA_TLB_INVALIDATE = 0,
66     DMA_DEFER_TLB_INVALIDATE = 1
67 };
68 
69 //
70 // aperture capabilities
71 //
72 #define DMA_GPU_GART_CAPS_SNOOP                 0x00000001
73 #define DMA_GPU_GART_CAPS_NOSNOOP               0x00000002
74 
75 // The parameters for dmaAllocBar1P2PMapping
76 typedef struct _def_dma_bar1p2p_mapping_params
77 {
    struct OBJVASPACE *pVas;                // Virtual address space for the request
    struct OBJGPU *pPeerGpu;                // The target GPU which owns pPeerMemDesc
    MEMORY_DESCRIPTOR *pPeerMemDesc;        // The memdesc of the target GPU vidmem
    MEMORY_DESCRIPTOR *pMemDescOut;         // The new memdesc of the mapped BAR1 region on the target GPU
    NvU32 flags;                            // The flags used for the peer mapping
    NvU32 flagsOut;                         // The new flags for the new pPeerMemDesc
    NvU64 offset;                           // The offset requested by the client
    NvU64 offsetOut;                        // The offset adjusted for the new BAR1 surface mapping
    NvU64 length;                           // The length requested by the client
    CLI_DMA_MAPPING_INFO *pDmaMappingInfo;  // The DMA mapping info structure
} DMA_BAR1P2P_MAPPING_PRARAMS;
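
//
// Illustrative sketch (assumptions noted): filling in the input fields of
// DMA_BAR1P2P_MAPPING_PRARAMS and calling the dmaAllocBar1P2PMapping_HAL()
// wrapper defined later in this header.  The surrounding variables (pVas,
// pPeerGpu, pPeerMemDesc, flags, offset, length, pDmaMappingInfo) are assumed
// to already exist in the caller.
//
//     DMA_BAR1P2P_MAPPING_PRARAMS params = {0};
//     params.pVas            = pVas;
//     params.pPeerGpu        = pPeerGpu;
//     params.pPeerMemDesc    = pPeerMemDesc;
//     params.flags           = flags;
//     params.offset          = offset;
//     params.length          = length;
//     params.pDmaMappingInfo = pDmaMappingInfo;
//     status = dmaAllocBar1P2PMapping_HAL(pGpu, pDma, &params);
//     // On success, params.pMemDescOut, params.flagsOut and params.offsetOut
//     // describe the new BAR1 mapping on the peer GPU.
//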
89 
90 
91 #ifdef NVOC_VIRT_MEM_ALLOCATOR_H_PRIVATE_ACCESS_ALLOWED
92 #define PRIVATE_FIELD(x) x
93 #else
94 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
95 #endif
96 struct VirtMemAllocator {
97     const struct NVOC_RTTI *__nvoc_rtti;
98     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
99     struct Object *__nvoc_pbase_Object;
100     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
101     struct VirtMemAllocator *__nvoc_pbase_VirtMemAllocator;
102     NV_STATUS (*__dmaConstructEngine__)(struct OBJGPU *, struct VirtMemAllocator *, ENGDESCRIPTOR);
103     NV_STATUS (*__dmaStateInitLocked__)(struct OBJGPU *, struct VirtMemAllocator *);
104     NV_STATUS (*__dmaAllocBar1P2PMapping__)(struct OBJGPU *, struct VirtMemAllocator *, DMA_BAR1P2P_MAPPING_PRARAMS *);
105     void (*__dmaFreeBar1P2PMapping__)(struct VirtMemAllocator *, CLI_DMA_MAPPING_INFO *);
106     NV_STATUS (*__dmaStatePostLoad__)(struct OBJGPU *, struct VirtMemAllocator *, NvU32);
107     NV_STATUS (*__dmaStateLoad__)(POBJGPU, struct VirtMemAllocator *, NvU32);
108     NV_STATUS (*__dmaStateUnload__)(POBJGPU, struct VirtMemAllocator *, NvU32);
109     NV_STATUS (*__dmaStatePreLoad__)(POBJGPU, struct VirtMemAllocator *, NvU32);
110     NV_STATUS (*__dmaStatePostUnload__)(POBJGPU, struct VirtMemAllocator *, NvU32);
111     void (*__dmaStateDestroy__)(POBJGPU, struct VirtMemAllocator *);
112     NV_STATUS (*__dmaStatePreUnload__)(POBJGPU, struct VirtMemAllocator *, NvU32);
113     NV_STATUS (*__dmaStateInitUnlocked__)(POBJGPU, struct VirtMemAllocator *);
114     void (*__dmaInitMissing__)(POBJGPU, struct VirtMemAllocator *);
115     NV_STATUS (*__dmaStatePreInitLocked__)(POBJGPU, struct VirtMemAllocator *);
116     NV_STATUS (*__dmaStatePreInitUnlocked__)(POBJGPU, struct VirtMemAllocator *);
117     NvBool (*__dmaIsPresent__)(POBJGPU, struct VirtMemAllocator *);
118     NvBool PDB_PROP_DMA_MMU_INVALIDATE_DISABLE;
119     NvBool PDB_PROP_DMA_ENFORCE_32BIT_POINTER;
120     NvBool PDB_PROP_DMA_MEMORY_MAP_OVERRIDE;
121     NvBool PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED;
122     NvBool PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL;
123     NvBool PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE;
124     NvBool PDB_PROP_DMA_RESTRICT_VA_RANGE;
125     NvBool PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED;
126     NvBool bMemoryMapperApiEnabled;
127     NvU32 gpuGartCaps;
128     NvU32 increaseRsvdPages;
129     struct ENG_INFO_LINK_NODE *infoList;
130 };
131 
132 #ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
133 #define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
134 typedef struct VirtMemAllocator VirtMemAllocator;
135 #endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */
136 
137 #ifndef __nvoc_class_id_VirtMemAllocator
138 #define __nvoc_class_id_VirtMemAllocator 0x899e48
139 #endif /* __nvoc_class_id_VirtMemAllocator */
140 
141 extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtMemAllocator;
142 
143 #define __staticCast_VirtMemAllocator(pThis) \
144     ((pThis)->__nvoc_pbase_VirtMemAllocator)
145 
146 #ifdef __nvoc_virt_mem_allocator_h_disabled
147 #define __dynamicCast_VirtMemAllocator(pThis) ((VirtMemAllocator*)NULL)
148 #else //__nvoc_virt_mem_allocator_h_disabled
149 #define __dynamicCast_VirtMemAllocator(pThis) \
150     ((VirtMemAllocator*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VirtMemAllocator)))
151 #endif //__nvoc_virt_mem_allocator_h_disabled
152 
153 #define PDB_PROP_DMA_MMU_INVALIDATE_DISABLE_BASE_CAST
154 #define PDB_PROP_DMA_MMU_INVALIDATE_DISABLE_BASE_NAME PDB_PROP_DMA_MMU_INVALIDATE_DISABLE
155 #define PDB_PROP_DMA_RESTRICT_VA_RANGE_BASE_CAST
156 #define PDB_PROP_DMA_RESTRICT_VA_RANGE_BASE_NAME PDB_PROP_DMA_RESTRICT_VA_RANGE
157 #define PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_BASE_CAST
158 #define PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_BASE_NAME PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL
159 #define PDB_PROP_DMA_ENFORCE_32BIT_POINTER_BASE_CAST
160 #define PDB_PROP_DMA_ENFORCE_32BIT_POINTER_BASE_NAME PDB_PROP_DMA_ENFORCE_32BIT_POINTER
161 #define PDB_PROP_DMA_MEMORY_MAP_OVERRIDE_BASE_CAST
162 #define PDB_PROP_DMA_MEMORY_MAP_OVERRIDE_BASE_NAME PDB_PROP_DMA_MEMORY_MAP_OVERRIDE
163 #define PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED_BASE_CAST
164 #define PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED_BASE_NAME PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED
165 #define PDB_PROP_DMA_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
166 #define PDB_PROP_DMA_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
167 #define PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE_BASE_CAST
168 #define PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE_BASE_NAME PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE
169 #define PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED_BASE_CAST
170 #define PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED_BASE_NAME PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED
171 
172 NV_STATUS __nvoc_objCreateDynamic_VirtMemAllocator(VirtMemAllocator**, Dynamic*, NvU32, va_list);
173 
174 NV_STATUS __nvoc_objCreate_VirtMemAllocator(VirtMemAllocator**, Dynamic*, NvU32);
175 #define __objCreate_VirtMemAllocator(ppNewObj, pParent, createFlags) \
176     __nvoc_objCreate_VirtMemAllocator((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
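
//
// Illustrative only: the generated constructor can be invoked through the
// __objCreate_VirtMemAllocator() wrapper above.  pParent is assumed to be a
// valid NVOC parent object; in practice engine objects are created by RM
// itself rather than by clients of this header.
//
//     VirtMemAllocator *pDma = NULL;
//     NV_STATUS status = __objCreate_VirtMemAllocator(&pDma, pParent, 0);
//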
177 
178 #define dmaConstructEngine(pGpu, pDma, arg0) dmaConstructEngine_DISPATCH(pGpu, pDma, arg0)
179 #define dmaStateInitLocked(pGpu, pDma) dmaStateInitLocked_DISPATCH(pGpu, pDma)
180 #define dmaAllocBar1P2PMapping(pGpu, pDma, params) dmaAllocBar1P2PMapping_DISPATCH(pGpu, pDma, params)
181 #define dmaAllocBar1P2PMapping_HAL(pGpu, pDma, params) dmaAllocBar1P2PMapping_DISPATCH(pGpu, pDma, params)
182 #define dmaFreeBar1P2PMapping(pDma, arg0) dmaFreeBar1P2PMapping_DISPATCH(pDma, arg0)
183 #define dmaFreeBar1P2PMapping_HAL(pDma, arg0) dmaFreeBar1P2PMapping_DISPATCH(pDma, arg0)
184 #define dmaStatePostLoad(pGpu, pDma, arg0) dmaStatePostLoad_DISPATCH(pGpu, pDma, arg0)
185 #define dmaStatePostLoad_HAL(pGpu, pDma, arg0) dmaStatePostLoad_DISPATCH(pGpu, pDma, arg0)
186 #define dmaStateLoad(pGpu, pEngstate, arg0) dmaStateLoad_DISPATCH(pGpu, pEngstate, arg0)
187 #define dmaStateUnload(pGpu, pEngstate, arg0) dmaStateUnload_DISPATCH(pGpu, pEngstate, arg0)
188 #define dmaStatePreLoad(pGpu, pEngstate, arg0) dmaStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
189 #define dmaStatePostUnload(pGpu, pEngstate, arg0) dmaStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
190 #define dmaStateDestroy(pGpu, pEngstate) dmaStateDestroy_DISPATCH(pGpu, pEngstate)
191 #define dmaStatePreUnload(pGpu, pEngstate, arg0) dmaStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
192 #define dmaStateInitUnlocked(pGpu, pEngstate) dmaStateInitUnlocked_DISPATCH(pGpu, pEngstate)
193 #define dmaInitMissing(pGpu, pEngstate) dmaInitMissing_DISPATCH(pGpu, pEngstate)
194 #define dmaStatePreInitLocked(pGpu, pEngstate) dmaStatePreInitLocked_DISPATCH(pGpu, pEngstate)
195 #define dmaStatePreInitUnlocked(pGpu, pEngstate) dmaStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
196 #define dmaIsPresent(pGpu, pEngstate) dmaIsPresent_DISPATCH(pGpu, pEngstate)
197 NV_STATUS dmaInit_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma);
198 
199 
200 #ifdef __nvoc_virt_mem_allocator_h_disabled
201 static inline NV_STATUS dmaInit(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) {
202     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
203     return NV_ERR_NOT_SUPPORTED;
204 }
205 #else //__nvoc_virt_mem_allocator_h_disabled
206 #define dmaInit(pGpu, pDma) dmaInit_GM107(pGpu, pDma)
207 #endif //__nvoc_virt_mem_allocator_h_disabled
208 
209 #define dmaInit_HAL(pGpu, pDma) dmaInit(pGpu, pDma)
210 
211 NV_STATUS dmaConstructHal_VGPUSTUB(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma);
212 
213 static inline NV_STATUS dmaConstructHal_56cd7a(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) {
214     return NV_OK;
215 }
216 
217 
218 #ifdef __nvoc_virt_mem_allocator_h_disabled
219 static inline NV_STATUS dmaConstructHal(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) {
220     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
221     return NV_ERR_NOT_SUPPORTED;
222 }
223 #else //__nvoc_virt_mem_allocator_h_disabled
224 #define dmaConstructHal(pGpu, pDma) dmaConstructHal_VGPUSTUB(pGpu, pDma)
225 #endif //__nvoc_virt_mem_allocator_h_disabled
226 
227 #define dmaConstructHal_HAL(pGpu, pDma) dmaConstructHal(pGpu, pDma)
228 
229 void dmaDestruct_GM107(struct VirtMemAllocator *pDma);
230 
231 
232 #define __nvoc_dmaDestruct(pDma) dmaDestruct_GM107(pDma)
233 NV_STATUS dmaInitGart_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma);
234 
235 
236 #ifdef __nvoc_virt_mem_allocator_h_disabled
237 static inline NV_STATUS dmaInitGart(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) {
238     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
239     return NV_ERR_NOT_SUPPORTED;
240 }
241 #else //__nvoc_virt_mem_allocator_h_disabled
242 #define dmaInitGart(pGpu, pDma) dmaInitGart_GM107(pGpu, pDma)
243 #endif //__nvoc_virt_mem_allocator_h_disabled
244 
245 #define dmaInitGart_HAL(pGpu, pDma) dmaInitGart(pGpu, pDma)
246 
247 NV_STATUS dmaAllocMapping_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, MEMORY_DESCRIPTOR *arg1, NvU64 *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4, NvU32 arg5);
248 
249 
250 #ifdef __nvoc_virt_mem_allocator_h_disabled
251 static inline NV_STATUS dmaAllocMapping(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, MEMORY_DESCRIPTOR *arg1, NvU64 *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4, NvU32 arg5) {
252     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
253     return NV_ERR_NOT_SUPPORTED;
254 }
255 #else //__nvoc_virt_mem_allocator_h_disabled
256 #define dmaAllocMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5) dmaAllocMapping_GM107(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5)
257 #endif //__nvoc_virt_mem_allocator_h_disabled
258 
259 #define dmaAllocMapping_HAL(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5) dmaAllocMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5)
260 
261 NV_STATUS dmaFreeMapping_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, NvU64 arg1, MEMORY_DESCRIPTOR *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4);
262 
263 
264 #ifdef __nvoc_virt_mem_allocator_h_disabled
265 static inline NV_STATUS dmaFreeMapping(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, NvU64 arg1, MEMORY_DESCRIPTOR *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4) {
266     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
267     return NV_ERR_NOT_SUPPORTED;
268 }
269 #else //__nvoc_virt_mem_allocator_h_disabled
270 #define dmaFreeMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4) dmaFreeMapping_GM107(pGpu, pDma, arg0, arg1, arg2, arg3, arg4)
271 #endif //__nvoc_virt_mem_allocator_h_disabled
272 
273 #define dmaFreeMapping_HAL(pGpu, pDma, arg0, arg1, arg2, arg3, arg4) dmaFreeMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4)
274 
275 NV_STATUS dmaUpdateVASpace_GF100(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, MEMORY_DESCRIPTOR *pMemDesc, NvU8 *tgtPteMem, NvU64 vAddr, NvU64 vAddrLimit, NvU32 flags, DMA_PAGE_ARRAY *pPageArray, NvU32 overmapPteMod, COMPR_INFO *pComprInfo, NvU64 surfaceOffset, NvU32 valid, NvU32 aperture, NvU32 peer, NvU64 fabricAddr, NvU32 deferInvalidate, NvBool bSparse, NvU64 pageSize);
276 
277 
278 #ifdef __nvoc_virt_mem_allocator_h_disabled
279 static inline NV_STATUS dmaUpdateVASpace(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, MEMORY_DESCRIPTOR *pMemDesc, NvU8 *tgtPteMem, NvU64 vAddr, NvU64 vAddrLimit, NvU32 flags, DMA_PAGE_ARRAY *pPageArray, NvU32 overmapPteMod, COMPR_INFO *pComprInfo, NvU64 surfaceOffset, NvU32 valid, NvU32 aperture, NvU32 peer, NvU64 fabricAddr, NvU32 deferInvalidate, NvBool bSparse, NvU64 pageSize) {
280     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
281     return NV_ERR_NOT_SUPPORTED;
282 }
283 #else //__nvoc_virt_mem_allocator_h_disabled
284 #define dmaUpdateVASpace(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse, pageSize) dmaUpdateVASpace_GF100(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse, pageSize)
285 #endif //__nvoc_virt_mem_allocator_h_disabled
286 
287 #define dmaUpdateVASpace_HAL(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse, pageSize) dmaUpdateVASpace(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse, pageSize)
288 
289 NV_STATUS dmaXlateVAtoPAforChannel_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct KernelChannel *pKernelChannel, NvU64 vAddr, NvU64 *pAddr, NvU32 *memType);
290 
291 
292 #ifdef __nvoc_virt_mem_allocator_h_disabled
293 static inline NV_STATUS dmaXlateVAtoPAforChannel(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct KernelChannel *pKernelChannel, NvU64 vAddr, NvU64 *pAddr, NvU32 *memType) {
294     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
295     return NV_ERR_NOT_SUPPORTED;
296 }
297 #else //__nvoc_virt_mem_allocator_h_disabled
298 #define dmaXlateVAtoPAforChannel(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType) dmaXlateVAtoPAforChannel_GM107(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType)
299 #endif //__nvoc_virt_mem_allocator_h_disabled
300 
301 #define dmaXlateVAtoPAforChannel_HAL(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType) dmaXlateVAtoPAforChannel(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType)
302 
303 NvU32 dmaGetPTESize_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma);
304 
305 
306 #ifdef __nvoc_virt_mem_allocator_h_disabled
307 static inline NvU32 dmaGetPTESize(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) {
308     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
309     return 0;
310 }
311 #else //__nvoc_virt_mem_allocator_h_disabled
312 #define dmaGetPTESize(pGpu, pDma) dmaGetPTESize_GM107(pGpu, pDma)
313 #endif //__nvoc_virt_mem_allocator_h_disabled
314 
315 #define dmaGetPTESize_HAL(pGpu, pDma) dmaGetPTESize(pGpu, pDma)
316 
317 NV_STATUS dmaMapBuffer_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pVaddr, NvU32 allocFlags, NvU32 mapFlags);
318 
319 
320 #ifdef __nvoc_virt_mem_allocator_h_disabled
321 static inline NV_STATUS dmaMapBuffer(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pVaddr, NvU32 allocFlags, NvU32 mapFlags) {
322     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
323     return NV_ERR_NOT_SUPPORTED;
324 }
325 #else //__nvoc_virt_mem_allocator_h_disabled
326 #define dmaMapBuffer(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags) dmaMapBuffer_GM107(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags)
327 #endif //__nvoc_virt_mem_allocator_h_disabled
328 
329 #define dmaMapBuffer_HAL(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags) dmaMapBuffer(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags)
330 
331 void dmaUnmapBuffer_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, NvU64 vaddr);
332 
333 
334 #ifdef __nvoc_virt_mem_allocator_h_disabled
335 static inline void dmaUnmapBuffer(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, NvU64 vaddr) {
336     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
337 }
338 #else //__nvoc_virt_mem_allocator_h_disabled
339 #define dmaUnmapBuffer(pGpu, pDma, pVAS, vaddr) dmaUnmapBuffer_GM107(pGpu, pDma, pVAS, vaddr)
340 #endif //__nvoc_virt_mem_allocator_h_disabled
341 
342 #define dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, vaddr) dmaUnmapBuffer(pGpu, pDma, pVAS, vaddr)
343 
344 static inline struct OBJVASPACE *dmaGetPrivateVAS_fa6e19(struct VirtMemAllocator *pDma) {
345     return ((void *)0);
346 }
347 
348 
349 #ifdef __nvoc_virt_mem_allocator_h_disabled
350 static inline struct OBJVASPACE *dmaGetPrivateVAS(struct VirtMemAllocator *pDma) {
351     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
352     return NULL;
353 }
354 #else //__nvoc_virt_mem_allocator_h_disabled
355 #define dmaGetPrivateVAS(pDma) dmaGetPrivateVAS_fa6e19(pDma)
356 #endif //__nvoc_virt_mem_allocator_h_disabled
357 
358 #define dmaGetPrivateVAS_HAL(pDma) dmaGetPrivateVAS(pDma)
359 
360 NV_STATUS dmaConstructEngine_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, ENGDESCRIPTOR arg0);
361 
362 static inline NV_STATUS dmaConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, ENGDESCRIPTOR arg0) {
363     return pDma->__dmaConstructEngine__(pGpu, pDma, arg0);
364 }
365 
366 NV_STATUS dmaStateInitLocked_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma);
367 
368 static inline NV_STATUS dmaStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) {
369     return pDma->__dmaStateInitLocked__(pGpu, pDma);
370 }
371 
372 NV_STATUS dmaAllocBar1P2PMapping_GH100(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, DMA_BAR1P2P_MAPPING_PRARAMS *params);
373 
374 static inline NV_STATUS dmaAllocBar1P2PMapping_46f6a7(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, DMA_BAR1P2P_MAPPING_PRARAMS *params) {
375     return NV_ERR_NOT_SUPPORTED;
376 }
377 
378 static inline NV_STATUS dmaAllocBar1P2PMapping_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, DMA_BAR1P2P_MAPPING_PRARAMS *params) {
379     return pDma->__dmaAllocBar1P2PMapping__(pGpu, pDma, params);
380 }
381 
382 void dmaFreeBar1P2PMapping_GH100(struct VirtMemAllocator *pDma, CLI_DMA_MAPPING_INFO *arg0);
383 
384 static inline void dmaFreeBar1P2PMapping_b3696a(struct VirtMemAllocator *pDma, CLI_DMA_MAPPING_INFO *arg0) {
385     return;
386 }
387 
388 static inline void dmaFreeBar1P2PMapping_DISPATCH(struct VirtMemAllocator *pDma, CLI_DMA_MAPPING_INFO *arg0) {
389     pDma->__dmaFreeBar1P2PMapping__(pDma, arg0);
390 }
391 
392 NV_STATUS dmaStatePostLoad_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, NvU32 arg0);
393 
394 static inline NV_STATUS dmaStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, NvU32 arg0) {
395     return pDma->__dmaStatePostLoad__(pGpu, pDma, arg0);
396 }
397 
398 static inline NV_STATUS dmaStateLoad_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) {
399     return pEngstate->__dmaStateLoad__(pGpu, pEngstate, arg0);
400 }
401 
402 static inline NV_STATUS dmaStateUnload_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) {
403     return pEngstate->__dmaStateUnload__(pGpu, pEngstate, arg0);
404 }
405 
406 static inline NV_STATUS dmaStatePreLoad_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) {
407     return pEngstate->__dmaStatePreLoad__(pGpu, pEngstate, arg0);
408 }
409 
410 static inline NV_STATUS dmaStatePostUnload_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) {
411     return pEngstate->__dmaStatePostUnload__(pGpu, pEngstate, arg0);
412 }
413 
414 static inline void dmaStateDestroy_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) {
415     pEngstate->__dmaStateDestroy__(pGpu, pEngstate);
416 }
417 
418 static inline NV_STATUS dmaStatePreUnload_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) {
419     return pEngstate->__dmaStatePreUnload__(pGpu, pEngstate, arg0);
420 }
421 
422 static inline NV_STATUS dmaStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) {
423     return pEngstate->__dmaStateInitUnlocked__(pGpu, pEngstate);
424 }
425 
426 static inline void dmaInitMissing_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) {
427     pEngstate->__dmaInitMissing__(pGpu, pEngstate);
428 }
429 
430 static inline NV_STATUS dmaStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) {
431     return pEngstate->__dmaStatePreInitLocked__(pGpu, pEngstate);
432 }
433 
434 static inline NV_STATUS dmaStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) {
435     return pEngstate->__dmaStatePreInitUnlocked__(pGpu, pEngstate);
436 }
437 
438 static inline NvBool dmaIsPresent_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) {
439     return pEngstate->__dmaIsPresent__(pGpu, pEngstate);
440 }
441 
442 NV_STATUS dmaAllocMap_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, Memory *arg2, CLI_DMA_MAPPING_INFO *arg3);
443 
444 #ifdef __nvoc_virt_mem_allocator_h_disabled
445 static inline NV_STATUS dmaAllocMap(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, Memory *arg2, CLI_DMA_MAPPING_INFO *arg3) {
446     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
447     return NV_ERR_NOT_SUPPORTED;
448 }
449 #else //__nvoc_virt_mem_allocator_h_disabled
450 #define dmaAllocMap(pGpu, pDma, arg0, arg1, arg2, arg3) dmaAllocMap_IMPL(pGpu, pDma, arg0, arg1, arg2, arg3)
451 #endif //__nvoc_virt_mem_allocator_h_disabled
452 
453 NV_STATUS dmaFreeMap_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, CLI_DMA_MAPPING_INFO *arg2, NvU32 flags);
454 
455 #ifdef __nvoc_virt_mem_allocator_h_disabled
456 static inline NV_STATUS dmaFreeMap(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, CLI_DMA_MAPPING_INFO *arg2, NvU32 flags) {
457     NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!");
458     return NV_ERR_NOT_SUPPORTED;
459 }
460 #else //__nvoc_virt_mem_allocator_h_disabled
461 #define dmaFreeMap(pGpu, pDma, arg0, arg1, arg2, flags) dmaFreeMap_IMPL(pGpu, pDma, arg0, arg1, arg2, flags)
462 #endif //__nvoc_virt_mem_allocator_h_disabled
463 
464 #undef PRIVATE_FIELD
465 
466 
467 //
468 // Virtual Memory Manager
469 //
470 
471 //
472 // The VA space is the root of an address space.
473 //
// Page size indices for the supported page sizes:
475 //
476 #define VAS_PAGESIZE_IDX_4K    0
477 #define VAS_PAGESIZE_IDX_BIG   1
478 #define VAS_PAGESIZE_IDX_HUGE  2
479 #define VAS_PAGESIZE_IDX_512M  3
480 
481 // VMM-TODO Used by old VAS Object
#define VAS_NUM_PAGESIZE_TYPES (VAS_PAGESIZE_IDX_BIG + 1)
483 #define VAS_PAGESIZE_IDX(PS)   ((PS) != 4096)
484 
485 // Convert a page size mask to a string for debug prints.
486 #define VAS_PAGESIZE_MASK_STR(mask)                      \
487     (!ONEBITSET(mask) ? "BOTH" :                         \
        (((mask) == RM_PAGE_SIZE) ? "4KB" : "BIG"))
489 
490 // Value to pass to dmaAllocVASpace_HAL for both (default) page size.
491 #define VAS_ALLOC_PAGESIZE_BOTH  (0x0)
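
//
// Illustrative sketch: VAS_PAGESIZE_IDX() maps 4KB to index 0 and any other
// (big) page size to index 1, matching VAS_PAGESIZE_IDX_4K/_BIG above.
// bigPageSize is a hypothetical variable holding a non-4KB page size.
//
//     NvU32 counts[VAS_NUM_PAGESIZE_TYPES] = {0};
//     counts[VAS_PAGESIZE_IDX(RM_PAGE_SIZE)]++;  // increments index 0 (4KB)
//     counts[VAS_PAGESIZE_IDX(bigPageSize)]++;   // increments index 1 (big)
//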
492 
493 typedef enum
494 {
495     VASPACE_BIG_PAGE_SIZE_64K_IDX     = 0,
496     VASPACE_BIG_PAGE_SIZE_128K_IDX    = 1,
497     VASPACE_NUM_BIG_PAGE_TYPES        = 2
} VASPACE_BIG_PAGE_SIZE_IDX;
499 
500 /*!
501  * Abstracts an array of physical page addresses.
502  */
503 struct DMA_PAGE_ARRAY
504 {
505     void        *pData;       //!< Array of PTE addresses or opaque OS-specific data.
506     RmPhysAddr   orMask;      //!< Mask to be bitwise-ORed onto each page address.
507     NvU32        startIndex;  //!< Base index into the pData array.
508     NvU32        count;       //!< Number of pages represented by this array.
    NvBool       bOsFormat;   //!< Indicates whether pData is opaque OS-specific data.
    NvBool       bDuplicate;  //!< Indicates that the address of the first page should be duplicated.
511     OS_GPU_INFO *pOsGpuInfo;  //!< OS-specific GPU info needed for IOMMU on Windows.
512 };
513 
514 // page array operations
515 void dmaPageArrayInit(DMA_PAGE_ARRAY *pPageArray, void *pPageData, NvU32 pageCount);
516 void dmaPageArrayInitFromMemDesc(DMA_PAGE_ARRAY *pPageArray,
517                                  MEMORY_DESCRIPTOR *pMemDesc,
518                                  ADDRESS_TRANSLATION addressTranslation);
519 RmPhysAddr dmaPageArrayGetPhysAddr(DMA_PAGE_ARRAY *pPageArray, NvU32 pageIndex);
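
//
// Illustrative sketch: building a DMA_PAGE_ARRAY from a caller-owned list of
// page addresses and reading the addresses back.  The local pages[] array is
// hypothetical; in RM the array is typically initialized from a memory
// descriptor via dmaPageArrayInitFromMemDesc().
//
//     RmPhysAddr pages[4] = { /* physical page addresses */ };
//     DMA_PAGE_ARRAY pageArray;
//     dmaPageArrayInit(&pageArray, pages, 4);
//     for (i = 0; i < pageArray.count; i++)
//     {
//         RmPhysAddr pa = dmaPageArrayGetPhysAddr(&pageArray, i);
//         // pageArray.orMask, if set, is documented to be OR'ed onto each
//         // page address.
//     }
//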
520 
521 /*!
522  * Indicates that if the VA range being initialized is sparse,
523  * the sparse bit should be set for the range.
524  */
525 #define DMA_INIT_VAS_FLAGS_ENABLE_SPARSE  NVBIT(0)
526 
527 //
528 // hal.dmaUpdateVASpace() flags
529 //
530 #define DMA_UPDATE_VASPACE_FLAGS_NONE               0
531 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_PADDR       NVBIT(0)
532 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_COMPR       NVBIT(1)
533 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_ACCESS      NVBIT(2)
534 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_VALID       NVBIT(3)
535 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_PRIV        NVBIT(4)
536 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_KIND        NVBIT(5)
537 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_APERTURE    NVBIT(6)
538 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_PEER        NVBIT(7)
539 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_ENCRYPTED   NVBIT(8)
540 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_TLB_LOCK    NVBIT(9)
#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_CACHE       NVBIT(10)         // VOLATILE on Fermi
542 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_SHADER_ACCESS NVBIT(11)       // Kepler shader access
543 #define DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL         MASK_BITS(11)
544 
545 #define DMA_UPDATE_VASPACE_FLAGS_SKIP_4K_PTE_CHECK  NVBIT(12)
546 #define DMA_UPDATE_VASPACE_FLAGS_INDIRECT_PEER      NVBIT(22)
547 #define DMA_UPDATE_VASPACE_FLAGS_ALLOW_REMAP        NVBIT(23)
548 #define DMA_UPDATE_VASPACE_FLAGS_UNALIGNED_COMP     NVBIT(24)
549 #define DMA_UPDATE_VASPACE_FLAGS_FILL_PTE_MEM       NVBIT(25)
550 #define DMA_UPDATE_VASPACE_FLAGS_DISABLE_ENCRYPTION NVBIT(26)
551 #define DMA_UPDATE_VASPACE_FLAGS_READ_ONLY          NVBIT(27)
552 #define DMA_UPDATE_VASPACE_FLAGS_PRIV               NVBIT(28)
553 #define DMA_UPDATE_VASPACE_FLAGS_TLB_LOCK           NVBIT(29)
554 #define DMA_UPDATE_VASPACE_FLAGS_SHADER_WRITE_ONLY  NVBIT(30)         // Kepler shader access
555 #define DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY   NVBIT(31)         // Kepler shader access
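
//
// Illustrative sketch: the flags above are independent bits and are combined
// with bitwise OR, e.g. a PTE update that changes the physical address and
// marks the entries valid:
//
//     NvU32 flags = DMA_UPDATE_VASPACE_FLAGS_UPDATE_PADDR |
//                   DMA_UPDATE_VASPACE_FLAGS_UPDATE_VALID;
//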
556 
557 //
558 // hal.dmaAllocVASpace() flags
559 //
560 #define DMA_ALLOC_VASPACE_NONE                      0
561 #define DMA_VA_LIMIT_49B                            NVBIT(0)
562 #define DMA_VA_LIMIT_57B                            NVBIT(1)
563 #define DMA_ALLOC_VASPACE_SIZE_ALIGNED              NVBIT(9)
564 //
// Bug 3610538: For unlinked SLI, clients want to restrict internal buffers to
// the internal VA range so that SLI vaspaces can mirror each other.
567 //
568 #define DMA_ALLOC_VASPACE_USE_RM_INTERNAL_VALIMITS  NVBIT(10)
569 
570 //
571 // Internal device allocation flags
572 //
573 #define NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_NONE                         0
574 #define NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_ENABLE_PRIVILEGED_VASPACE    NVBIT(0)
575 
576 //
577 // UVM privileged region
578 //
579 #define UVM_KERNEL_PRIVILEGED_REGION_START      (0xFFF8000000ULL)
580 #define UVM_KERNEL_PRIVILEGED_REGION_LENGTH     (0x0008000000ULL)
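
//
// Illustrative sketch: checking whether a virtual address falls inside the
// UVM kernel privileged region defined above.
//
//     NvBool bPrivileged = (vAddr >= UVM_KERNEL_PRIVILEGED_REGION_START) &&
//                          (vAddr <  UVM_KERNEL_PRIVILEGED_REGION_START +
//                                    UVM_KERNEL_PRIVILEGED_REGION_LENGTH);
//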
581 
582 #endif // VIRT_MEM_ALLOCATOR_H
583 
584 #ifdef __cplusplus
585 } // extern "C"
586 #endif
587 
588 #endif // _G_VIRT_MEM_ALLOCATOR_NVOC_H_
589