1 #ifndef _G_VASPACE_NVOC_H_
2 #define _G_VASPACE_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 #include "g_vaspace_nvoc.h"
32 
33 #ifndef _VASPACE_H_
34 #define _VASPACE_H_
35 
36 /**************** Resource Manager Defines and Structures ******************\
37 *                                                                           *
38 * Module: VASPACE.H                                                         *
39 *       Defines and structures used for Virtual Address Space Object.       *
40 \***************************************************************************/
41 
42 #include "ctrl/ctrl0080/ctrl0080dma.h"
43 
44 #include "core/core.h"
45 #include "resserv/rs_client.h"
46 #include "containers/eheap_old.h"
47 #include "gpu/mem_mgr/heap_base.h"
48 #include "gpu/mem_mgr/mem_desc.h"
49 
50 
// Forward declarations and NVOC class boilerplate for types referenced below.
typedef struct OBJVASPACE *POBJVASPACE;
typedef struct VASPACE VASPACE, *PVASPACE;
struct VirtMemAllocator;

#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
typedef struct VirtMemAllocator VirtMemAllocator;
#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */

#ifndef __nvoc_class_id_VirtMemAllocator
#define __nvoc_class_id_VirtMemAllocator 0x899e48
#endif /* __nvoc_class_id_VirtMemAllocator */


// Opaque to this header; defined by the MMU walker / DMA control interfaces.
typedef struct MMU_MAP_TARGET       MMU_MAP_TARGET;
typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS;
67 
/*!
 * Per-request option flags for a single VA allocation (consumed by
 * vaspaceAlloc and vaspaceFillAllocParams). Packed as single-bit fields.
 */
typedef struct
{
    NvBool bReverse : 1;
    NvBool bPreferSysmemPageTables : 1;
    NvBool bExternallyManaged : 1;
    NvBool bLazy : 1;
    NvBool bSparse : 1;
    NvBool bPrivileged : 1;
    NvBool bClientAllocation : 1;
    NvBool bFixedAddressRange : 1;
    NvBool bFixedAddressAllocate : 1;
    NvBool bForceContig : 1;
    NvBool bForceNonContig : 1;

    //
    // Using this flag may have security implications. So, use it only when
    // you are sure about its usage.
    //
    NvBool bSkipTlbInvalidateOnFree : 1;
} VAS_ALLOC_FLAGS;
88 
// E-heap owner tags ('nvrm' = RM-owned, 'rsvd' = reserved) for VA heap blocks.
#define VAS_EHEAP_OWNER_NVRM NvU32_BUILD('n','v','r','m')
#define VAS_EHEAP_OWNER_RSVD NvU32_BUILD('r','s','v','d')
91 
/*!
 * Option flags for vaspaceMap requests.
 */
typedef struct
{
    NvBool bRemap : 1; //!< Indicates to override existing mappings.
} VAS_MAP_FLAGS;
96 
/*!
 * Direction of a PTE update; passed to vaspaceInvalidateTlb so implementations
 * can scope the invalidation appropriately.
 */
typedef enum
{
    PTE_UPGRADE,
    PTE_DOWNGRADE
} VAS_PTE_UPDATE_TYPE;
102 
103 //
104 // OBJVASPACE creation flags
105 //
106 // BAR                        Used by one of the BAR apertures
107 // SCRATCH_INVAL              Use scratch page instead of invalid bit
108 // PARTIAL_INVAL              Do not fully invalidate when validating PTE
// MINIMIZE_PTETABLE_SIZE     Size PTE arrays minimally to reduce memory use  *
110 // RETRY_PTE_ALLOC_IN_SYS     PTE allocation falls back to sysmem
111 // FULL_PTE                   Initialize a full PTE
112 // BAR_BAR1                   Used for BAR1
113 // BAR_BAR2                   Used for BAR2 (unused)
114 // BAR_IFB                    Used for IFB
115 // PERFMON                    Used for Perfmon
116 // PMU                        Used for PMU
117 // DEFAULT_SIZE               Ignore varange parameters and use default
118 // SET_MIRRORED               <DEPRECATED.........>
119 //                            This flag will create a privileged PDB as part of this vaspace
120 //                            This new PDB will mirror all of the allocations made in the
121 //                            original PDB. The first PDE is considered privileged for this
122 //                            address space.
123 // SHARED_MANAGEMENT          Enables mode where only a portion of the VAS is managed
124 //                            and the page directory may be allocated/set externally.
125 // ALLOW_ZERO_ADDRESS         Explicitly allows the base VAS address to start at 0.
126 //                            Normally 0 is reserved to distinguish NULL pointers.
127 //
128 // BIG_PAGE_SIZE              Field that specifies the big page size to be used.
129 //                            DEFAULT is used till GM10X. GM20X and later, uses
130 //                            custom value for big page size.
131 //   SIZE_DEFAULT             Lets RM pick the default value
132 //   SIZE_64K                 Uses 64K as big page size for this VA space
133 //   SIZE_128K                Uses 128K as big page size for this VA space
134 //
135 // MMU_FMT_VA_BITS            Selects the MMU format of the VA space by the number
136 //                            of VA bits supported.
137 //   DEFAULT                  RM picks the default for the underlying MMU HW.
138 //   40                       Fermi+ 40-bit (2-level) format.
139 //   49                       Pascal+ 49-bit (5-level) format.
140 //
141 // ENABLE_VMM                 <DEPRECATED.........>
142 //                            Temp flag to enable new VMM code path on select
143 //                            VA spaces (e.g. client but not BAR1/PMU VAS).
144 //
145 // ZERO_OLD_STRUCT            Deprecated.
146 //
147 // ENABLE_FAULTING            This address space is participating in UVM.
148 //                            RM will enable page faulting for all channels that will be
149 //                            associated with this address space.
150 //
151 // IS_UVM_MANAGED             This flag will replace the SET_MIRRORED flag. It is used to
152 //                            denote that this VASpace is participating in UVM.
153 //
154 // ENABLE_ATS                 This address space has ATS enabled.
155 //
156 //
157 // ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR This flag when set will allow page table allocations
158 //                                      to be routed to suballocator of the current process
159 //                                      requesting mapping. If no suballocator, allocations
160 //                                      will fallback to global heap.
161 //
162 // VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB  This flag must be used by the VASs which use
163 //                                            the NVLink MMU.
164 //
// OBJVASPACE creation flags; see the descriptions in the comment block above.
#define VASPACE_FLAGS_NONE                                        0
#define VASPACE_FLAGS_BAR                                         NVBIT(0)
#define VASPACE_FLAGS_SCRATCH_INVAL                               NVBIT(1)
#define VASPACE_FLAGS_ENABLE_ATS                                  NVBIT(2)
#define VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS             NVBIT(3)
#define VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE                      NVBIT(4)
#define VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS                      NVBIT(5)
#define VASPACE_FLAGS_REQUIRE_FIXED_OFFSET                        NVBIT(6)
#define VASPACE_FLAGS_BAR_BAR1                                    NVBIT(7)
#define VASPACE_FLAGS_BAR_BAR2                                    NVBIT(8)
#define VASPACE_FLAGS_BAR_IFB                                     NVBIT(9)
#define VASPACE_FLAGS_PERFMON                                     NVBIT(10)
#define VASPACE_FLAGS_PMU                                         NVBIT(11)
#define VASPACE_FLAGS_DEFAULT_SIZE                                NVBIT(12)
#define VASPACE_FLAGS_DEFAULT_PARAMS                              NVBIT(13)
#define VASPACE_FLAGS_PTETABLE_PMA_MANAGED                        NVBIT(14)
#define VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB                 NVBIT(15)
#define VASPACE_FLAGS_DISABLE_SPLIT_VAS                           NVBIT(16)
#define VASPACE_FLAGS_SET_MIRRORED                                NVBIT(17)
#define VASPACE_FLAGS_SHARED_MANAGEMENT                           NVBIT(18)
#define VASPACE_FLAGS_ALLOW_ZERO_ADDRESS                          NVBIT(19)
#define VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL                          NVBIT(20)
// DRF-style bit-field (bits 22:21) holding the big page size selector.
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE                            22:21
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_DEFAULT                    0x00000000
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_64K                        0x00000001
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_128K                       0x00000002
#define VASPACE_FLAGS_HDA                                         NVBIT(23)
#define VASPACE_FLAGS_FLA                                         NVBIT(24) // Soon to be deprecated and removed.
                                                                            // Used by legacy FLA implementation.
#define VASPACE_FLAGS_HWPM                                        NVBIT(25)
#define VASPACE_FLAGS_ENABLE_VMM                                  NVBIT(26)
#define VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE             NVBIT(27)
#define VASPACE_FLAGS_REVERSE                                     NVBIT(28)
#define VASPACE_FLAGS_ENABLE_FAULTING                             NVBIT(29)
#define VASPACE_FLAGS_IS_EXTERNALLY_OWNED                         NVBIT(30)
#define VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR        NVBIT(31)
201 
202 /*!
203  * Flags for page table memory pools.
204  *
205  * VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY
206  *           Only allocate levels from the top to the specified level only.
207  *           Anything below the specified level is not allocated.
208  */
209 #define VASPACE_RESERVE_FLAGS_NONE                                 (0)
210 #define VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY  NVBIT32(0)
211 
212 /*!
213  * Level of RM-management for a given VA range.
214  *
215  * FULL
216  *      RM manages everything (e.g. PDEs, PTEs).
217  * PDES_ONLY
218  *      RM only manages PDEs (through non-buffer version of UpdatePde2).
219  *      Buffer versions of FillPteMem and UpdatePde2 are still allowed.
220  * NONE
221  *      RM does not manage anything.
222  *      Buffer versions of FillPteMem and UpdatePde2 are still allowed.
223  */
typedef enum
{
    VA_MANAGEMENT_FULL = 0,  //!< RM manages everything (PDEs and PTEs).
    VA_MANAGEMENT_PDES_ONLY, //!< RM manages PDEs only; buffer updates still allowed.
    VA_MANAGEMENT_NONE,      //!< RM manages nothing; buffer updates still allowed.
} VA_MANAGEMENT;
230 
231 /*!
232  * Abstract base class of an RM-managed virtual address space.
233  */
// PRIVATE_FIELD hides members from translation units that have not opted into
// private access; it is #undef'd at the end of this header.
#ifdef NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct OBJVASPACE {
    // NVOC class metadata and base-class bookkeeping pointers.
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct OBJVASPACE *__nvoc_pbase_OBJVASPACE;
    // Per-object virtual method table; each entry is invoked through the
    // corresponding *_DISPATCH thunk defined later in this header.
    NV_STATUS (*__vaspaceConstruct___)(struct OBJVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32);
    NV_STATUS (*__vaspaceAlloc__)(struct OBJVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *);
    NV_STATUS (*__vaspaceFree__)(struct OBJVASPACE *, NvU64);
    NV_STATUS (*__vaspaceApplyDefaultAlignment__)(struct OBJVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *);
    NV_STATUS (*__vaspaceIncAllocRefCnt__)(struct OBJVASPACE *, NvU64);
    NvU64 (*__vaspaceGetVaStart__)(struct OBJVASPACE *);
    NvU64 (*__vaspaceGetVaLimit__)(struct OBJVASPACE *);
    NV_STATUS (*__vaspaceGetVasInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *);
    NvU32 (*__vaspaceGetFlags__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsInternalVaRestricted__)(struct OBJVASPACE *);
    NV_STATUS (*__vaspaceMap__)(struct OBJVASPACE *, struct OBJGPU *, const NvU64, const NvU64, const MMU_MAP_TARGET *, const VAS_MAP_FLAGS);
    void (*__vaspaceUnmap__)(struct OBJVASPACE *, struct OBJGPU *, const NvU64, const NvU64);
    NV_STATUS (*__vaspaceReserveMempool__)(struct OBJVASPACE *, struct OBJGPU *, NvHandle, NvU64, NvU64, NvU32);
    struct OBJEHEAP *(*__vaspaceGetHeap__)(struct OBJVASPACE *);
    NvU64 (*__vaspaceGetMapPageSize__)(struct OBJVASPACE *, struct OBJGPU *, EMEMBLOCK *);
    NvU64 (*__vaspaceGetBigPageSize__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsMirrored__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsFaultCapable__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsExternallyOwned__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsAtsEnabled__)(struct OBJVASPACE *);
    NV_STATUS (*__vaspaceGetPasid__)(struct OBJVASPACE *, NvU32 *);
    PMEMORY_DESCRIPTOR (*__vaspaceGetPageDirBase__)(struct OBJVASPACE *, struct OBJGPU *);
    PMEMORY_DESCRIPTOR (*__vaspaceGetKernelPageDirBase__)(struct OBJVASPACE *, struct OBJGPU *);
    NV_STATUS (*__vaspacePinRootPageDir__)(struct OBJVASPACE *, struct OBJGPU *);
    void (*__vaspaceUnpinRootPageDir__)(struct OBJVASPACE *, struct OBJGPU *);
    void (*__vaspaceInvalidateTlb__)(struct OBJVASPACE *, struct OBJGPU *, VAS_PTE_UPDATE_TYPE);
    NV_STATUS (*__vaspaceGetPageTableInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *);
    NV_STATUS (*__vaspaceGetPteInfo__)(struct OBJVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *, RmPhysAddr *);
    NV_STATUS (*__vaspaceSetPteInfo__)(struct OBJVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *);
    NV_STATUS (*__vaspaceFreeV2__)(struct OBJVASPACE *, NvU64, NvU64 *);
    // Instance state.
    NvU32 gpuMask;                           // GPU mask for this VA space (per name; exact semantics set by subclasses — confirm at construct site)
    ADDRESS_TRANSLATION addressTranslation;  // Accessed via VAS_ADDRESS_TRANSLATION() below
    NvU32 refCnt;                            // Reference count (see vaspaceIncRefCnt/vaspaceDecRefCnt)
    NvU32 vaspaceId;                         // Identifier supplied at construction (see vaspaceConstruct_)
    NvU64 vasStart;                          // VA range start (see vaspaceGetVaStart)
    NvU64 vasLimit;                          // VA range limit (see vaspaceGetVaLimit)
};
281 
// NVOC class typedef, class id, and class definition for OBJVASPACE.
#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__
#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__
typedef struct OBJVASPACE OBJVASPACE;
#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */

#ifndef __nvoc_class_id_OBJVASPACE
#define __nvoc_class_id_OBJVASPACE 0x6c347f
#endif /* __nvoc_class_id_OBJVASPACE */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE;
292 
// NVOC cast helpers and object-creation entry points for OBJVASPACE.
// __dynamicCast returns NULL when the header is compiled out.
#define __staticCast_OBJVASPACE(pThis) \
    ((pThis)->__nvoc_pbase_OBJVASPACE)

#ifdef __nvoc_vaspace_h_disabled
#define __dynamicCast_OBJVASPACE(pThis) ((OBJVASPACE*)NULL)
#else //__nvoc_vaspace_h_disabled
#define __dynamicCast_OBJVASPACE(pThis) \
    ((OBJVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVASPACE)))
#endif //__nvoc_vaspace_h_disabled


NV_STATUS __nvoc_objCreateDynamic_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32);
#define __objCreate_OBJVASPACE(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_OBJVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
309 
// Public method-call macros: each vaspaceXxx(...) expands to the matching
// *_DISPATCH thunk, which forwards through the object's vtable entry.
#define vaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) vaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags)
#define vaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) vaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr)
#define vaspaceFree(pVAS, vAddr) vaspaceFree_DISPATCH(pVAS, vAddr)
#define vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) vaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask)
#define vaspaceIncAllocRefCnt(pVAS, vAddr) vaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr)
#define vaspaceGetVaStart(pVAS) vaspaceGetVaStart_DISPATCH(pVAS)
#define vaspaceGetVaLimit(pVAS) vaspaceGetVaLimit_DISPATCH(pVAS)
#define vaspaceGetVasInfo(pVAS, pParams) vaspaceGetVasInfo_DISPATCH(pVAS, pParams)
#define vaspaceGetFlags(pVAS) vaspaceGetFlags_DISPATCH(pVAS)
#define vaspaceIsInternalVaRestricted(pVAS) vaspaceIsInternalVaRestricted_DISPATCH(pVAS)
#define vaspaceMap(pVAS, pGpu, vaLo, vaHi, pTarget, flags) vaspaceMap_DISPATCH(pVAS, pGpu, vaLo, vaHi, pTarget, flags)
#define vaspaceUnmap(pVAS, pGpu, vaLo, vaHi) vaspaceUnmap_DISPATCH(pVAS, pGpu, vaLo, vaHi)
#define vaspaceReserveMempool(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) vaspaceReserveMempool_DISPATCH(pVAS, pGpu, hClient, size, pageSizeLockMask, flags)
#define vaspaceGetHeap(pVAS) vaspaceGetHeap_DISPATCH(pVAS)
#define vaspaceGetMapPageSize(pVAS, pGpu, pMemBlock) vaspaceGetMapPageSize_DISPATCH(pVAS, pGpu, pMemBlock)
#define vaspaceGetBigPageSize(pVAS) vaspaceGetBigPageSize_DISPATCH(pVAS)
#define vaspaceIsMirrored(pVAS) vaspaceIsMirrored_DISPATCH(pVAS)
#define vaspaceIsFaultCapable(pVAS) vaspaceIsFaultCapable_DISPATCH(pVAS)
#define vaspaceIsExternallyOwned(pVAS) vaspaceIsExternallyOwned_DISPATCH(pVAS)
#define vaspaceIsAtsEnabled(pVAS) vaspaceIsAtsEnabled_DISPATCH(pVAS)
#define vaspaceGetPasid(pVAS, pPasid) vaspaceGetPasid_DISPATCH(pVAS, pPasid)
#define vaspaceGetPageDirBase(pVAS, pGpu) vaspaceGetPageDirBase_DISPATCH(pVAS, pGpu)
#define vaspaceGetKernelPageDirBase(pVAS, pGpu) vaspaceGetKernelPageDirBase_DISPATCH(pVAS, pGpu)
#define vaspacePinRootPageDir(pVAS, pGpu) vaspacePinRootPageDir_DISPATCH(pVAS, pGpu)
#define vaspaceUnpinRootPageDir(pVAS, pGpu) vaspaceUnpinRootPageDir_DISPATCH(pVAS, pGpu)
#define vaspaceInvalidateTlb(pVAS, pGpu, type) vaspaceInvalidateTlb_DISPATCH(pVAS, pGpu, type)
#define vaspaceGetPageTableInfo(pVAS, pParams) vaspaceGetPageTableInfo_DISPATCH(pVAS, pParams)
#define vaspaceGetPteInfo(pVAS, pGpu, pParams, pPhysAddr) vaspaceGetPteInfo_DISPATCH(pVAS, pGpu, pParams, pPhysAddr)
#define vaspaceSetPteInfo(pVAS, pGpu, pParams) vaspaceSetPteInfo_DISPATCH(pVAS, pGpu, pParams)
#define vaspaceFreeV2(pVAS, vAddr, pSize) vaspaceFreeV2_DISPATCH(pVAS, vAddr, pSize)
// Dispatch thunks: forward to the per-class implementation through the vtable.
// Hash-suffixed helpers (e.g. *_b7902c) are generated default stubs that
// assert (compile-time-false precomp assert) and report NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS vaspaceConstruct__DISPATCH(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) {
    return pVAS->__vaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags);
}

static inline NV_STATUS vaspaceAlloc_DISPATCH(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) {
    return pVAS->__vaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr);
}

static inline NV_STATUS vaspaceFree_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    return pVAS->__vaspaceFree__(pVAS, vAddr);
}

static inline NV_STATUS vaspaceApplyDefaultAlignment_DISPATCH(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) {
    return pVAS->__vaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask);
}

// Default stub: asserts and reports not-supported for classes that do not
// implement allocation refcounting.
static inline NV_STATUS vaspaceIncAllocRefCnt_b7902c(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceIncAllocRefCnt_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    return pVAS->__vaspaceIncAllocRefCnt__(pVAS, vAddr);
}
364 
// Base-class implementation provided elsewhere (vaspace.c); declared here so
// the generated vtable can reference it.
NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS);

static inline NvU64 vaspaceGetVaStart_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetVaStart__(pVAS);
}

NvU64 vaspaceGetVaLimit_IMPL(struct OBJVASPACE *pVAS);

static inline NvU64 vaspaceGetVaLimit_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetVaLimit__(pVAS);
}

static inline NV_STATUS vaspaceGetVasInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) {
    return pVAS->__vaspaceGetVasInfo__(pVAS, pParams);
}

// Default stub: no flags (returns 0).
static inline NvU32 vaspaceGetFlags_edd98b(struct OBJVASPACE *pVAS) {
    return 0U;
}

static inline NvU32 vaspaceGetFlags_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetFlags__(pVAS);
}

NvBool vaspaceIsInternalVaRestricted_IMPL(struct OBJVASPACE *pVAS);

static inline NvBool vaspaceIsInternalVaRestricted_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsInternalVaRestricted__(pVAS);
}

// Default stub: asserts and reports not-supported.
static inline NV_STATUS vaspaceMap_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceMap_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) {
    return pVAS->__vaspaceMap__(pVAS, pGpu, vaLo, vaHi, pTarget, flags);
}
403 
// Default stub: asserts; unmap is a no-op for classes without a mapper.
static inline void vaspaceUnmap_8b86a5(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
}

static inline void vaspaceUnmap_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) {
    pVAS->__vaspaceUnmap__(pVAS, pGpu, vaLo, vaHi);
}

// Default stub: mempool reservation succeeds trivially (NV_OK, nothing to do).
static inline NV_STATUS vaspaceReserveMempool_ac1694(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) {
    return NV_OK;
}

static inline NV_STATUS vaspaceReserveMempool_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) {
    return pVAS->__vaspaceReserveMempool__(pVAS, pGpu, hClient, size, pageSizeLockMask, flags);
}

// Default stub: asserts and returns NULL (no heap).
static inline struct OBJEHEAP *vaspaceGetHeap_128d6d(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((void *)0);
}

static inline struct OBJEHEAP *vaspaceGetHeap_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetHeap__(pVAS);
}

// Default stub: asserts and returns 0 (no page size).
static inline NvU64 vaspaceGetMapPageSize_07238a(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return 0U;
}

static inline NvU64 vaspaceGetMapPageSize_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) {
    return pVAS->__vaspaceGetMapPageSize__(pVAS, pGpu, pMemBlock);
}

// Default stub: asserts and returns 0 (no big page size).
static inline NvU64 vaspaceGetBigPageSize_07238a(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return 0U;
}

static inline NvU64 vaspaceGetBigPageSize_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetBigPageSize__(pVAS);
}
446 
// Boolean predicate defaults (*_814c13): assert and return NV_FALSE
// (expressed as ((NvBool)(0 != 0)) by the generator).
static inline NvBool vaspaceIsMirrored_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsMirrored_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsMirrored__(pVAS);
}

static inline NvBool vaspaceIsFaultCapable_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsFaultCapable_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsFaultCapable__(pVAS);
}

static inline NvBool vaspaceIsExternallyOwned_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsExternallyOwned_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsExternallyOwned__(pVAS);
}

static inline NvBool vaspaceIsAtsEnabled_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsAtsEnabled_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsAtsEnabled__(pVAS);
}

// Default stub: asserts and reports not-supported (no PASID available).
static inline NV_STATUS vaspaceGetPasid_b7902c(struct OBJVASPACE *pVAS, NvU32 *pPasid) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceGetPasid_DISPATCH(struct OBJVASPACE *pVAS, NvU32 *pPasid) {
    return pVAS->__vaspaceGetPasid__(pVAS, pPasid);
}
491 
// Page-directory accessors: defaults (*_128d6d) assert and return NULL.
static inline PMEMORY_DESCRIPTOR vaspaceGetPageDirBase_128d6d(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((void *)0);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetPageDirBase_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    return pVAS->__vaspaceGetPageDirBase__(pVAS, pGpu);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetKernelPageDirBase_128d6d(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((void *)0);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetKernelPageDirBase_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    return pVAS->__vaspaceGetKernelPageDirBase__(pVAS, pGpu);
}

// Root page-directory pinning: defaults assert; pin reports not-supported,
// unpin is a void no-op.
static inline NV_STATUS vaspacePinRootPageDir_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspacePinRootPageDir_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    return pVAS->__vaspacePinRootPageDir__(pVAS, pGpu);
}

static inline void vaspaceUnpinRootPageDir_8b86a5(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
}

static inline void vaspaceUnpinRootPageDir_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    pVAS->__vaspaceUnpinRootPageDir__(pVAS, pGpu);
}

void vaspaceInvalidateTlb_IMPL(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type);

static inline void vaspaceInvalidateTlb_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) {
    pVAS->__vaspaceInvalidateTlb__(pVAS, pGpu, type);
}
532 
// PDE/PTE info control-call plumbing: defaults assert and report not-supported.
static inline NV_STATUS vaspaceGetPageTableInfo_b7902c(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceGetPageTableInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) {
    return pVAS->__vaspaceGetPageTableInfo__(pVAS, pParams);
}

static inline NV_STATUS vaspaceGetPteInfo_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceGetPteInfo_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) {
    return pVAS->__vaspaceGetPteInfo__(pVAS, pGpu, pParams, pPhysAddr);
}

static inline NV_STATUS vaspaceSetPteInfo_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceSetPteInfo_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) {
    return pVAS->__vaspaceSetPteInfo__(pVAS, pGpu, pParams);
}

// FreeV2: default asserts and reports not-supported.
static inline NV_STATUS vaspaceFreeV2_b7902c(struct OBJVASPACE *pVAS, NvU64 vAddr, NvU64 *pSize) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceFreeV2_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr, NvU64 *pSize) {
    return pVAS->__vaspaceFreeV2__(pVAS, vAddr, pSize);
}
568 
// Non-virtual OBJVASPACE methods. When the class is compiled out
// (__nvoc_vaspace_h_disabled), calls collapse to assertion stubs.
void vaspaceIncRefCnt_IMPL(struct OBJVASPACE *pVAS);

#ifdef __nvoc_vaspace_h_disabled
static inline void vaspaceIncRefCnt(struct OBJVASPACE *pVAS) {
    NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!");
}
#else //__nvoc_vaspace_h_disabled
#define vaspaceIncRefCnt(pVAS) vaspaceIncRefCnt_IMPL(pVAS)
#endif //__nvoc_vaspace_h_disabled

void vaspaceDecRefCnt_IMPL(struct OBJVASPACE *pVAS);

#ifdef __nvoc_vaspace_h_disabled
static inline void vaspaceDecRefCnt(struct OBJVASPACE *pVAS) {
    NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!");
}
#else //__nvoc_vaspace_h_disabled
#define vaspaceDecRefCnt(pVAS) vaspaceDecRefCnt_IMPL(pVAS)
#endif //__nvoc_vaspace_h_disabled

// Resolve a VA space from a client handle, or the device default when
// hVASpace does not select one.
NV_STATUS vaspaceGetByHandleOrDeviceDefault_IMPL(struct RsClient *pClient, NvHandle hDeviceOrSubDevice, NvHandle hVASpace, struct OBJVASPACE **ppVAS);

#define vaspaceGetByHandleOrDeviceDefault(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) vaspaceGetByHandleOrDeviceDefault_IMPL(pClient, hDeviceOrSubDevice, hVASpace, ppVAS)
NV_STATUS vaspaceFillAllocParams_IMPL(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pSize, NvU64 *pAlign, NvU64 *pRangeLo, NvU64 *pRangeHi, NvU64 *pPageSizeLockMask, VAS_ALLOC_FLAGS *pFlags);

#ifdef __nvoc_vaspace_h_disabled
static inline NV_STATUS vaspaceFillAllocParams(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pSize, NvU64 *pAlign, NvU64 *pRangeLo, NvU64 *pRangeHi, NvU64 *pPageSizeLockMask, VAS_ALLOC_FLAGS *pFlags) {
    NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_vaspace_h_disabled
#define vaspaceFillAllocParams(pVAS, pAllocInfo, pSize, pAlign, pRangeLo, pRangeHi, pPageSizeLockMask, pFlags) vaspaceFillAllocParams_IMPL(pVAS, pAllocInfo, pSize, pAlign, pRangeLo, pRangeHi, pPageSizeLockMask, pFlags)
#endif //__nvoc_vaspace_h_disabled
602 
#undef PRIVATE_FIELD


// Ideally all non-static base class method declarations should be in the _private.h file
NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS);

// For getting the address translation after the MMU (i.e.: after VA->PA translation)
#define VAS_ADDRESS_TRANSLATION(pVASpace) ((pVASpace)->addressTranslation)
611 
612 #endif // _VASPACE_H_
613 
614 #ifdef __cplusplus
615 } // extern "C"
616 #endif
617 #endif // _G_VASPACE_NVOC_H_
618