#ifndef _G_VASPACE_NVOC_H_
#define _G_VASPACE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_vaspace_nvoc.h"

#ifndef _VASPACE_H_
#define _VASPACE_H_

/**************** Resource Manager Defines and Structures ******************\
*                                                                           *
* Module: VASPACE.H                                                         *
*       Defines and structures used for Virtual Address Space Object.       *
\***************************************************************************/

#include "ctrl/ctrl0080/ctrl0080dma.h"

#include "core/core.h"
#include "resserv/rs_client.h"
#include "containers/eheap_old.h"
#include "gpu/mem_mgr/heap_base.h"
#include "gpu/mem_mgr/mem_desc.h"


typedef struct OBJVASPACE *POBJVASPACE;
typedef struct VASPACE VASPACE, *PVASPACE;
struct VirtMemAllocator;

#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
typedef struct VirtMemAllocator VirtMemAllocator;
#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */

#ifndef __nvoc_class_id_VirtMemAllocator
#define __nvoc_class_id_VirtMemAllocator 0x899e48
#endif /* __nvoc_class_id_VirtMemAllocator */


typedef struct MMU_MAP_TARGET       MMU_MAP_TARGET;
typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS;
struct Device;

#ifndef __NVOC_CLASS_Device_TYPEDEF__
#define __NVOC_CLASS_Device_TYPEDEF__
typedef struct Device Device;
#endif /* __NVOC_CLASS_Device_TYPEDEF__ */

#ifndef __nvoc_class_id_Device
#define __nvoc_class_id_Device 0xe0ac20
#endif /* __nvoc_class_id_Device */



typedef struct
{
    NvBool bReverse : 1;
    NvBool bPreferSysmemPageTables : 1;
    NvBool bExternallyManaged : 1;
    NvBool bLazy : 1;
    NvBool bSparse : 1;
    NvBool bPrivileged : 1;
    NvBool bClientAllocation : 1;
    NvBool bFixedAddressRange : 1;
    NvBool bFixedAddressAllocate : 1;
    NvBool bForceContig : 1;
    NvBool bForceNonContig : 1;

    //
    // Using this flag may have security implications, so use it only when
    // you are certain of its usage.
    //
    NvBool bSkipTlbInvalidateOnFree : 1;
} VAS_ALLOC_FLAGS;
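
//
// Minimal usage sketch (illustrative only, not part of this header): callers
// typically zero-initialize VAS_ALLOC_FLAGS and set just the bits they need,
// e.g. for a lazy, sparse allocation:
//
//     VAS_ALLOC_FLAGS allocFlags = {0};
//     allocFlags.bLazy   = NV_TRUE;
//     allocFlags.bSparse = NV_TRUE;
//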

#define VAS_EHEAP_OWNER_NVRM NvU32_BUILD('n','v','r','m')
#define VAS_EHEAP_OWNER_RSVD NvU32_BUILD('r','s','v','d')

typedef struct
{
    NvBool bRemap : 1; //!< Overrides existing mappings when set.
} VAS_MAP_FLAGS;

typedef enum
{
    PTE_UPGRADE,
    PTE_DOWNGRADE
} VAS_PTE_UPDATE_TYPE;

//
// OBJVASPACE creation flags
//
// BAR                        Used by one of the BAR apertures
// SCRATCH_INVAL              Use scratch page instead of invalid bit
// PARTIAL_INVAL              Do not fully invalidate when validating PTE
// MINIMIZE_PTETABLE_SIZE     Size PTE arrays minimally to fit the allocation
// RETRY_PTE_ALLOC_IN_SYS     PTE allocation falls back to sysmem
// FULL_PTE                   Initialize a full PTE
// BAR_BAR1                   Used for BAR1
// BAR_BAR2                   Used for BAR2 (unused)
// BAR_IFB                    Used for IFB
// PERFMON                    Used for Perfmon
// PMU                        Used for PMU
// SET_MIRRORED               <DEPRECATED.........>
//                            This flag will create a privileged PDB as part of this vaspace
//                            This new PDB will mirror all of the allocations made in the
//                            original PDB. The first PDE is considered privileged for this
//                            address space.
// SHARED_MANAGEMENT          Enables mode where only a portion of the VAS is managed
//                            and the page directory may be allocated/set externally.
// ALLOW_ZERO_ADDRESS         Explicitly allows the base VAS address to start at 0.
//                            Normally 0 is reserved to distinguish NULL pointers.
//
// BIG_PAGE_SIZE              Field that specifies the big page size to be used.
//                            DEFAULT is used through GM10X; GM20X and later use a
//                            custom value for the big page size.
//   SIZE_DEFAULT             Lets RM pick the default value
//   SIZE_64K                 Uses 64K as big page size for this VA space
//   SIZE_128K                Uses 128K as big page size for this VA space
//
// MMU_FMT_VA_BITS            Selects the MMU format of the VA space by the number
//                            of VA bits supported.
//   DEFAULT                  RM picks the default for the underlying MMU HW.
//   40                       Fermi+ 40-bit (2-level) format.
//   49                       Pascal+ 49-bit (5-level) format.
//
// ENABLE_VMM                 <DEPRECATED.........>
//                            Temp flag to enable new VMM code path on select
//                            VA spaces (e.g. client but not BAR1/PMU VAS).
//
// ZERO_OLD_STRUCT            Deprecated.
//
// ENABLE_FAULTING            This address space is participating in UVM.
//                            RM will enable page faulting for all channels that will be
//                            associated with this address space.
//
// IS_UVM_MANAGED             This flag will replace the SET_MIRRORED flag. It is used to
//                            denote that this VASpace is participating in UVM.
//
// ENABLE_ATS                 This address space has ATS enabled.
//
//
// ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR This flag, when set, allows page table allocations
//                                      to be routed to the suballocator of the process
//                                      requesting the mapping. If there is no suballocator,
//                                      allocations fall back to the global heap.
//
// VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB  This flag must be used by VA spaces that use
//                                            the NVLink MMU.
//
#define VASPACE_FLAGS_NONE                                        0
#define VASPACE_FLAGS_BAR                                         NVBIT(0)
#define VASPACE_FLAGS_SCRATCH_INVAL                               NVBIT(1)
#define VASPACE_FLAGS_ENABLE_ATS                                  NVBIT(2)
#define VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS             NVBIT(3)
#define VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE                      NVBIT(4)
#define VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS                      NVBIT(5)
#define VASPACE_FLAGS_REQUIRE_FIXED_OFFSET                        NVBIT(6)
#define VASPACE_FLAGS_BAR_BAR1                                    NVBIT(7)
#define VASPACE_FLAGS_BAR_BAR2                                    NVBIT(8)
#define VASPACE_FLAGS_BAR_IFB                                     NVBIT(9)
#define VASPACE_FLAGS_PERFMON                                     NVBIT(10)
#define VASPACE_FLAGS_PMU                                         NVBIT(11)
#define VASPACE_FLAGS_PTETABLE_PMA_MANAGED                        NVBIT(14)
#define VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB                 NVBIT(15)
#define VASPACE_FLAGS_DISABLE_SPLIT_VAS                           NVBIT(16)
#define VASPACE_FLAGS_SET_MIRRORED                                NVBIT(17)
#define VASPACE_FLAGS_SHARED_MANAGEMENT                           NVBIT(18)
#define VASPACE_FLAGS_ALLOW_ZERO_ADDRESS                          NVBIT(19)
#define VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL                          NVBIT(20)
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE                            22:21
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_DEFAULT                    0x00000000
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_64K                        0x00000001
#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_128K                       0x00000002
#define VASPACE_FLAGS_HDA                                         NVBIT(23)
#define VASPACE_FLAGS_FLA                                         NVBIT(24) // Soon to be deprecated and removed.
                                                                            // Used by legacy FLA implementation.
#define VASPACE_FLAGS_HWPM                                        NVBIT(25)
#define VASPACE_FLAGS_ENABLE_VMM                                  NVBIT(26)
#define VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE             NVBIT(27)
#define VASPACE_FLAGS_REVERSE                                     NVBIT(28)
#define VASPACE_FLAGS_ENABLE_FAULTING                             NVBIT(29)
#define VASPACE_FLAGS_IS_EXTERNALLY_OWNED                         NVBIT(30)
#define VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR        NVBIT(31)
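
//
// Illustrative sketch (not part of this header): creation flags are a plain
// NvU32 mask, with the big page size packed into bits 22:21 as defined by
// NV_VASPACE_FLAGS_BIG_PAGE_SIZE above. Assuming the DRF field helpers from
// nvmisc.h, a BAR1 VA space with a 64K big page size could be described as:
//
//     NvU32 flags = VASPACE_FLAGS_BAR | VASPACE_FLAGS_BAR_BAR1;
//     flags = FLD_SET_DRF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _64K, flags);
//
//     // Reading the field back:
//     NvU32 bigPageField = DRF_VAL(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, flags);
//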

/*!
 * Flags for page table memory pools.
 *
 * VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY
 *           Allocate only the levels from the top down to the specified level.
 *           Anything below the specified level is not allocated.
 */
#define VASPACE_RESERVE_FLAGS_NONE                                 (0)
#define VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY  NVBIT32(0)

/*!
 * Level of RM-management for a given VA range.
 *
 * FULL
 *      RM manages everything (e.g. PDEs, PTEs).
 * PDES_ONLY
 *      RM only manages PDEs (through non-buffer version of UpdatePde2).
 *      Buffer versions of FillPteMem and UpdatePde2 are still allowed.
 * NONE
 *      RM does not manage anything.
 *      Buffer versions of FillPteMem and UpdatePde2 are still allowed.
 */
typedef enum
{
    VA_MANAGEMENT_FULL = 0,
    VA_MANAGEMENT_PDES_ONLY,
    VA_MANAGEMENT_NONE,
} VA_MANAGEMENT;

/*!
 * Abstract base class of an RM-managed virtual address space.
 */

// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif

struct OBJVASPACE {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct OBJVASPACE *__nvoc_pbase_OBJVASPACE;
    NV_STATUS (*__vaspaceConstruct___)(struct OBJVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32);
    NV_STATUS (*__vaspaceAlloc__)(struct OBJVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *);
    NV_STATUS (*__vaspaceFree__)(struct OBJVASPACE *, NvU64);
    NV_STATUS (*__vaspaceApplyDefaultAlignment__)(struct OBJVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *);
    NV_STATUS (*__vaspaceIncAllocRefCnt__)(struct OBJVASPACE *, NvU64);
    NvU64 (*__vaspaceGetVaStart__)(struct OBJVASPACE *);
    NvU64 (*__vaspaceGetVaLimit__)(struct OBJVASPACE *);
    NV_STATUS (*__vaspaceGetVasInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *);
    NvU32 (*__vaspaceGetFlags__)(struct OBJVASPACE *);
    NV_STATUS (*__vaspaceMap__)(struct OBJVASPACE *, struct OBJGPU *, const NvU64, const NvU64, const MMU_MAP_TARGET *, const VAS_MAP_FLAGS);
    void (*__vaspaceUnmap__)(struct OBJVASPACE *, struct OBJGPU *, const NvU64, const NvU64);
    NV_STATUS (*__vaspaceReserveMempool__)(struct OBJVASPACE *, struct OBJGPU *, struct Device *, NvU64, NvU64, NvU32);
    struct OBJEHEAP *(*__vaspaceGetHeap__)(struct OBJVASPACE *);
    NvU64 (*__vaspaceGetMapPageSize__)(struct OBJVASPACE *, struct OBJGPU *, struct EMEMBLOCK *);
    NvU64 (*__vaspaceGetBigPageSize__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsMirrored__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsFaultCapable__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsExternallyOwned__)(struct OBJVASPACE *);
    NvBool (*__vaspaceIsAtsEnabled__)(struct OBJVASPACE *);
    NV_STATUS (*__vaspaceGetPasid__)(struct OBJVASPACE *, NvU32 *);
    PMEMORY_DESCRIPTOR (*__vaspaceGetPageDirBase__)(struct OBJVASPACE *, struct OBJGPU *);
    PMEMORY_DESCRIPTOR (*__vaspaceGetKernelPageDirBase__)(struct OBJVASPACE *, struct OBJGPU *);
    NV_STATUS (*__vaspacePinRootPageDir__)(struct OBJVASPACE *, struct OBJGPU *);
    void (*__vaspaceUnpinRootPageDir__)(struct OBJVASPACE *, struct OBJGPU *);
    void (*__vaspaceInvalidateTlb__)(struct OBJVASPACE *, struct OBJGPU *, VAS_PTE_UPDATE_TYPE);
    NV_STATUS (*__vaspaceGetPageTableInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *);
    NV_STATUS (*__vaspaceGetPteInfo__)(struct OBJVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *, RmPhysAddr *);
    NV_STATUS (*__vaspaceSetPteInfo__)(struct OBJVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *);
    NV_STATUS (*__vaspaceFreeV2__)(struct OBJVASPACE *, NvU64, NvU64 *);
    NvU32 gpuMask;
    ADDRESS_TRANSLATION addressTranslation;
    NvU32 refCnt;
    NvU32 vaspaceId;
    NvU64 vasStart;
    NvU64 vasLimit;
};

#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__
#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__
typedef struct OBJVASPACE OBJVASPACE;
#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */

#ifndef __nvoc_class_id_OBJVASPACE
#define __nvoc_class_id_OBJVASPACE 0x6c347f
#endif /* __nvoc_class_id_OBJVASPACE */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE;

#define __staticCast_OBJVASPACE(pThis) \
    ((pThis)->__nvoc_pbase_OBJVASPACE)

#ifdef __nvoc_vaspace_h_disabled
#define __dynamicCast_OBJVASPACE(pThis) ((OBJVASPACE*)NULL)
#else //__nvoc_vaspace_h_disabled
#define __dynamicCast_OBJVASPACE(pThis) \
    ((OBJVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVASPACE)))
#endif //__nvoc_vaspace_h_disabled


NV_STATUS __nvoc_objCreateDynamic_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32);
#define __objCreate_OBJVASPACE(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_OBJVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
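
//
// Illustrative sketch (not part of this header): OBJVASPACE is an abstract
// base class, so concrete VA spaces are created as derived classes; the cast
// helpers above are how a generic object pointer is safely narrowed. Assuming
// a hypothetical pSomeObject known to the caller:
//
//     OBJVASPACE *pVAS = __dynamicCast_OBJVASPACE(pSomeObject);
//     if (pVAS == NULL)
//     {
//         // pSomeObject does not derive from OBJVASPACE.
//     }
//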

#define vaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) vaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags)
#define vaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) vaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr)
#define vaspaceFree(pVAS, vAddr) vaspaceFree_DISPATCH(pVAS, vAddr)
#define vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) vaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask)
#define vaspaceIncAllocRefCnt(pVAS, vAddr) vaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr)
#define vaspaceGetVaStart(pVAS) vaspaceGetVaStart_DISPATCH(pVAS)
#define vaspaceGetVaLimit(pVAS) vaspaceGetVaLimit_DISPATCH(pVAS)
#define vaspaceGetVasInfo(pVAS, pParams) vaspaceGetVasInfo_DISPATCH(pVAS, pParams)
#define vaspaceGetFlags(pVAS) vaspaceGetFlags_DISPATCH(pVAS)
#define vaspaceMap(pVAS, pGpu, vaLo, vaHi, pTarget, flags) vaspaceMap_DISPATCH(pVAS, pGpu, vaLo, vaHi, pTarget, flags)
#define vaspaceUnmap(pVAS, pGpu, vaLo, vaHi) vaspaceUnmap_DISPATCH(pVAS, pGpu, vaLo, vaHi)
#define vaspaceReserveMempool(pVAS, pGpu, pDevice, size, pageSizeLockMask, flags) vaspaceReserveMempool_DISPATCH(pVAS, pGpu, pDevice, size, pageSizeLockMask, flags)
#define vaspaceGetHeap(pVAS) vaspaceGetHeap_DISPATCH(pVAS)
#define vaspaceGetMapPageSize(pVAS, pGpu, pMemBlock) vaspaceGetMapPageSize_DISPATCH(pVAS, pGpu, pMemBlock)
#define vaspaceGetBigPageSize(pVAS) vaspaceGetBigPageSize_DISPATCH(pVAS)
#define vaspaceIsMirrored(pVAS) vaspaceIsMirrored_DISPATCH(pVAS)
#define vaspaceIsFaultCapable(pVAS) vaspaceIsFaultCapable_DISPATCH(pVAS)
#define vaspaceIsExternallyOwned(pVAS) vaspaceIsExternallyOwned_DISPATCH(pVAS)
#define vaspaceIsAtsEnabled(pVAS) vaspaceIsAtsEnabled_DISPATCH(pVAS)
#define vaspaceGetPasid(pVAS, pPasid) vaspaceGetPasid_DISPATCH(pVAS, pPasid)
#define vaspaceGetPageDirBase(pVAS, pGpu) vaspaceGetPageDirBase_DISPATCH(pVAS, pGpu)
#define vaspaceGetKernelPageDirBase(pVAS, pGpu) vaspaceGetKernelPageDirBase_DISPATCH(pVAS, pGpu)
#define vaspacePinRootPageDir(pVAS, pGpu) vaspacePinRootPageDir_DISPATCH(pVAS, pGpu)
#define vaspaceUnpinRootPageDir(pVAS, pGpu) vaspaceUnpinRootPageDir_DISPATCH(pVAS, pGpu)
#define vaspaceInvalidateTlb(pVAS, pGpu, type) vaspaceInvalidateTlb_DISPATCH(pVAS, pGpu, type)
#define vaspaceGetPageTableInfo(pVAS, pParams) vaspaceGetPageTableInfo_DISPATCH(pVAS, pParams)
#define vaspaceGetPteInfo(pVAS, pGpu, pParams, pPhysAddr) vaspaceGetPteInfo_DISPATCH(pVAS, pGpu, pParams, pPhysAddr)
#define vaspaceSetPteInfo(pVAS, pGpu, pParams) vaspaceSetPteInfo_DISPATCH(pVAS, pGpu, pParams)
#define vaspaceFreeV2(pVAS, vAddr, pSize) vaspaceFreeV2_DISPATCH(pVAS, vAddr, pSize)
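
//
// Illustrative sketch (not part of this header): the macros above dispatch
// through the per-object v-table, so callers use them like plain functions.
// A hypothetical allocate/free pair over the whole VA range, with no page
// size locking (sizes and alignment chosen arbitrarily for the example):
//
//     NvU64 vAddr = 0;
//     VAS_ALLOC_FLAGS allocFlags = {0};
//     NV_STATUS status = vaspaceAlloc(pVAS, /*size*/ 0x10000, /*align*/ 0x1000,
//                                     vaspaceGetVaStart(pVAS),
//                                     vaspaceGetVaLimit(pVAS),
//                                     /*pageSizeLockMask*/ 0, allocFlags,
//                                     &vAddr);
//     if (status == NV_OK)
//     {
//         status = vaspaceFree(pVAS, vAddr);
//     }
//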
static inline NV_STATUS vaspaceConstruct__DISPATCH(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) {
    return pVAS->__vaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags);
}

static inline NV_STATUS vaspaceAlloc_DISPATCH(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) {
    return pVAS->__vaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr);
}

static inline NV_STATUS vaspaceFree_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    return pVAS->__vaspaceFree__(pVAS, vAddr);
}

static inline NV_STATUS vaspaceApplyDefaultAlignment_DISPATCH(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) {
    return pVAS->__vaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask);
}

static inline NV_STATUS vaspaceIncAllocRefCnt_b7902c(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceIncAllocRefCnt_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    return pVAS->__vaspaceIncAllocRefCnt__(pVAS, vAddr);
}

NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS);

static inline NvU64 vaspaceGetVaStart_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetVaStart__(pVAS);
}

NvU64 vaspaceGetVaLimit_IMPL(struct OBJVASPACE *pVAS);

static inline NvU64 vaspaceGetVaLimit_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetVaLimit__(pVAS);
}

static inline NV_STATUS vaspaceGetVasInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) {
    return pVAS->__vaspaceGetVasInfo__(pVAS, pParams);
}

static inline NvU32 vaspaceGetFlags_edd98b(struct OBJVASPACE *pVAS) {
    return 0U;
}

static inline NvU32 vaspaceGetFlags_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetFlags__(pVAS);
}

static inline NV_STATUS vaspaceMap_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceMap_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) {
    return pVAS->__vaspaceMap__(pVAS, pGpu, vaLo, vaHi, pTarget, flags);
}

static inline void vaspaceUnmap_8b86a5(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
}

static inline void vaspaceUnmap_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) {
    pVAS->__vaspaceUnmap__(pVAS, pGpu, vaLo, vaHi);
}

static inline NV_STATUS vaspaceReserveMempool_ac1694(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, struct Device *pDevice, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) {
    return NV_OK;
}

static inline NV_STATUS vaspaceReserveMempool_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, struct Device *pDevice, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) {
    return pVAS->__vaspaceReserveMempool__(pVAS, pGpu, pDevice, size, pageSizeLockMask, flags);
}

static inline struct OBJEHEAP *vaspaceGetHeap_128d6d(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((void *)0);
}

static inline struct OBJEHEAP *vaspaceGetHeap_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetHeap__(pVAS);
}

static inline NvU64 vaspaceGetMapPageSize_07238a(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, struct EMEMBLOCK *pMemBlock) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return 0U;
}

static inline NvU64 vaspaceGetMapPageSize_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, struct EMEMBLOCK *pMemBlock) {
    return pVAS->__vaspaceGetMapPageSize__(pVAS, pGpu, pMemBlock);
}

static inline NvU64 vaspaceGetBigPageSize_07238a(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return 0U;
}

static inline NvU64 vaspaceGetBigPageSize_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceGetBigPageSize__(pVAS);
}

static inline NvBool vaspaceIsMirrored_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsMirrored_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsMirrored__(pVAS);
}

static inline NvBool vaspaceIsFaultCapable_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsFaultCapable_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsFaultCapable__(pVAS);
}

static inline NvBool vaspaceIsExternallyOwned_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsExternallyOwned_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsExternallyOwned__(pVAS);
}

static inline NvBool vaspaceIsAtsEnabled_814c13(struct OBJVASPACE *pVAS) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((NvBool)(0 != 0));
}

static inline NvBool vaspaceIsAtsEnabled_DISPATCH(struct OBJVASPACE *pVAS) {
    return pVAS->__vaspaceIsAtsEnabled__(pVAS);
}

static inline NV_STATUS vaspaceGetPasid_b7902c(struct OBJVASPACE *pVAS, NvU32 *pPasid) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceGetPasid_DISPATCH(struct OBJVASPACE *pVAS, NvU32 *pPasid) {
    return pVAS->__vaspaceGetPasid__(pVAS, pPasid);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetPageDirBase_128d6d(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((void *)0);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetPageDirBase_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    return pVAS->__vaspaceGetPageDirBase__(pVAS, pGpu);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetKernelPageDirBase_128d6d(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return ((void *)0);
}

static inline PMEMORY_DESCRIPTOR vaspaceGetKernelPageDirBase_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    return pVAS->__vaspaceGetKernelPageDirBase__(pVAS, pGpu);
}

static inline NV_STATUS vaspacePinRootPageDir_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspacePinRootPageDir_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    return pVAS->__vaspacePinRootPageDir__(pVAS, pGpu);
}

static inline void vaspaceUnpinRootPageDir_8b86a5(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
}

static inline void vaspaceUnpinRootPageDir_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) {
    pVAS->__vaspaceUnpinRootPageDir__(pVAS, pGpu);
}

void vaspaceInvalidateTlb_IMPL(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type);

static inline void vaspaceInvalidateTlb_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) {
    pVAS->__vaspaceInvalidateTlb__(pVAS, pGpu, type);
}

static inline NV_STATUS vaspaceGetPageTableInfo_b7902c(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceGetPageTableInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) {
    return pVAS->__vaspaceGetPageTableInfo__(pVAS, pParams);
}

static inline NV_STATUS vaspaceGetPteInfo_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceGetPteInfo_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) {
    return pVAS->__vaspaceGetPteInfo__(pVAS, pGpu, pParams, pPhysAddr);
}

static inline NV_STATUS vaspaceSetPteInfo_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceSetPteInfo_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) {
    return pVAS->__vaspaceSetPteInfo__(pVAS, pGpu, pParams);
}

static inline NV_STATUS vaspaceFreeV2_b7902c(struct OBJVASPACE *pVAS, NvU64 vAddr, NvU64 *pSize) {
    NV_ASSERT_PRECOMP(((NvBool)(0 != 0)));
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS vaspaceFreeV2_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr, NvU64 *pSize) {
    return pVAS->__vaspaceFreeV2__(pVAS, vAddr, pSize);
}

void vaspaceIncRefCnt_IMPL(struct OBJVASPACE *pVAS);

#ifdef __nvoc_vaspace_h_disabled
static inline void vaspaceIncRefCnt(struct OBJVASPACE *pVAS) {
    NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!");
}
#else //__nvoc_vaspace_h_disabled
#define vaspaceIncRefCnt(pVAS) vaspaceIncRefCnt_IMPL(pVAS)
#endif //__nvoc_vaspace_h_disabled

void vaspaceDecRefCnt_IMPL(struct OBJVASPACE *pVAS);

#ifdef __nvoc_vaspace_h_disabled
static inline void vaspaceDecRefCnt(struct OBJVASPACE *pVAS) {
    NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!");
}
#else //__nvoc_vaspace_h_disabled
#define vaspaceDecRefCnt(pVAS) vaspaceDecRefCnt_IMPL(pVAS)
#endif //__nvoc_vaspace_h_disabled

NV_STATUS vaspaceGetByHandleOrDeviceDefault_IMPL(struct RsClient *pClient, NvHandle hDeviceOrSubDevice, NvHandle hVASpace, struct OBJVASPACE **ppVAS);

#define vaspaceGetByHandleOrDeviceDefault(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) vaspaceGetByHandleOrDeviceDefault_IMPL(pClient, hDeviceOrSubDevice, hVASpace, ppVAS)
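
//
// Illustrative sketch (not part of this header): as the name suggests, this
// helper looks up a VA space by handle, or the device default when no
// explicit VA space is given (behavior inferred from the name; the handles
// below are hypothetical):
//
//     struct OBJVASPACE *pVAS = NULL;
//     NV_STATUS status = vaspaceGetByHandleOrDeviceDefault(pClient, hDevice,
//                                                          hVASpace, &pVAS);
//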
NV_STATUS vaspaceFillAllocParams_IMPL(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pSize, NvU64 *pAlign, NvU64 *pRangeLo, NvU64 *pRangeHi, NvU64 *pPageSizeLockMask, VAS_ALLOC_FLAGS *pFlags);

#ifdef __nvoc_vaspace_h_disabled
static inline NV_STATUS vaspaceFillAllocParams(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pSize, NvU64 *pAlign, NvU64 *pRangeLo, NvU64 *pRangeHi, NvU64 *pPageSizeLockMask, VAS_ALLOC_FLAGS *pFlags) {
    NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_vaspace_h_disabled
#define vaspaceFillAllocParams(pVAS, pAllocInfo, pSize, pAlign, pRangeLo, pRangeHi, pPageSizeLockMask, pFlags) vaspaceFillAllocParams_IMPL(pVAS, pAllocInfo, pSize, pAlign, pRangeLo, pRangeHi, pPageSizeLockMask, pFlags)
#endif //__nvoc_vaspace_h_disabled

#undef PRIVATE_FIELD

// Ideally all non-static base class method declarations should be in the _private.h file
NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS);

// For getting the address translation after the MMU (i.e.: after VA->PA translation)
#define VAS_ADDRESS_TRANSLATION(pVASpace) ((pVASpace)->addressTranslation)

#endif // _VASPACE_H_

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_VASPACE_NVOC_H_