1 #ifndef _G_KERN_MEM_SYS_NVOC_H_
2 #define _G_KERN_MEM_SYS_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kern_mem_sys_nvoc.h"
33 
34 #ifndef KERN_MEM_SYS_H
35 #define KERN_MEM_SYS_H
36 
37 #include "core/core.h"
38 #include "gpu/eng_state.h"
39 #include "gpu/gpu.h"
40 #include "containers/map.h"
41 #include "gpu/mem_mgr/heap_base.h"
42 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
43 #include "ctrl/ctrl2080/ctrl2080internal.h"
44 //
45 // FB Cache (opcode, mem target) defines used by kmemsysCacheOp hal API
46 //
47 typedef enum
48 {
49     FB_CACHE_OP_UNDEFINED = 0,
50     // invalidate cache lines without writeback of dirty lines to memory
51     FB_CACHE_INVALIDATE = 1,
52     // writeback dirty lines but leave the lines in valid cache state
53     FB_CACHE_WRITEBACK,
54     // writeback dirty lines and then invalidates the cache state
55     FB_CACHE_EVICT,
56 } FB_CACHE_OP;
57 
58 // target memory types for cache operations
59 typedef enum
60 {
61     FB_CACHE_MEM_UNDEFINED = 0,
62     FB_CACHE_SYSTEM_MEMORY = 1,
63     FB_CACHE_VIDEO_MEMORY,
64     FB_CACHE_PEER_MEMORY,
65     FB_CACHE_DIRTY,
66     FB_CACHE_COMPTAG_MEMORY,
67     FB_CACHE_DIRTY_ALL,
68 } FB_CACHE_MEMTYPE;
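
/*
 * Illustrative sketch only (not part of the generated interface): the
 * (memtype, opcode) pair above is passed to the kmemsysCacheOp HAL entry
 * point declared later in this header. pMemDesc is assumed to describe the
 * video memory range being flushed; all locals are hypothetical.
 *
 *     NV_STATUS status = kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem,
 *                                           pMemDesc,
 *                                           FB_CACHE_VIDEO_MEMORY,
 *                                           FB_CACHE_EVICT);
 */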
69 
70 typedef enum
71 {
72     FB_CACHE_STATE_ENABLED,
73     FB_CACHE_STATE_DISABLED,
74 } FB_CACHE_STATE;
75 
76 typedef enum
77 {
78     FB_CACHE_WRITE_MODE_WRITETHROUGH,
79     FB_CACHE_WRITE_MODE_WRITEBACK,
80 } FB_CACHE_WRITE_MODE;
81 
82 typedef enum
83 {
84     FB_CACHE_BYPASS_MODE_ENABLED,
85     FB_CACHE_BYPASS_MODE_DISABLED,
86 } FB_CACHE_BYPASS_MODE; // FERMI (TEST) ONLY
87 
88 typedef enum
89 {
90     FB_CACHE_RCM_STATE_FULL,
91     FB_CACHE_RCM_STATE_TRANSITIONING,
92     FB_CACHE_RCM_STATE_REDUCED,
93     FB_CACHE_RCM_STATE_ZERO_CACHE,
94 } FB_CACHE_RCM_STATE;
95 
96 /*! Tracks NUMA information of GPU memory partitions */
97 typedef struct
98 {
99     NvBool bInUse;     // Is the partition in use?
100     NvU64  offset;     // FB offset of the partition
101     NvU64  size;       // FB size of the partition
    NvU32  numaNodeId; // OS NUMA node ID of the partition
103 } MEM_PARTITION_NUMA_INFO;
104 
105 typedef struct MIG_MEM_BOUNDARY_CONFIG_TABLE
106 {
107     /*!
108      * Memory boundary config A (4KB aligned)
109      */
110     NvU64 memBoundaryCfgA;
111 
112     /*!
113      * Memory boundary config B (4KB aligned)
114      */
115     NvU64 memBoundaryCfgB;
116 
117     /*!
118      * Memory boundary config C (64KB aligned)
119      */
120     NvU32 memBoundaryCfgC;
121 } MIG_MEM_BOUNDARY_CONFIG_TABLE;
122 
123 /*!
 * @brief Structure carrying memory configuration information for a specific GPU
 *        instance. The information is used to allocate memory when a GPU
 *        instance is created or queried. The structure is indexed by swizzId.
127  */
128 typedef struct MIG_GPU_INSTANCE_MEMORY_CONFIG
129 {
130     /*!
     * First VMMU segment at which the GPU instance memory starts
132      */
133     NvU64 startingVmmuSegment;
134 
135     /*!
     * Size of the GPU instance memory, expressed as a number of VMMU segments
137      */
138     NvU64 memSizeInVmmuSegment;
139 
140     /*!
141      * GPU Instance memory config initialization state
142      */
143     NvBool bInitialized;
144 } MIG_GPU_INSTANCE_MEMORY_CONFIG;
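
/*
 * Illustrative sketch only: the per-swizzId config above is normally looked up
 * through kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(), declared later in
 * this header. swizzId is assumed to name an existing GPU instance; the locals
 * are hypothetical.
 *
 *     const MIG_GPU_INSTANCE_MEMORY_CONFIG *pMemConfig = NULL;
 *     NV_STATUS status = kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(
 *         pGpu, pKernelMemorySystem, swizzId, &pMemConfig);
 *
 *     if (status == NV_OK && pMemConfig->bInitialized)
 *     {
 *         // startingVmmuSegment/memSizeInVmmuSegment give the instance's
 *         // placement in units of VMMU segments.
 *     }
 */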
145 
146 /* @ref NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS */
147 typedef NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS MEMORY_SYSTEM_STATIC_CONFIG;
148 
149 #define FB_HWRESID_CTAGID_FERMI              15:0
150 #define FB_HWRESID_ZCULL_FERMI               30:16
151 
152 #define FB_HWRESID_ZCULL_SHIFT_FERMI(i)      (1 << (i))
153 
154 #define FB_HWRESID_CTAGID_VAL_FERMI(n)        \
155     (((n) >> DRF_SHIFT(FB_HWRESID_CTAGID_FERMI)) & DRF_MASK(FB_HWRESID_CTAGID_FERMI))
156 
157 #define FB_HWRESID_CTAGID_NUM_FERMI(i)        \
158    (((i) & DRF_MASK(FB_HWRESID_CTAGID_FERMI)) << DRF_SHIFT(FB_HWRESID_CTAGID_FERMI))
159 
160 #define FB_SET_HWRESID_CTAGID_FERMI(h, i)                                                      \
161     h = ( ((h) & ~(DRF_MASK(FB_HWRESID_CTAGID_FERMI) << DRF_SHIFT(FB_HWRESID_CTAGID_FERMI))) | \
162       FB_HWRESID_CTAGID_NUM_FERMI(i) )
163 
164 #define FB_HWRESID_ZCULL_NUM_FERMI(i)        \
165    (((1<<i) & DRF_MASK(FB_HWRESID_ZCULL_FERMI)) << DRF_SHIFT(FB_HWRESID_ZCULL_FERMI))
166 
167 #define FB_HWRESID_ZCULL_VAL_FERMI(n)        \
168     (((n) >> DRF_SHIFT(FB_HWRESID_ZCULL_FERMI)) & DRF_MASK(FB_HWRESID_ZCULL_FERMI))
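
/*
 * Worked sketch of the DRF-style helpers above (hwResId and ctagId are
 * hypothetical locals): CTAGID occupies bits 15:0 and ZCULL bits 30:16 of a
 * hardware resource ID.
 *
 *     NvU32 hwResId = 0;
 *     NvU32 ctagId  = 0x42;
 *
 *     // Insert the comptag ID into bits 15:0 of hwResId.
 *     FB_SET_HWRESID_CTAGID_FERMI(hwResId, ctagId);
 *
 *     // Read it back; extracted == 0x42.
 *     NvU32 extracted = FB_HWRESID_CTAGID_VAL_FERMI(hwResId);
 */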
169 
170 /*!
 * KernelMemorySystem is a logical abstraction of the GPU memory system. This
 * type is instantiated in the VGPU guest/GSP client as well as in the VGPU
 * host/GSP-RM.
 *
 * When KernelMemorySystem needs to read or write hardware state, it does not
 * have direct access to the GPU's registers; it can, however, perform such
 * operations using the following mechanisms:
 *
 * 1.) Access registers that are virtualized across VFs, e.g. registers within
 *     NV_VIRTUAL_FUNCTION_PRIV_XYZ.
 *
 * 2.) Send an RPC to the VGPU host/GSP-RM to perform the operation.
 *
 * Operations such as "get memory system bus width" are appropriate for this
 * interface. Anything related to managing memory page tables and allocations
 * should live in MemoryManager.
187  */
188 
189 #ifdef NVOC_KERN_MEM_SYS_H_PRIVATE_ACCESS_ALLOWED
190 #define PRIVATE_FIELD(x) x
191 #else
192 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
193 #endif
194 struct KernelMemorySystem {
195     const struct NVOC_RTTI *__nvoc_rtti;
196     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
197     struct Object *__nvoc_pbase_Object;
198     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
199     struct KernelMemorySystem *__nvoc_pbase_KernelMemorySystem;
200     NV_STATUS (*__kmemsysConstructEngine__)(OBJGPU *, struct KernelMemorySystem *, ENGDESCRIPTOR);
201     NV_STATUS (*__kmemsysStateInitLocked__)(OBJGPU *, struct KernelMemorySystem *);
202     NV_STATUS (*__kmemsysStatePreLoad__)(OBJGPU *, struct KernelMemorySystem *, NvU32);
203     NV_STATUS (*__kmemsysStatePostLoad__)(OBJGPU *, struct KernelMemorySystem *, NvU32);
204     NV_STATUS (*__kmemsysStatePreUnload__)(OBJGPU *, struct KernelMemorySystem *, NvU32);
205     void (*__kmemsysStateDestroy__)(OBJGPU *, struct KernelMemorySystem *);
206     NV_STATUS (*__kmemsysGetFbNumaInfo__)(OBJGPU *, struct KernelMemorySystem *, NvU64 *, NvS32 *);
207     NV_STATUS (*__kmemsysReadUsableFbSize__)(OBJGPU *, struct KernelMemorySystem *, NvU64 *);
208     NV_STATUS (*__kmemsysCacheOp__)(OBJGPU *, struct KernelMemorySystem *, PMEMORY_DESCRIPTOR, FB_CACHE_MEMTYPE, FB_CACHE_OP);
209     NV_STATUS (*__kmemsysDoCacheOp__)(OBJGPU *, struct KernelMemorySystem *, NvU32, NvU32, NvU32, PRMTIMEOUT);
210     NvU32 (*__kmemsysReadL2SysmemInvalidateReg__)(OBJGPU *, struct KernelMemorySystem *);
211     void (*__kmemsysWriteL2SysmemInvalidateReg__)(OBJGPU *, struct KernelMemorySystem *, NvU32);
212     NvU32 (*__kmemsysReadL2PeermemInvalidateReg__)(OBJGPU *, struct KernelMemorySystem *);
213     void (*__kmemsysWriteL2PeermemInvalidateReg__)(OBJGPU *, struct KernelMemorySystem *, NvU32);
214     NV_STATUS (*__kmemsysInitFlushSysmemBuffer__)(OBJGPU *, struct KernelMemorySystem *);
215     void (*__kmemsysProgramSysmemFlushBuffer__)(OBJGPU *, struct KernelMemorySystem *);
216     NvBool (*__kmemsysIsPagePLCable__)(OBJGPU *, struct KernelMemorySystem *, NvU64, NvU64);
217     NV_STATUS (*__kmemsysReadMIGMemoryCfg__)(OBJGPU *, struct KernelMemorySystem *);
218     NV_STATUS (*__kmemsysInitMIGMemoryPartitionTable__)(OBJGPU *, struct KernelMemorySystem *);
219     NV_STATUS (*__kmemsysSwizzIdToVmmuSegmentsRange__)(OBJGPU *, struct KernelMemorySystem *, NvU32, NvU32, NvU32);
220     NV_STATUS (*__kmemsysNumaAddMemory__)(OBJGPU *, struct KernelMemorySystem *, NvU32, NvU64, NvU64, NvS32 *);
221     void (*__kmemsysNumaRemoveMemory__)(OBJGPU *, struct KernelMemorySystem *, NvU32);
222     void (*__kmemsysNumaRemoveAllMemory__)(OBJGPU *, struct KernelMemorySystem *);
223     NV_STATUS (*__kmemsysSetupAllAtsPeers__)(OBJGPU *, struct KernelMemorySystem *);
224     void (*__kmemsysRemoveAllAtsPeers__)(OBJGPU *, struct KernelMemorySystem *);
225     NV_STATUS (*__kmemsysStateLoad__)(POBJGPU, struct KernelMemorySystem *, NvU32);
226     NV_STATUS (*__kmemsysStateUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32);
227     NV_STATUS (*__kmemsysStatePostUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32);
228     NV_STATUS (*__kmemsysStateInitUnlocked__)(POBJGPU, struct KernelMemorySystem *);
229     void (*__kmemsysInitMissing__)(POBJGPU, struct KernelMemorySystem *);
230     NV_STATUS (*__kmemsysStatePreInitLocked__)(POBJGPU, struct KernelMemorySystem *);
231     NV_STATUS (*__kmemsysStatePreInitUnlocked__)(POBJGPU, struct KernelMemorySystem *);
232     NvBool (*__kmemsysIsPresent__)(POBJGPU, struct KernelMemorySystem *);
233     NvBool bDisableTiledCachingInvalidatesWithEccBug1521641;
234     NvBool bGpuCacheEnable;
235     NvBool bNumaNodesAdded;
236     NvBool bL2CleanFbPull;
237     NvBool bPreserveComptagBackingStoreOnSuspend;
238     NvBool bBug3656943WAR;
239     const MEMORY_SYSTEM_STATIC_CONFIG *pStaticConfig;
240     MEM_PARTITION_NUMA_INFO *memPartitionNumaInfo;
241     MIG_MEM_BOUNDARY_CONFIG_TABLE memBoundaryCfgTable;
242     MIG_GPU_INSTANCE_MEMORY_CONFIG gpuInstanceMemConfig[15];
243     NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS migMemoryPartitionTable;
244     PMEMORY_DESCRIPTOR pSysmemFlushBufferMemDesc;
245     NvU64 sysmemFlushBuffer;
246     NvU64 coherentCpuFbBase;
247     NvU64 coherentCpuFbEnd;
248     NvU64 numaOnlineBase;
249     NvU64 numaOnlineSize;
250 };
251 
252 #ifndef __NVOC_CLASS_KernelMemorySystem_TYPEDEF__
253 #define __NVOC_CLASS_KernelMemorySystem_TYPEDEF__
254 typedef struct KernelMemorySystem KernelMemorySystem;
255 #endif /* __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ */
256 
257 #ifndef __nvoc_class_id_KernelMemorySystem
258 #define __nvoc_class_id_KernelMemorySystem 0x7faff1
259 #endif /* __nvoc_class_id_KernelMemorySystem */
260 
261 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMemorySystem;
262 
263 #define __staticCast_KernelMemorySystem(pThis) \
264     ((pThis)->__nvoc_pbase_KernelMemorySystem)
265 
266 #ifdef __nvoc_kern_mem_sys_h_disabled
267 #define __dynamicCast_KernelMemorySystem(pThis) ((KernelMemorySystem*)NULL)
268 #else //__nvoc_kern_mem_sys_h_disabled
269 #define __dynamicCast_KernelMemorySystem(pThis) \
270     ((KernelMemorySystem*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelMemorySystem)))
271 #endif //__nvoc_kern_mem_sys_h_disabled
272 
273 #define PDB_PROP_KMEMSYS_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
274 #define PDB_PROP_KMEMSYS_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
275 
276 NV_STATUS __nvoc_objCreateDynamic_KernelMemorySystem(KernelMemorySystem**, Dynamic*, NvU32, va_list);
277 
278 NV_STATUS __nvoc_objCreate_KernelMemorySystem(KernelMemorySystem**, Dynamic*, NvU32);
279 #define __objCreate_KernelMemorySystem(ppNewObj, pParent, createFlags) \
280     __nvoc_objCreate_KernelMemorySystem((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
281 
282 #define kmemsysConstructEngine(pGpu, pKernelMemorySystem, arg0) kmemsysConstructEngine_DISPATCH(pGpu, pKernelMemorySystem, arg0)
283 #define kmemsysStateInitLocked(pGpu, pKernelMemorySystem) kmemsysStateInitLocked_DISPATCH(pGpu, pKernelMemorySystem)
284 #define kmemsysStatePreLoad(pGpu, pKernelMemorySystem, flags) kmemsysStatePreLoad_DISPATCH(pGpu, pKernelMemorySystem, flags)
285 #define kmemsysStatePostLoad(pGpu, pKernelMemorySystem, flags) kmemsysStatePostLoad_DISPATCH(pGpu, pKernelMemorySystem, flags)
286 #define kmemsysStatePreUnload(pGpu, pKernelMemorySystem, flags) kmemsysStatePreUnload_DISPATCH(pGpu, pKernelMemorySystem, flags)
287 #define kmemsysStateDestroy(pGpu, pKernelMemorySystem) kmemsysStateDestroy_DISPATCH(pGpu, pKernelMemorySystem)
288 #define kmemsysGetFbNumaInfo(pGpu, pKernelMemorySystem, physAddr, numaNodeId) kmemsysGetFbNumaInfo_DISPATCH(pGpu, pKernelMemorySystem, physAddr, numaNodeId)
289 #define kmemsysGetFbNumaInfo_HAL(pGpu, pKernelMemorySystem, physAddr, numaNodeId) kmemsysGetFbNumaInfo_DISPATCH(pGpu, pKernelMemorySystem, physAddr, numaNodeId)
290 #define kmemsysReadUsableFbSize(pGpu, pKernelMemorySystem, pFbSize) kmemsysReadUsableFbSize_DISPATCH(pGpu, pKernelMemorySystem, pFbSize)
291 #define kmemsysReadUsableFbSize_HAL(pGpu, pKernelMemorySystem, pFbSize) kmemsysReadUsableFbSize_DISPATCH(pGpu, pKernelMemorySystem, pFbSize)
292 #define kmemsysCacheOp(pGpu, pKernelMemorySystem, arg0, arg1, operation) kmemsysCacheOp_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1, operation)
293 #define kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, arg0, arg1, operation) kmemsysCacheOp_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1, operation)
294 #define kmemsysDoCacheOp(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3) kmemsysDoCacheOp_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3)
295 #define kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3) kmemsysDoCacheOp_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3)
296 #define kmemsysReadL2SysmemInvalidateReg(pGpu, pKernelMemorySystem) kmemsysReadL2SysmemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem)
297 #define kmemsysReadL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem) kmemsysReadL2SysmemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem)
298 #define kmemsysWriteL2SysmemInvalidateReg(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2SysmemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem, arg0)
299 #define kmemsysWriteL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2SysmemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem, arg0)
300 #define kmemsysReadL2PeermemInvalidateReg(pGpu, pKernelMemorySystem) kmemsysReadL2PeermemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem)
301 #define kmemsysReadL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem) kmemsysReadL2PeermemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem)
302 #define kmemsysWriteL2PeermemInvalidateReg(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2PeermemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem, arg0)
303 #define kmemsysWriteL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2PeermemInvalidateReg_DISPATCH(pGpu, pKernelMemorySystem, arg0)
304 #define kmemsysInitFlushSysmemBuffer(pGpu, pKernelMemorySystem) kmemsysInitFlushSysmemBuffer_DISPATCH(pGpu, pKernelMemorySystem)
305 #define kmemsysInitFlushSysmemBuffer_HAL(pGpu, pKernelMemorySystem) kmemsysInitFlushSysmemBuffer_DISPATCH(pGpu, pKernelMemorySystem)
306 #define kmemsysProgramSysmemFlushBuffer(pGpu, pKernelMemorySystem) kmemsysProgramSysmemFlushBuffer_DISPATCH(pGpu, pKernelMemorySystem)
307 #define kmemsysProgramSysmemFlushBuffer_HAL(pGpu, pKernelMemorySystem) kmemsysProgramSysmemFlushBuffer_DISPATCH(pGpu, pKernelMemorySystem)
308 #define kmemsysIsPagePLCable(pGpu, KernelMemorySystem, physAddr, pageSize) kmemsysIsPagePLCable_DISPATCH(pGpu, KernelMemorySystem, physAddr, pageSize)
309 #define kmemsysIsPagePLCable_HAL(pGpu, KernelMemorySystem, physAddr, pageSize) kmemsysIsPagePLCable_DISPATCH(pGpu, KernelMemorySystem, physAddr, pageSize)
310 #define kmemsysReadMIGMemoryCfg(pGpu, pKernelMemorySystem) kmemsysReadMIGMemoryCfg_DISPATCH(pGpu, pKernelMemorySystem)
311 #define kmemsysReadMIGMemoryCfg_HAL(pGpu, pKernelMemorySystem) kmemsysReadMIGMemoryCfg_DISPATCH(pGpu, pKernelMemorySystem)
312 #define kmemsysInitMIGMemoryPartitionTable(pGpu, pKernelMemorySystem) kmemsysInitMIGMemoryPartitionTable_DISPATCH(pGpu, pKernelMemorySystem)
313 #define kmemsysInitMIGMemoryPartitionTable_HAL(pGpu, pKernelMemorySystem) kmemsysInitMIGMemoryPartitionTable_DISPATCH(pGpu, pKernelMemorySystem)
314 #define kmemsysSwizzIdToVmmuSegmentsRange(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments) kmemsysSwizzIdToVmmuSegmentsRange_DISPATCH(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments)
315 #define kmemsysSwizzIdToVmmuSegmentsRange_HAL(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments) kmemsysSwizzIdToVmmuSegmentsRange_DISPATCH(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments)
316 #define kmemsysNumaAddMemory(pGpu, pKernelMemorySystem, swizzId, offset, size, numaNodeId) kmemsysNumaAddMemory_DISPATCH(pGpu, pKernelMemorySystem, swizzId, offset, size, numaNodeId)
317 #define kmemsysNumaAddMemory_HAL(pGpu, pKernelMemorySystem, swizzId, offset, size, numaNodeId) kmemsysNumaAddMemory_DISPATCH(pGpu, pKernelMemorySystem, swizzId, offset, size, numaNodeId)
318 #define kmemsysNumaRemoveMemory(pGpu, pKernelMemorySystem, swizzId) kmemsysNumaRemoveMemory_DISPATCH(pGpu, pKernelMemorySystem, swizzId)
319 #define kmemsysNumaRemoveMemory_HAL(pGpu, pKernelMemorySystem, swizzId) kmemsysNumaRemoveMemory_DISPATCH(pGpu, pKernelMemorySystem, swizzId)
320 #define kmemsysNumaRemoveAllMemory(pGpu, pKernelMemorySystem) kmemsysNumaRemoveAllMemory_DISPATCH(pGpu, pKernelMemorySystem)
321 #define kmemsysNumaRemoveAllMemory_HAL(pGpu, pKernelMemorySystem) kmemsysNumaRemoveAllMemory_DISPATCH(pGpu, pKernelMemorySystem)
322 #define kmemsysSetupAllAtsPeers(pGpu, pKernelMemorySystem) kmemsysSetupAllAtsPeers_DISPATCH(pGpu, pKernelMemorySystem)
323 #define kmemsysSetupAllAtsPeers_HAL(pGpu, pKernelMemorySystem) kmemsysSetupAllAtsPeers_DISPATCH(pGpu, pKernelMemorySystem)
324 #define kmemsysRemoveAllAtsPeers(pGpu, pKernelMemorySystem) kmemsysRemoveAllAtsPeers_DISPATCH(pGpu, pKernelMemorySystem)
325 #define kmemsysRemoveAllAtsPeers_HAL(pGpu, pKernelMemorySystem) kmemsysRemoveAllAtsPeers_DISPATCH(pGpu, pKernelMemorySystem)
326 #define kmemsysStateLoad(pGpu, pEngstate, arg0) kmemsysStateLoad_DISPATCH(pGpu, pEngstate, arg0)
327 #define kmemsysStateUnload(pGpu, pEngstate, arg0) kmemsysStateUnload_DISPATCH(pGpu, pEngstate, arg0)
328 #define kmemsysStatePostUnload(pGpu, pEngstate, arg0) kmemsysStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
329 #define kmemsysStateInitUnlocked(pGpu, pEngstate) kmemsysStateInitUnlocked_DISPATCH(pGpu, pEngstate)
330 #define kmemsysInitMissing(pGpu, pEngstate) kmemsysInitMissing_DISPATCH(pGpu, pEngstate)
331 #define kmemsysStatePreInitLocked(pGpu, pEngstate) kmemsysStatePreInitLocked_DISPATCH(pGpu, pEngstate)
332 #define kmemsysStatePreInitUnlocked(pGpu, pEngstate) kmemsysStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
333 #define kmemsysIsPresent(pGpu, pEngstate) kmemsysIsPresent_DISPATCH(pGpu, pEngstate)
334 NV_STATUS kmemsysGetUsableFbSize_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize);
335 
336 
337 #ifdef __nvoc_kern_mem_sys_h_disabled
338 static inline NV_STATUS kmemsysGetUsableFbSize(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize) {
339     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
340     return NV_ERR_NOT_SUPPORTED;
341 }
342 #else //__nvoc_kern_mem_sys_h_disabled
343 #define kmemsysGetUsableFbSize(pGpu, pKernelMemorySystem, pFbSize) kmemsysGetUsableFbSize_KERNEL(pGpu, pKernelMemorySystem, pFbSize)
344 #endif //__nvoc_kern_mem_sys_h_disabled
345 
346 #define kmemsysGetUsableFbSize_HAL(pGpu, pKernelMemorySystem, pFbSize) kmemsysGetUsableFbSize(pGpu, pKernelMemorySystem, pFbSize)
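
/*
 * Illustrative sketch only: callers go through the _HAL wrapper above, which
 * resolves either to the kernel implementation or to the disabled stub when
 * __nvoc_kern_mem_sys_h_disabled is defined. pGpu and pKernelMemorySystem are
 * assumed valid; fbSize is a hypothetical local.
 *
 *     NvU64 fbSize = 0;
 *     NV_STATUS status = kmemsysGetUsableFbSize_HAL(pGpu, pKernelMemorySystem,
 *                                                   &fbSize);
 *     if (status != NV_OK)
 *     {
 *         // fbSize is not valid; propagate the error.
 *     }
 */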
347 
348 static inline void kmemsysAssertSysmemFlushBufferValid_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
349     return;
350 }
351 
352 void kmemsysAssertSysmemFlushBufferValid_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
353 
354 void kmemsysAssertSysmemFlushBufferValid_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
355 
356 void kmemsysAssertSysmemFlushBufferValid_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
357 
358 
359 #ifdef __nvoc_kern_mem_sys_h_disabled
360 static inline void kmemsysAssertSysmemFlushBufferValid(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
361     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
362 }
363 #else //__nvoc_kern_mem_sys_h_disabled
364 #define kmemsysAssertSysmemFlushBufferValid(pGpu, pKernelMemorySystem) kmemsysAssertSysmemFlushBufferValid_b3696a(pGpu, pKernelMemorySystem)
365 #endif //__nvoc_kern_mem_sys_h_disabled
366 
367 #define kmemsysAssertSysmemFlushBufferValid_HAL(pGpu, pKernelMemorySystem) kmemsysAssertSysmemFlushBufferValid(pGpu, pKernelMemorySystem)
368 
369 NvU32 kmemsysGetFlushSysmemBufferAddrShift_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
370 
371 
372 #ifdef __nvoc_kern_mem_sys_h_disabled
373 static inline NvU32 kmemsysGetFlushSysmemBufferAddrShift(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
374     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
375     return 0;
376 }
377 #else //__nvoc_kern_mem_sys_h_disabled
378 #define kmemsysGetFlushSysmemBufferAddrShift(pGpu, pKernelMemorySystem) kmemsysGetFlushSysmemBufferAddrShift_GM107(pGpu, pKernelMemorySystem)
379 #endif //__nvoc_kern_mem_sys_h_disabled
380 
381 #define kmemsysGetFlushSysmemBufferAddrShift_HAL(pGpu, pKernelMemorySystem) kmemsysGetFlushSysmemBufferAddrShift(pGpu, pKernelMemorySystem)
382 
383 NV_STATUS kmemsysInitStaticConfig_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, MEMORY_SYSTEM_STATIC_CONFIG *pConfig);
384 
385 
386 #ifdef __nvoc_kern_mem_sys_h_disabled
387 static inline NV_STATUS kmemsysInitStaticConfig(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, MEMORY_SYSTEM_STATIC_CONFIG *pConfig) {
388     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
389     return NV_ERR_NOT_SUPPORTED;
390 }
391 #else //__nvoc_kern_mem_sys_h_disabled
392 #define kmemsysInitStaticConfig(pGpu, pKernelMemorySystem, pConfig) kmemsysInitStaticConfig_KERNEL(pGpu, pKernelMemorySystem, pConfig)
393 #endif //__nvoc_kern_mem_sys_h_disabled
394 
395 #define kmemsysInitStaticConfig_HAL(pGpu, pKernelMemorySystem, pConfig) kmemsysInitStaticConfig(pGpu, pKernelMemorySystem, pConfig)
396 
397 static inline NV_STATUS kmemsysPreFillCacheOnlyMemory_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) {
398     return NV_OK;
399 }
400 
401 NV_STATUS kmemsysPreFillCacheOnlyMemory_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1);
402 
403 
404 #ifdef __nvoc_kern_mem_sys_h_disabled
405 static inline NV_STATUS kmemsysPreFillCacheOnlyMemory(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) {
406     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
407     return NV_ERR_NOT_SUPPORTED;
408 }
409 #else //__nvoc_kern_mem_sys_h_disabled
410 #define kmemsysPreFillCacheOnlyMemory(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysPreFillCacheOnlyMemory_56cd7a(pGpu, pKernelMemorySystem, arg0, arg1)
411 #endif //__nvoc_kern_mem_sys_h_disabled
412 
413 #define kmemsysPreFillCacheOnlyMemory_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysPreFillCacheOnlyMemory(pGpu, pKernelMemorySystem, arg0, arg1)
414 
415 static inline NV_STATUS kmemsysCheckDisplayRemapperRange_14278f(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) {
416     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_INVALID_STATE);
417 }
418 
419 
420 #ifdef __nvoc_kern_mem_sys_h_disabled
421 static inline NV_STATUS kmemsysCheckDisplayRemapperRange(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) {
422     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
423     return NV_ERR_NOT_SUPPORTED;
424 }
425 #else //__nvoc_kern_mem_sys_h_disabled
426 #define kmemsysCheckDisplayRemapperRange(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysCheckDisplayRemapperRange_14278f(pGpu, pKernelMemorySystem, arg0, arg1)
427 #endif //__nvoc_kern_mem_sys_h_disabled
428 
429 #define kmemsysCheckDisplayRemapperRange_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysCheckDisplayRemapperRange(pGpu, pKernelMemorySystem, arg0, arg1)
430 
431 static inline void kmemsysPostHeapCreate_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
432     return;
433 }
434 
435 
436 #ifdef __nvoc_kern_mem_sys_h_disabled
437 static inline void kmemsysPostHeapCreate(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
438     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
439 }
440 #else //__nvoc_kern_mem_sys_h_disabled
441 #define kmemsysPostHeapCreate(pGpu, pKernelMemorySystem) kmemsysPostHeapCreate_b3696a(pGpu, pKernelMemorySystem)
442 #endif //__nvoc_kern_mem_sys_h_disabled
443 
444 #define kmemsysPostHeapCreate_HAL(pGpu, pKernelMemorySystem) kmemsysPostHeapCreate(pGpu, pKernelMemorySystem)
445 
446 static inline void kmemsysPreHeapDestruct_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
447     return;
448 }
449 
450 
451 #ifdef __nvoc_kern_mem_sys_h_disabled
452 static inline void kmemsysPreHeapDestruct(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
453     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
454 }
455 #else //__nvoc_kern_mem_sys_h_disabled
456 #define kmemsysPreHeapDestruct(pGpu, pKernelMemorySystem) kmemsysPreHeapDestruct_b3696a(pGpu, pKernelMemorySystem)
457 #endif //__nvoc_kern_mem_sys_h_disabled
458 
459 #define kmemsysPreHeapDestruct_HAL(pGpu, pKernelMemorySystem) kmemsysPreHeapDestruct(pGpu, pKernelMemorySystem)
460 
461 NV_STATUS kmemsysAllocComprResources_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, FB_ALLOC_INFO *arg0, NvU64 arg1, NvU32 arg2, NvU32 *arg3, NvU32 arg4);
462 
463 
464 #ifdef __nvoc_kern_mem_sys_h_disabled
465 static inline NV_STATUS kmemsysAllocComprResources(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, FB_ALLOC_INFO *arg0, NvU64 arg1, NvU32 arg2, NvU32 *arg3, NvU32 arg4) {
466     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
467     return NV_ERR_NOT_SUPPORTED;
468 }
469 #else //__nvoc_kern_mem_sys_h_disabled
470 #define kmemsysAllocComprResources(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4) kmemsysAllocComprResources_KERNEL(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4)
471 #endif //__nvoc_kern_mem_sys_h_disabled
472 
473 #define kmemsysAllocComprResources_HAL(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4) kmemsysAllocComprResources(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4)
474 
475 static inline void kmemsysFreeComprResources_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 hwResId) {
476     return;
477 }
478 
479 
480 #ifdef __nvoc_kern_mem_sys_h_disabled
481 static inline void kmemsysFreeComprResources(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 hwResId) {
482     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
483 }
484 #else //__nvoc_kern_mem_sys_h_disabled
485 #define kmemsysFreeComprResources(pGpu, pKernelMemorySystem, hwResId) kmemsysFreeComprResources_b3696a(pGpu, pKernelMemorySystem, hwResId)
486 #endif //__nvoc_kern_mem_sys_h_disabled
487 
488 #define kmemsysFreeComprResources_HAL(pGpu, pKernelMemorySystem, hwResId) kmemsysFreeComprResources(pGpu, pKernelMemorySystem, hwResId)
489 
490 NV_STATUS kmemsysPopulateMIGGPUInstanceMemConfig_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
491 
492 
493 #ifdef __nvoc_kern_mem_sys_h_disabled
494 static inline NV_STATUS kmemsysPopulateMIGGPUInstanceMemConfig(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
495     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
496     return NV_ERR_NOT_SUPPORTED;
497 }
498 #else //__nvoc_kern_mem_sys_h_disabled
499 #define kmemsysPopulateMIGGPUInstanceMemConfig(pGpu, pKernelMemorySystem) kmemsysPopulateMIGGPUInstanceMemConfig_KERNEL(pGpu, pKernelMemorySystem)
500 #endif //__nvoc_kern_mem_sys_h_disabled
501 
502 #define kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem) kmemsysPopulateMIGGPUInstanceMemConfig(pGpu, pKernelMemorySystem)
503 
504 NvBool kmemsysNeedInvalidateGpuCacheOnMap_GV100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bIsVolatile, NvU32 aperture);
505 
506 
507 #ifdef __nvoc_kern_mem_sys_h_disabled
508 static inline NvBool kmemsysNeedInvalidateGpuCacheOnMap(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bIsVolatile, NvU32 aperture) {
509     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
510     return NV_FALSE;
511 }
512 #else //__nvoc_kern_mem_sys_h_disabled
513 #define kmemsysNeedInvalidateGpuCacheOnMap(pGpu, pKernelMemorySystem, bIsVolatile, aperture) kmemsysNeedInvalidateGpuCacheOnMap_GV100(pGpu, pKernelMemorySystem, bIsVolatile, aperture)
514 #endif //__nvoc_kern_mem_sys_h_disabled
515 
516 #define kmemsysNeedInvalidateGpuCacheOnMap_HAL(pGpu, pKernelMemorySystem, bIsVolatile, aperture) kmemsysNeedInvalidateGpuCacheOnMap(pGpu, pKernelMemorySystem, bIsVolatile, aperture)
517 
518 NV_STATUS kmemsysConstructEngine_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, ENGDESCRIPTOR arg0);
519 
520 static inline NV_STATUS kmemsysConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, ENGDESCRIPTOR arg0) {
521     return pKernelMemorySystem->__kmemsysConstructEngine__(pGpu, pKernelMemorySystem, arg0);
522 }
523 
524 NV_STATUS kmemsysStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
525 
526 static inline NV_STATUS kmemsysStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
527     return pKernelMemorySystem->__kmemsysStateInitLocked__(pGpu, pKernelMemorySystem);
528 }
529 
530 NV_STATUS kmemsysStatePreLoad_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags);
531 
532 static inline NV_STATUS kmemsysStatePreLoad_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags) {
533     return pKernelMemorySystem->__kmemsysStatePreLoad__(pGpu, pKernelMemorySystem, flags);
534 }
535 
536 NV_STATUS kmemsysStatePostLoad_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags);
537 
538 static inline NV_STATUS kmemsysStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags) {
539     return pKernelMemorySystem->__kmemsysStatePostLoad__(pGpu, pKernelMemorySystem, flags);
540 }
541 
542 NV_STATUS kmemsysStatePreUnload_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags);
543 
544 static inline NV_STATUS kmemsysStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags) {
545     return pKernelMemorySystem->__kmemsysStatePreUnload__(pGpu, pKernelMemorySystem, flags);
546 }
547 
548 void kmemsysStateDestroy_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
549 
550 static inline void kmemsysStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
551     pKernelMemorySystem->__kmemsysStateDestroy__(pGpu, pKernelMemorySystem);
552 }
553 
554 NV_STATUS kmemsysGetFbNumaInfo_GV100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *physAddr, NvS32 *numaNodeId);
555 
556 static inline NV_STATUS kmemsysGetFbNumaInfo_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *physAddr, NvS32 *numaNodeId) {
557     return NV_OK;
558 }
559 
560 static inline NV_STATUS kmemsysGetFbNumaInfo_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *physAddr, NvS32 *numaNodeId) {
561     return pKernelMemorySystem->__kmemsysGetFbNumaInfo__(pGpu, pKernelMemorySystem, physAddr, numaNodeId);
562 }
563 
564 NV_STATUS kmemsysReadUsableFbSize_GP102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize);
565 
566 NV_STATUS kmemsysReadUsableFbSize_GA102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize);
567 
568 static inline NV_STATUS kmemsysReadUsableFbSize_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize) {
569     return pKernelMemorySystem->__kmemsysReadUsableFbSize__(pGpu, pKernelMemorySystem, pFbSize);
570 }
571 
572 NV_STATUS kmemsysCacheOp_GM200(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, PMEMORY_DESCRIPTOR arg0, FB_CACHE_MEMTYPE arg1, FB_CACHE_OP operation);
573 
574 NV_STATUS kmemsysCacheOp_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, PMEMORY_DESCRIPTOR arg0, FB_CACHE_MEMTYPE arg1, FB_CACHE_OP operation);
575 
576 static inline NV_STATUS kmemsysCacheOp_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, PMEMORY_DESCRIPTOR arg0, FB_CACHE_MEMTYPE arg1, FB_CACHE_OP operation) {
577     return pKernelMemorySystem->__kmemsysCacheOp__(pGpu, pKernelMemorySystem, arg0, arg1, operation);
578 }
579 
580 NV_STATUS kmemsysDoCacheOp_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0, NvU32 arg1, NvU32 arg2, PRMTIMEOUT arg3);
581 
582 NV_STATUS kmemsysDoCacheOp_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0, NvU32 arg1, NvU32 arg2, PRMTIMEOUT arg3);
583 
584 static inline NV_STATUS kmemsysDoCacheOp_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0, NvU32 arg1, NvU32 arg2, PRMTIMEOUT arg3) {
585     return pKernelMemorySystem->__kmemsysDoCacheOp__(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3);
586 }
587 
588 NvU32 kmemsysReadL2SysmemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
589 
590 static inline NvU32 kmemsysReadL2SysmemInvalidateReg_68b109(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
591     NV_ASSERT_OR_RETURN_PRECOMP(0, -1);
592 }
593 
594 static inline NvU32 kmemsysReadL2SysmemInvalidateReg_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
595     return pKernelMemorySystem->__kmemsysReadL2SysmemInvalidateReg__(pGpu, pKernelMemorySystem);
596 }
597 
598 void kmemsysWriteL2SysmemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0);
599 
600 static inline void kmemsysWriteL2SysmemInvalidateReg_f2d351(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0) {
601     NV_ASSERT_PRECOMP(0);
602 }
603 
604 static inline void kmemsysWriteL2SysmemInvalidateReg_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0) {
605     pKernelMemorySystem->__kmemsysWriteL2SysmemInvalidateReg__(pGpu, pKernelMemorySystem, arg0);
606 }
607 
608 NvU32 kmemsysReadL2PeermemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
609 
610 static inline NvU32 kmemsysReadL2PeermemInvalidateReg_68b109(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
611     NV_ASSERT_OR_RETURN_PRECOMP(0, -1);
612 }
613 
614 static inline NvU32 kmemsysReadL2PeermemInvalidateReg_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
615     return pKernelMemorySystem->__kmemsysReadL2PeermemInvalidateReg__(pGpu, pKernelMemorySystem);
616 }
617 
618 void kmemsysWriteL2PeermemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0);
619 
620 static inline void kmemsysWriteL2PeermemInvalidateReg_f2d351(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0) {
621     NV_ASSERT_PRECOMP(0);
622 }
623 
624 static inline void kmemsysWriteL2PeermemInvalidateReg_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0) {
625     pKernelMemorySystem->__kmemsysWriteL2PeermemInvalidateReg__(pGpu, pKernelMemorySystem, arg0);
626 }
627 
628 NV_STATUS kmemsysInitFlushSysmemBuffer_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
629 
630 NV_STATUS kmemsysInitFlushSysmemBuffer_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
631 
632 static inline NV_STATUS kmemsysInitFlushSysmemBuffer_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
633     return pKernelMemorySystem->__kmemsysInitFlushSysmemBuffer__(pGpu, pKernelMemorySystem);
634 }
635 
636 void kmemsysProgramSysmemFlushBuffer_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
637 
638 void kmemsysProgramSysmemFlushBuffer_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
639 
640 void kmemsysProgramSysmemFlushBuffer_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
641 
642 static inline void kmemsysProgramSysmemFlushBuffer_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
643     pKernelMemorySystem->__kmemsysProgramSysmemFlushBuffer__(pGpu, pKernelMemorySystem);
644 }
645 
646 NvBool kmemsysIsPagePLCable_GA100(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize);
647 
648 NvBool kmemsysIsPagePLCable_GA102(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize);
649 
650 static inline NvBool kmemsysIsPagePLCable_510167(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize) {
651     NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 == 0)));
652 }
653 
654 static inline NvBool kmemsysIsPagePLCable_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize) {
655     return KernelMemorySystem->__kmemsysIsPagePLCable__(pGpu, KernelMemorySystem, physAddr, pageSize);
656 }
657 
658 static inline NV_STATUS kmemsysReadMIGMemoryCfg_46f6a7(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
659     return NV_ERR_NOT_SUPPORTED;
660 }
661 
662 NV_STATUS kmemsysReadMIGMemoryCfg_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
663 
664 static inline NV_STATUS kmemsysReadMIGMemoryCfg_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
665     return pKernelMemorySystem->__kmemsysReadMIGMemoryCfg__(pGpu, pKernelMemorySystem);
666 }
667 
668 static inline NV_STATUS kmemsysInitMIGMemoryPartitionTable_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
669     return NV_OK;
670 }
671 
672 NV_STATUS kmemsysInitMIGMemoryPartitionTable_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
673 
674 static inline NV_STATUS kmemsysInitMIGMemoryPartitionTable_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
675     return pKernelMemorySystem->__kmemsysInitMIGMemoryPartitionTable__(pGpu, pKernelMemorySystem);
676 }
677 
678 NV_STATUS kmemsysSwizzIdToVmmuSegmentsRange_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU32 vmmuSegmentSize, NvU32 totalVmmuSegments);
679 
680 NV_STATUS kmemsysSwizzIdToVmmuSegmentsRange_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU32 vmmuSegmentSize, NvU32 totalVmmuSegments);
681 
682 static inline NV_STATUS kmemsysSwizzIdToVmmuSegmentsRange_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU32 vmmuSegmentSize, NvU32 totalVmmuSegments) {
683     return pKernelMemorySystem->__kmemsysSwizzIdToVmmuSegmentsRange__(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments);
684 }
685 
686 NV_STATUS kmemsysNumaAddMemory_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU64 offset, NvU64 size, NvS32 *numaNodeId);
687 
688 static inline NV_STATUS kmemsysNumaAddMemory_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU64 offset, NvU64 size, NvS32 *numaNodeId) {
689     return NV_OK;
690 }
691 
692 static inline NV_STATUS kmemsysNumaAddMemory_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU64 offset, NvU64 size, NvS32 *numaNodeId) {
693     return pKernelMemorySystem->__kmemsysNumaAddMemory__(pGpu, pKernelMemorySystem, swizzId, offset, size, numaNodeId);
694 }
695 
696 void kmemsysNumaRemoveMemory_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId);
697 
698 static inline void kmemsysNumaRemoveMemory_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId) {
699     return;
700 }
701 
702 static inline void kmemsysNumaRemoveMemory_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId) {
703     pKernelMemorySystem->__kmemsysNumaRemoveMemory__(pGpu, pKernelMemorySystem, swizzId);
704 }
705 
706 void kmemsysNumaRemoveAllMemory_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
707 
708 static inline void kmemsysNumaRemoveAllMemory_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
709     return;
710 }
711 
712 static inline void kmemsysNumaRemoveAllMemory_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
713     pKernelMemorySystem->__kmemsysNumaRemoveAllMemory__(pGpu, pKernelMemorySystem);
714 }
715 
716 static inline NV_STATUS kmemsysSetupAllAtsPeers_46f6a7(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
717     return NV_ERR_NOT_SUPPORTED;
718 }
719 
720 NV_STATUS kmemsysSetupAllAtsPeers_GV100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
721 
722 static inline NV_STATUS kmemsysSetupAllAtsPeers_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
723     return pKernelMemorySystem->__kmemsysSetupAllAtsPeers__(pGpu, pKernelMemorySystem);
724 }
725 
726 static inline void kmemsysRemoveAllAtsPeers_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
727     return;
728 }
729 
730 void kmemsysRemoveAllAtsPeers_GV100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
731 
732 static inline void kmemsysRemoveAllAtsPeers_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
733     pKernelMemorySystem->__kmemsysRemoveAllAtsPeers__(pGpu, pKernelMemorySystem);
734 }
735 
736 static inline NV_STATUS kmemsysStateLoad_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) {
737     return pEngstate->__kmemsysStateLoad__(pGpu, pEngstate, arg0);
738 }
739 
740 static inline NV_STATUS kmemsysStateUnload_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) {
741     return pEngstate->__kmemsysStateUnload__(pGpu, pEngstate, arg0);
742 }
743 
744 static inline NV_STATUS kmemsysStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) {
745     return pEngstate->__kmemsysStatePostUnload__(pGpu, pEngstate, arg0);
746 }
747 
748 static inline NV_STATUS kmemsysStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) {
749     return pEngstate->__kmemsysStateInitUnlocked__(pGpu, pEngstate);
750 }
751 
752 static inline void kmemsysInitMissing_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) {
753     pEngstate->__kmemsysInitMissing__(pGpu, pEngstate);
754 }
755 
756 static inline NV_STATUS kmemsysStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) {
757     return pEngstate->__kmemsysStatePreInitLocked__(pGpu, pEngstate);
758 }
759 
760 static inline NV_STATUS kmemsysStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) {
761     return pEngstate->__kmemsysStatePreInitUnlocked__(pGpu, pEngstate);
762 }
763 
764 static inline NvBool kmemsysIsPresent_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) {
765     return pEngstate->__kmemsysIsPresent__(pGpu, pEngstate);
766 }
767 
768 static inline NvBool kmemsysIsL2CleanFbPull(struct KernelMemorySystem *pKernelMemorySystem) {
769     return pKernelMemorySystem->bL2CleanFbPull;
770 }
771 
772 void kmemsysDestruct_IMPL(struct KernelMemorySystem *pKernelMemorySystem);
773 
774 #define __nvoc_kmemsysDestruct(pKernelMemorySystem) kmemsysDestruct_IMPL(pKernelMemorySystem)
775 NV_STATUS kmemsysEnsureSysmemFlushBufferInitialized_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
776 
777 #ifdef __nvoc_kern_mem_sys_h_disabled
778 static inline NV_STATUS kmemsysEnsureSysmemFlushBufferInitialized(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
779     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
780     return NV_ERR_NOT_SUPPORTED;
781 }
782 #else //__nvoc_kern_mem_sys_h_disabled
783 #define kmemsysEnsureSysmemFlushBufferInitialized(pGpu, pKernelMemorySystem) kmemsysEnsureSysmemFlushBufferInitialized_IMPL(pGpu, pKernelMemorySystem)
784 #endif //__nvoc_kern_mem_sys_h_disabled
785 
786 const MEMORY_SYSTEM_STATIC_CONFIG *kmemsysGetStaticConfig_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
787 
788 #ifdef __nvoc_kern_mem_sys_h_disabled
789 static inline const MEMORY_SYSTEM_STATIC_CONFIG *kmemsysGetStaticConfig(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
790     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
791     return NULL;
792 }
793 #else //__nvoc_kern_mem_sys_h_disabled
794 #define kmemsysGetStaticConfig(pGpu, pKernelMemorySystem) kmemsysGetStaticConfig_IMPL(pGpu, pKernelMemorySystem)
795 #endif //__nvoc_kern_mem_sys_h_disabled
796 
797 NV_STATUS kmemsysSetupCoherentCpuLink_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush);
798 
799 #ifdef __nvoc_kern_mem_sys_h_disabled
800 static inline NV_STATUS kmemsysSetupCoherentCpuLink(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush) {
801     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
802     return NV_ERR_NOT_SUPPORTED;
803 }
804 #else //__nvoc_kern_mem_sys_h_disabled
805 #define kmemsysSetupCoherentCpuLink(pGpu, pKernelMemorySystem, bFlush) kmemsysSetupCoherentCpuLink_IMPL(pGpu, pKernelMemorySystem, bFlush)
806 #endif //__nvoc_kern_mem_sys_h_disabled
807 
808 void kmemsysTeardownCoherentCpuLink_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush);
809 
810 #ifdef __nvoc_kern_mem_sys_h_disabled
811 static inline void kmemsysTeardownCoherentCpuLink(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush) {
812     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
813 }
814 #else //__nvoc_kern_mem_sys_h_disabled
815 #define kmemsysTeardownCoherentCpuLink(pGpu, pKernelMemorySystem, bFlush) kmemsysTeardownCoherentCpuLink_IMPL(pGpu, pKernelMemorySystem, bFlush)
816 #endif //__nvoc_kern_mem_sys_h_disabled
817 
818 NV_STATUS kmemsysSendL2InvalidateEvict_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags);
819 
820 #ifdef __nvoc_kern_mem_sys_h_disabled
821 static inline NV_STATUS kmemsysSendL2InvalidateEvict(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags) {
822     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
823     return NV_ERR_NOT_SUPPORTED;
824 }
825 #else //__nvoc_kern_mem_sys_h_disabled
826 #define kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, flags) kmemsysSendL2InvalidateEvict_IMPL(pGpu, pKernelMemorySystem, flags)
827 #endif //__nvoc_kern_mem_sys_h_disabled
828 
829 NV_STATUS kmemsysSendFlushL2AllRamsAndCaches_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
830 
831 #ifdef __nvoc_kern_mem_sys_h_disabled
832 static inline NV_STATUS kmemsysSendFlushL2AllRamsAndCaches(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
833     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
834     return NV_ERR_NOT_SUPPORTED;
835 }
836 #else //__nvoc_kern_mem_sys_h_disabled
837 #define kmemsysSendFlushL2AllRamsAndCaches(pGpu, pKernelMemorySystem) kmemsysSendFlushL2AllRamsAndCaches_IMPL(pGpu, pKernelMemorySystem)
838 #endif //__nvoc_kern_mem_sys_h_disabled
839 
840 NV_STATUS kmemsysSwizzIdToMIGMemSize_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, NvU32 *pPartitionSizeFlag, NvU64 *pSizeInBytes);
841 
842 #ifdef __nvoc_kern_mem_sys_h_disabled
843 static inline NV_STATUS kmemsysSwizzIdToMIGMemSize(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, NvU32 *pPartitionSizeFlag, NvU64 *pSizeInBytes) {
844     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
845     return NV_ERR_NOT_SUPPORTED;
846 }
847 #else //__nvoc_kern_mem_sys_h_disabled
848 #define kmemsysSwizzIdToMIGMemSize(pGpu, pKernelMemorySystem, swizzId, totalRange, pPartitionSizeFlag, pSizeInBytes) kmemsysSwizzIdToMIGMemSize_IMPL(pGpu, pKernelMemorySystem, swizzId, totalRange, pPartitionSizeFlag, pSizeInBytes)
849 #endif //__nvoc_kern_mem_sys_h_disabled
850 
851 NV_STATUS kmemsysSwizzIdToMIGMemRange_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, struct NV_RANGE *pAddrRange);
852 
853 #ifdef __nvoc_kern_mem_sys_h_disabled
854 static inline NV_STATUS kmemsysSwizzIdToMIGMemRange(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, struct NV_RANGE *pAddrRange) {
855     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
856     return NV_ERR_NOT_SUPPORTED;
857 }
858 #else //__nvoc_kern_mem_sys_h_disabled
859 #define kmemsysSwizzIdToMIGMemRange(pGpu, pKernelMemorySystem, swizzId, totalRange, pAddrRange) kmemsysSwizzIdToMIGMemRange_IMPL(pGpu, pKernelMemorySystem, swizzId, totalRange, pAddrRange)
860 #endif //__nvoc_kern_mem_sys_h_disabled
861 
862 NV_STATUS kmemsysGetMIGGPUInstanceMemInfo_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE *pAddrRange);
863 
864 #ifdef __nvoc_kern_mem_sys_h_disabled
865 static inline NV_STATUS kmemsysGetMIGGPUInstanceMemInfo(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE *pAddrRange) {
866     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
867     return NV_ERR_NOT_SUPPORTED;
868 }
869 #else //__nvoc_kern_mem_sys_h_disabled
870 #define kmemsysGetMIGGPUInstanceMemInfo(pGpu, pKernelMemorySystem, swizzId, pAddrRange) kmemsysGetMIGGPUInstanceMemInfo_IMPL(pGpu, pKernelMemorySystem, swizzId, pAddrRange)
871 #endif //__nvoc_kern_mem_sys_h_disabled
872 
873 NV_STATUS kmemsysGetMIGGPUInstanceMemConfigFromSwizzId_IMPL(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, const MIG_GPU_INSTANCE_MEMORY_CONFIG **arg2);
874 
875 #ifdef __nvoc_kern_mem_sys_h_disabled
876 static inline NV_STATUS kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, const MIG_GPU_INSTANCE_MEMORY_CONFIG **arg2) {
877     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
878     return NV_ERR_NOT_SUPPORTED;
879 }
880 #else //__nvoc_kern_mem_sys_h_disabled
881 #define kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(arg0, arg1, swizzId, arg2) kmemsysGetMIGGPUInstanceMemConfigFromSwizzId_IMPL(arg0, arg1, swizzId, arg2)
882 #endif //__nvoc_kern_mem_sys_h_disabled
883 
884 NV_STATUS kmemsysInitMIGGPUInstanceMemConfigForSwizzId_IMPL(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, NvU64 startingVmmuSegment, NvU64 memSizeInVmmuSegment);
885 
886 #ifdef __nvoc_kern_mem_sys_h_disabled
887 static inline NV_STATUS kmemsysInitMIGGPUInstanceMemConfigForSwizzId(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, NvU64 startingVmmuSegment, NvU64 memSizeInVmmuSegment) {
888     NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
889     return NV_ERR_NOT_SUPPORTED;
890 }
891 #else //__nvoc_kern_mem_sys_h_disabled
892 #define kmemsysInitMIGGPUInstanceMemConfigForSwizzId(arg0, arg1, swizzId, startingVmmuSegment, memSizeInVmmuSegment) kmemsysInitMIGGPUInstanceMemConfigForSwizzId_IMPL(arg0, arg1, swizzId, startingVmmuSegment, memSizeInVmmuSegment)
893 #endif //__nvoc_kern_mem_sys_h_disabled
894 
895 #undef PRIVATE_FIELD
896 
897 
#define IS_COHERENT_CPU_ATS_OFFSET(kmemsys, offset, length)              \
    ((kmemsys) && ((offset) >= (kmemsys)->coherentCpuFbBase) &&          \
     (((NvU64)(offset) + (length)) <= (kmemsys)->coherentCpuFbEnd))
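
/*
 * Illustrative sketch only: the macro checks whether [offset, offset + length)
 * lies entirely within the coherent CPU-visible FB aperture tracked by
 * coherentCpuFbBase/coherentCpuFbEnd. The surrounding variables are
 * hypothetical.
 *
 *     if (IS_COHERENT_CPU_ATS_OFFSET(pKernelMemorySystem, fbOffset, mapLength))
 *     {
 *         // The range can be reached through the coherent CPU/ATS mapping.
 *     }
 */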
901 
902 #endif // KERN_MEM_SYS_H
903 
904 #ifdef __cplusplus
905 } // extern "C"
906 #endif
907 #endif // _G_KERN_MEM_SYS_NVOC_H_
908