1 #ifndef _G_KERN_GMMU_NVOC_H_
2 #define _G_KERN_GMMU_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 /******************************************************************************
33 *
34 *       Kernel GMMU module header
35 *       Defines and structures used on CPU RM for the GMMU object.
36 *
37 ******************************************************************************/
38 
39 #include "g_kern_gmmu_nvoc.h"
40 
41 #ifndef KERN_GMMU_H
42 #define KERN_GMMU_H
43 
44 #include "core/core.h"
45 #include "core/strict.h"
46 #include "nvtypes.h"
47 #include "nvoc/prelude.h"
48 #include "nvoc/object.h"
49 #include "gpu/mmu/mmu_trace.h"
50 #include "mmu/gmmu_fmt.h"
51 #include "class/cl90f1.h"    // FERMI_VASPACE_A
52 
53 #include "gpu/gpu_timeout.h"
54 #include "containers/queue.h"
55 #include "gpu/eng_state.h"
56 #include "gpu/intr/intr_service.h"
57 #include "gpu/mem_mgr/virt_mem_allocator_common.h" // RM_PAGE_SIZE_64K
58 #include "mmu/mmu_walk.h"
59 
60 #include "gpu/gpu_halspec.h"
61 #include "ctrl/ctrl2080/ctrl2080internal.h"  // NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS
62 
63 #include "class/clc369.h" // MMU_FAULT_BUFFER
64 
65 typedef struct COMPR_INFO COMPR_INFO;
66 
67 typedef struct GVAS_GPU_STATE GVAS_GPU_STATE;
68 
69 /*!
70  * Family of GMMU formats sharing the same version and PDE/PTE defines
71  * but with differing big page sizes.
72  * The term "family" is used here in the mathematical (set theory) sense.
73  *
 * nv4kPte: GV100+ supports NV4K encoding; see @ref gmmuStateInitHal_GV100 for details.
75  *
76  */
77 typedef struct
78 {
79     GMMU_FMT_PDE_MULTI pdeMulti;
80     GMMU_FMT_PDE       pde;
81     GMMU_FMT_PTE       pte;
82     GMMU_ENTRY_VALUE   sparsePte;
83     GMMU_ENTRY_VALUE   sparsePde;
84     GMMU_ENTRY_VALUE   sparsePdeMulti;
85     GMMU_ENTRY_VALUE   nv4kPte;
86     GMMU_ENTRY_VALUE   bug2720120WarPde0;
87     GMMU_ENTRY_VALUE   bug2720120WarPde1;
88     GMMU_FMT          *pFmts[GMMU_FMT_MAX_BIG_PAGE_SIZES];
89 } GMMU_FMT_FAMILY;
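
/*
 * Illustrative sketch (not part of the generated interface): pFmts holds one
 * GMMU_FMT pointer per supported big page size, so a caller holding a family
 * can walk the table as below. The exact index-to-page-size mapping is
 * defined by mmu/gmmu_fmt.h and is assumed here for illustration only.
 *
 *   const GMMU_FMT_FAMILY *pFam = ...;
 *   NvU32 i;
 *   for (i = 0; i < GMMU_FMT_MAX_BIG_PAGE_SIZES; i++)
 *   {
 *       if (pFam->pFmts[i] != NULL)
 *       {
 *           // pFmts[i] describes the format variant for the i-th big page size.
 *       }
 *   }
 */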
90 
91 /*!
92  * This structure contains information needed for issuing a TLB invalidate.
93  */
94 typedef struct
95 {
96     RmPhysAddr pdbAddress;
97     NvU32      pdbAperture;
98     NvU32      gfid;
99     NvU32      regVal;
100     RMTIMEOUT  timeout;
101 } TLB_INVALIDATE_PARAMS;
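
/*
 * Hypothetical usage sketch, not generated code: a caller typically
 * zero-initializes this structure, fills in the target GFID/PDB, and passes
 * it to the TLB invalidate HALs declared later in this header. The exact
 * sequence below is an assumption for illustration; the timeout member is
 * expected to be initialized by the caller before committing.
 *
 *   TLB_INVALIDATE_PARAMS params = {0};
 *   params.gfid = gfid;
 *   kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, &params);
 *   NV_STATUS status = kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, &params);
 */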
102 
103 typedef enum
104 {
105     NON_REPLAYABLE_FAULT_BUFFER = 0,
106     REPLAYABLE_FAULT_BUFFER,
    // This should always be the last entry.
108     NUM_FAULT_BUFFERS
109 } FAULT_BUFFER_TYPE;
110 
111 /*!
112  * This structure holds information about a page
113  * of memory backing the fault buffer.
114  */
115 typedef struct
116 {
117     /*! Virtual address of this page */
118     NvP64 pAddress;
119 
120     /*! Cookie returned by memdescMap() */
121     NvP64 pPriv;
122 } GMMU_FAULT_BUFFER_PAGE;
123 
124 /*!
 * This structure holds information about the MMU HW fault buffer, which is mapped on BAR2
 * and is used by the MMU to report MMU faults to SW.
127  */
128 struct HW_FAULT_BUFFER
129 {
130     NvU64 bar2FaultBufferAddr;
131     MEMORY_DESCRIPTOR *pFaultBufferMemDesc;
132     /*!
133      * cookie that is stored for the CPU mapping
134      */
135     NvP64 hCpuFaultBuffer;
136     NvP64 kernelVaddr;
137 
138     GMMU_FAULT_BUFFER_PAGE *pBufferPages;
139 
140     NvU32 cachedGetIndex;
141 
142     /*!
143      * cached fault buffer size
144      */
145     NvU32 faultBufferSize;
146 };
147 
148 /*!
149  * This structure holds information shared between CPU-RM
150  * and GSP-RM
151  */
152 typedef struct
153 {
154     /*!
155      * The value updated by GSP-RM to notify CPU-RM that
156      * a successful flush has happened
157      */
158     NvU32 flushBufferSeqNum;
159 
160     /*!
     * The GET index of the replayable shadow buffer. This
     * is updated by the UVM driver and read by GSP-RM.
163      */
164     NvU32 swGetIndex;
165 } FAULT_BUFFER_SHARED_MEMORY;
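
/*
 * Assumed handshake, derived only from the field descriptions above and
 * shown for illustration: GSP-RM bumps flushBufferSeqNum after a successful
 * flush, so CPU-RM can detect completion by comparing it against a cached
 * copy taken before the flush was requested.
 *
 *   FAULT_BUFFER_SHARED_MEMORY *pShared = ...;
 *   NvU32 seqNumBefore = pShared->flushBufferSeqNum;
 *   // ... request the flush ...
 *   if (pShared->flushBufferSeqNum != seqNumBefore)
 *   {
 *       // At least one flush has completed since the request.
 *   }
 */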
166 
167 /*!
168  * This structure holds information about the client shadow fault buffer.
169  */
170 typedef struct
171 {
172     /*!
173      * Pointer to circular queue structure shared by the RM with a
174      * privileged client, used as the shadow fault buffer for holding
175      * non-replayable faults.
     * This structure is shared between CPU-RM and GSP-RM in a
     * GSP-enabled driver.
178      */
179     NvP64 pQueue;
180 
181     /*! Memory descriptors associated with the queue. */
182     MEMORY_DESCRIPTOR *pQueueMemDesc;
183 
184     NvP64 pQueueAddress;
185 
186     /*!
     * Execution context for the queue. Holds environment-specific
     * data that enables queue usage.
189      */
190     QueueContext queueContext;
191 
192     /*! Cookie returned by memdescMap() */
193     NvP64 pQueuePriv;
194 
195     /*! Memory descriptor associated with the buffer. */
196     MEMORY_DESCRIPTOR *pBufferMemDesc;
197 
198     NvP64 pBufferAddress;
199 
200     /*! Cookie returned by memdescMap() */
201     NvP64 pBufferPriv;
202 
    /*! GSP-only split mapping of the buffer. */
204     GMMU_FAULT_BUFFER_PAGE *pBufferPages;
205 
206     NvU32 numBufferPages;
207 
208     /*!
209      * Used only by the replayable fault buffer. Memory descriptor used to
     * describe shared memory between CPU-RM and GSP-RM.
211      */
212     MEMORY_DESCRIPTOR *pFaultBufferSharedMemDesc;
213 
214     NvP64 pFaultBufferSharedMemoryAddress;
215 
216     NvP64 pFaultBufferSharedMemoryPriv;
217 } GMMU_CLIENT_SHADOW_FAULT_BUFFER;
218 
219 /*!
 * Top-level structure containing all data structures used in MMU fault handling.
221  */
222 struct GMMU_FAULT_BUFFER
223 {
224     struct HW_FAULT_BUFFER hwFaultBuffers[NUM_FAULT_BUFFERS];
225 
226     /*!
     * Unique client and object handles stored here.
     * On VOLTA these are for MMU_FAULT_BUFFER; on PASCAL, for MAXWELL_FAULT_BUFFER_A.
229      */
230     NvHandle hFaultBufferClient;
231     NvHandle hFaultBufferObject;
232 
233     /*!
     * Pointer to the circular queue structure used as the shadow fault buffer for
     * holding fatal fault packets serviced by RM.
236      */
237     NvP64 pRmShadowFaultBuffer;
238 
239     /*!
     * Client shadow fault buffer data and pointers, protected by GPU locks.
     * A client may allocate up to 2 shadow buffers, one each for replayable and
     * non-replayable faults.
243      */
244     GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer[NUM_FAULT_BUFFERS];
245     GMMU_CLIENT_SHADOW_FAULT_BUFFER clientShadowFaultBuffer[NUM_FAULT_BUFFERS];
246 
247     /*!
248      * SpinLock to protect shadow buffer pointers
249      */
250     PORT_SPINLOCK *pShadowFaultBufLock;
251 
252     /*!
     * Flag indicating whether a fatal fault interrupt is pending.
254      */
255     NvS32 fatalFaultIntrPending;
256 };
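
/*
 * Illustrative only: the per-type members above are indexed with the
 * FAULT_BUFFER_TYPE enumerants, e.g.
 *
 *   struct HW_FAULT_BUFFER *pHwBuf =
 *       &pFaultBuffer->hwFaultBuffers[REPLAYABLE_FAULT_BUFFER];
 *   GMMU_CLIENT_SHADOW_FAULT_BUFFER *pShadow =
 *       pFaultBuffer->pClientShadowFaultBuffer[NON_REPLAYABLE_FAULT_BUFFER];
 *
 * where pFaultBuffer points to one of the struct GMMU_FAULT_BUFFER entries
 * held by the KernelGmmu object.
 */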
257 
258 typedef struct GMMU_FAULT_PACKET
259 {
    // 32-byte MMU fault packet
261     NvU8 faultPacket[NVC369_BUF_SIZE];
262 } GMMU_FAULT_PACKET;
263 
264 // Initialize Circular Queue for MMU Shadow fault buffer
265 MAKE_QUEUE_CIRCULAR(GMMU_SHADOW_FAULT_BUF, GMMU_FAULT_PACKET);
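
//
// The GMMU_SHADOW_FAULT_BUF type declared here is the circular queue of
// 32-byte GMMU_FAULT_PACKET entries; the NvP64 pointers pRmShadowFaultBuffer
// and GMMU_CLIENT_SHADOW_FAULT_BUFFER::pQueue above presumably refer to
// instances of it. The queue implementation itself comes from containers/queue.h.
//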
266 
267 
268 /*!
 * Structure that holds the parameters passed by an engine to kgmmuInstBlkInit
 * for initializing its instance block.
271  */
272 typedef struct
273 {
274     NvBool               bIsClientAdmin;
275     NvBool               bIsFaultReplayable;
276     /*
     * Defer the bus flush during instance block init.
     * If this field is set, the kgmmuInstBlkInit() routine won't flush after the CPU writes.
     * The caller of kgmmuInstBlkInit() then has to flush explicitly.
     * This is useful if the caller does back-to-back updates to the instance block,
     * e.g. subcontext array init during channel setup.
282      */
283     NvBool               bDeferFlush;
284     NvU64                uvmKernelPrivRegion;
285 
286     // Instance block is being updated for a zombie subcontext.
287     NvBool               bIsZombieSubctx;
288     NvU8                *pInstBlk;      // VA of instance block.
289 } INST_BLK_INIT_PARAMS, *PINST_BLK_INIT_PARAMS;
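
/*
 * Hedged sketch (not part of the generated interface): a caller doing
 * back-to-back instance block updates, such as subcontext array init, would
 * set bDeferFlush so each kgmmuInstBlkInit() call skips the bus flush, and
 * then issue a single flush itself afterwards. kgmmuInstBlkInit() and the
 * final flush are declared elsewhere in RM and are assumed here.
 *
 *   INST_BLK_INIT_PARAMS params = {0};
 *   params.bDeferFlush    = NV_TRUE;   // caller flushes once after all writes
 *   params.bIsClientAdmin = NV_FALSE;
 *   params.pInstBlk       = pInstBlkVa;
 *   // ... call kgmmuInstBlkInit() per subcontext entry, then flush explicitly ...
 */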
290 
291 #define VMMU_MAX_GFID 64
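
//
// Note: the mmuFaultBuffer arrays in KernelGmmu below are dimensioned [64],
// matching VMMU_MAX_GFID, i.e. presumably one GMMU_FAULT_BUFFER per GFID.
//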
292 
293 #ifdef NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
294 #define PRIVATE_FIELD(x) x
295 #else
296 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
297 #endif
298 struct KernelGmmu {
299     const struct NVOC_RTTI *__nvoc_rtti;
300     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
301     struct IntrService __nvoc_base_IntrService;
302     struct Object *__nvoc_pbase_Object;
303     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
304     struct IntrService *__nvoc_pbase_IntrService;
305     struct KernelGmmu *__nvoc_pbase_KernelGmmu;
306     NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR);
307     NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *);
308     NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
309     NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
310     void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *);
311     void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *);
312     NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *);
313     NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);
314     NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *);
315     NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *);
316     void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
317     void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
318     void (*__kgmmuFmtInitPte__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);
319     void (*__kgmmuFmtInitPde__)(struct KernelGmmu *, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
320     NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu *, NvU32);
321     void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);
322     void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu *, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
323     NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu *);
324     NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
325     NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu *, NvU32, NvBool, NvU32 *);
326     NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
327     NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
328     NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
329     NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *);
330     NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
331     void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
332     NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *);
333     NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
334     NV_STATUS (*__kgmmuStateLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
335     NV_STATUS (*__kgmmuStateUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
336     NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
337     NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
338     NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
339     NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
340     void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *);
341     NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *);
342     NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *);
343     NvBool (*__kgmmuClearInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *);
344     NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *);
345     NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
346     NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
347     const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
348     NvU32 defaultBigPageSize;
349     NvU32 uvmSharedIntrRmOwnsMask;
350     GMMU_FMT_FAMILY *PRIVATE_FIELD(pFmtFamilies)[3];
351     NvU32 PRIVATE_FIELD(PDEAperture);
352     NvU32 PRIVATE_FIELD(PDEAttr);
353     NvU32 PRIVATE_FIELD(PDEBAR1Aperture);
354     NvU32 PRIVATE_FIELD(PDEBAR1Attr);
355     NvU32 PRIVATE_FIELD(PTEAperture);
356     NvU32 PRIVATE_FIELD(PTEAttr);
357     NvU32 PRIVATE_FIELD(PTEBAR1Aperture);
358     NvU32 PRIVATE_FIELD(PTEBAR1Attr);
359     NvU32 PRIVATE_FIELD(overrideBigPageSize);
360     NvBool PRIVATE_FIELD(bEnablePerVaspaceBigPage);
361     NvBool PRIVATE_FIELD(bIgnoreHubTlbInvalidate);
362     NvU64 PRIVATE_FIELD(maxVASize);
363     struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pdeApertures)[5];
364     struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pteApertures)[5];
365     MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarSmallPageTable);
366     MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarPageDirectory0);
367     struct GMMU_FAULT_BUFFER PRIVATE_FIELD(mmuFaultBuffer)[64];
368     NvU64 PRIVATE_FIELD(sysmemBaseAddress);
369     NvBool PRIVATE_FIELD(bHugePageSupported);
370     NvBool PRIVATE_FIELD(bPageSize512mbSupported);
371     NvBool PRIVATE_FIELD(bBug2720120WarEnabled);
372     NvBool PRIVATE_FIELD(bVaspaceInteropSupported);
373 };
374 struct KernelGmmu_PRIVATE {
375     const struct NVOC_RTTI *__nvoc_rtti;
376     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
377     struct IntrService __nvoc_base_IntrService;
378     struct Object *__nvoc_pbase_Object;
379     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
380     struct IntrService *__nvoc_pbase_IntrService;
381     struct KernelGmmu *__nvoc_pbase_KernelGmmu;
382     NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR);
383     NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *);
384     NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
385     NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
386     void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *);
387     void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *);
388     NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *);
389     NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);
390     NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *);
391     NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *);
392     void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
393     void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
394     void (*__kgmmuFmtInitPte__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);
395     void (*__kgmmuFmtInitPde__)(struct KernelGmmu *, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
396     NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu *, NvU32);
397     void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);
398     void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu *, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
399     NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu *);
400     NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
401     NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu *, NvU32, NvBool, NvU32 *);
402     NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
403     NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
404     NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
405     NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *);
406     NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
407     void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
408     NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *);
409     NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
410     NV_STATUS (*__kgmmuStateLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
411     NV_STATUS (*__kgmmuStateUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
412     NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
413     NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
414     NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
415     NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
416     void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *);
417     NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *);
418     NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *);
419     NvBool (*__kgmmuClearInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *);
420     NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *);
421     NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
422     NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
423     const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
424     NvU32 defaultBigPageSize;
425     NvU32 uvmSharedIntrRmOwnsMask;
426     GMMU_FMT_FAMILY *pFmtFamilies[3];
427     NvU32 PDEAperture;
428     NvU32 PDEAttr;
429     NvU32 PDEBAR1Aperture;
430     NvU32 PDEBAR1Attr;
431     NvU32 PTEAperture;
432     NvU32 PTEAttr;
433     NvU32 PTEBAR1Aperture;
434     NvU32 PTEBAR1Attr;
435     NvU32 overrideBigPageSize;
436     NvBool bEnablePerVaspaceBigPage;
437     NvBool bIgnoreHubTlbInvalidate;
438     NvU64 maxVASize;
439     struct NV_FIELD_ENUM_ENTRY pdeApertures[5];
440     struct NV_FIELD_ENUM_ENTRY pteApertures[5];
441     MEMORY_DESCRIPTOR *pWarSmallPageTable;
442     MEMORY_DESCRIPTOR *pWarPageDirectory0;
443     struct GMMU_FAULT_BUFFER mmuFaultBuffer[64];
444     NvU64 sysmemBaseAddress;
445     NvBool bHugePageSupported;
446     NvBool bPageSize512mbSupported;
447     NvBool bBug2720120WarEnabled;
448     NvBool bVaspaceInteropSupported;
449 };
450 
451 #ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__
452 #define __NVOC_CLASS_KernelGmmu_TYPEDEF__
453 typedef struct KernelGmmu KernelGmmu;
454 #endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */
455 
456 #ifndef __nvoc_class_id_KernelGmmu
457 #define __nvoc_class_id_KernelGmmu 0x29362f
458 #endif /* __nvoc_class_id_KernelGmmu */
459 
460 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu;
461 
462 #define __staticCast_KernelGmmu(pThis) \
463     ((pThis)->__nvoc_pbase_KernelGmmu)
464 
465 #ifdef __nvoc_kern_gmmu_h_disabled
466 #define __dynamicCast_KernelGmmu(pThis) ((KernelGmmu*)NULL)
467 #else //__nvoc_kern_gmmu_h_disabled
468 #define __dynamicCast_KernelGmmu(pThis) \
469     ((KernelGmmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGmmu)))
470 #endif //__nvoc_kern_gmmu_h_disabled
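
//
// Note on the __nvoc_kern_gmmu_h_disabled pattern used throughout this header:
// when the KernelGmmu engine is compiled out, each kgmmu* entry point below is
// replaced by a static inline stub that asserts via NV_ASSERT_FAILED_PRECOMP
// and returns a benign default (NV_ERR_NOT_SUPPORTED, 0, NV_FALSE, or nothing),
// so callers do not need their own #ifdef guards.
//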
471 
472 #define PDB_PROP_KGMMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
473 #define PDB_PROP_KGMMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
474 #define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_CAST
475 #define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_NAME PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED
476 #define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_CAST
477 #define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_NAME PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED
478 
479 NV_STATUS __nvoc_objCreateDynamic_KernelGmmu(KernelGmmu**, Dynamic*, NvU32, va_list);
480 
481 NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
482 #define __objCreate_KernelGmmu(ppNewObj, pParent, createFlags) \
483     __nvoc_objCreate_KernelGmmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
484 
485 #define kgmmuConstructEngine(pGpu, pKernelGmmu, arg0) kgmmuConstructEngine_DISPATCH(pGpu, pKernelGmmu, arg0)
486 #define kgmmuStateInitLocked(pGpu, pKernelGmmu) kgmmuStateInitLocked_DISPATCH(pGpu, pKernelGmmu)
487 #define kgmmuStatePostLoad(pGpu, pKernelGmmu, arg0) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
488 #define kgmmuStatePostLoad_HAL(pGpu, pKernelGmmu, arg0) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
489 #define kgmmuStatePreUnload(pGpu, pKernelGmmu, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
490 #define kgmmuStatePreUnload_HAL(pGpu, pKernelGmmu, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
491 #define kgmmuStateDestroy(pGpu, pKernelGmmu) kgmmuStateDestroy_DISPATCH(pGpu, pKernelGmmu)
492 #define kgmmuRegisterIntrService(pGpu, pKernelGmmu, arg0) kgmmuRegisterIntrService_DISPATCH(pGpu, pKernelGmmu, arg0)
493 #define kgmmuServiceInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
494 #define kgmmuInstBlkVaLimitGet(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
495 #define kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
496 #define kgmmuSetTlbInvalidateMembarWarParameters(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
497 #define kgmmuSetTlbInvalidateMembarWarParameters_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
498 #define kgmmuSetTlbInvalidationScope(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
499 #define kgmmuSetTlbInvalidationScope_HAL(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
500 #define kgmmuFmtInitPteComptagLine(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
501 #define kgmmuFmtInitPteComptagLine_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
502 #define kgmmuFmtInitPeerPteFld(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
503 #define kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
504 #define kgmmuFmtInitPte(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
505 #define kgmmuFmtInitPte_HAL(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
506 #define kgmmuFmtInitPde(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
507 #define kgmmuFmtInitPde_HAL(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
508 #define kgmmuFmtIsVersionSupported(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
509 #define kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
510 #define kgmmuFmtInitLevels(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
511 #define kgmmuFmtInitLevels_HAL(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
512 #define kgmmuFmtInitPdeMulti(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
513 #define kgmmuFmtInitPdeMulti_HAL(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
514 #define kgmmuFmtFamiliesInit(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
515 #define kgmmuFmtFamiliesInit_HAL(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
516 #define kgmmuTranslatePtePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
517 #define kgmmuTranslatePtePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
518 #define kgmmuTranslatePtePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
519 #define kgmmuTranslatePtePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
520 #define kgmmuTranslatePdePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
521 #define kgmmuTranslatePdePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
522 #define kgmmuTranslatePdePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
523 #define kgmmuTranslatePdePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
524 #define kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
525 #define kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
526 #define kgmmuIssueReplayableFaultBufferFlush(pGpu, pKernelGmmu) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu)
527 #define kgmmuIssueReplayableFaultBufferFlush_HAL(pGpu, pKernelGmmu) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu)
528 #define kgmmuFaultBufferAllocSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
529 #define kgmmuFaultBufferAllocSharedMemory_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
530 #define kgmmuFaultBufferFreeSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
531 #define kgmmuFaultBufferFreeSharedMemory_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
532 #define kgmmuSetupWarForBug2720120(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam)
533 #define kgmmuSetupWarForBug2720120_HAL(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam)
534 #define kgmmuGetGraphicsEngineId(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
535 #define kgmmuGetGraphicsEngineId_HAL(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
536 #define kgmmuStateLoad(pGpu, pEngstate, arg0) kgmmuStateLoad_DISPATCH(pGpu, pEngstate, arg0)
537 #define kgmmuStateUnload(pGpu, pEngstate, arg0) kgmmuStateUnload_DISPATCH(pGpu, pEngstate, arg0)
538 #define kgmmuServiceNotificationInterrupt(pGpu, pIntrService, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams)
539 #define kgmmuStatePreLoad(pGpu, pEngstate, arg0) kgmmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
540 #define kgmmuStatePostUnload(pGpu, pEngstate, arg0) kgmmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
541 #define kgmmuStateInitUnlocked(pGpu, pEngstate) kgmmuStateInitUnlocked_DISPATCH(pGpu, pEngstate)
542 #define kgmmuInitMissing(pGpu, pEngstate) kgmmuInitMissing_DISPATCH(pGpu, pEngstate)
543 #define kgmmuStatePreInitLocked(pGpu, pEngstate) kgmmuStatePreInitLocked_DISPATCH(pGpu, pEngstate)
544 #define kgmmuStatePreInitUnlocked(pGpu, pEngstate) kgmmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
545 #define kgmmuClearInterrupt(pGpu, pIntrService, pParams) kgmmuClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
546 #define kgmmuIsPresent(pGpu, pEngstate) kgmmuIsPresent_DISPATCH(pGpu, pEngstate)
547 NvU32 kgmmuGetMaxBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);
548 
549 
550 #ifdef __nvoc_kern_gmmu_h_disabled
551 static inline NvU32 kgmmuGetMaxBigPageSize(struct KernelGmmu *pKernelGmmu) {
552     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
553     return 0;
554 }
555 #else //__nvoc_kern_gmmu_h_disabled
556 #define kgmmuGetMaxBigPageSize(pKernelGmmu) kgmmuGetMaxBigPageSize_GM107(pKernelGmmu)
557 #endif //__nvoc_kern_gmmu_h_disabled
558 
559 #define kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) kgmmuGetMaxBigPageSize(pKernelGmmu)
560 
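// kgmmuGetVaspaceClass_f515df() returns 37105 == 0x90F1, i.e. FERMI_VASPACE_A
// from class/cl90f1.h included above.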
561 static inline NvU32 kgmmuGetVaspaceClass_f515df(struct KernelGmmu *pKernelGmmu) {
562     return (37105);
563 }
564 
565 
566 #ifdef __nvoc_kern_gmmu_h_disabled
567 static inline NvU32 kgmmuGetVaspaceClass(struct KernelGmmu *pKernelGmmu) {
568     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
569     return 0;
570 }
571 #else //__nvoc_kern_gmmu_h_disabled
572 #define kgmmuGetVaspaceClass(pKernelGmmu) kgmmuGetVaspaceClass_f515df(pKernelGmmu)
573 #endif //__nvoc_kern_gmmu_h_disabled
574 
575 #define kgmmuGetVaspaceClass_HAL(pKernelGmmu) kgmmuGetVaspaceClass(pKernelGmmu)
576 
577 NV_STATUS kgmmuInstBlkAtsGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData);
578 
579 
580 #ifdef __nvoc_kern_gmmu_h_disabled
581 static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData) {
582     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
583     return NV_ERR_NOT_SUPPORTED;
584 }
585 #else //__nvoc_kern_gmmu_h_disabled
586 #define kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet_GV100(pKernelGmmu, pVAS, subctxid, pOffset, pData)
587 #endif //__nvoc_kern_gmmu_h_disabled
588 
589 #define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData)
590 
591 NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);
592 
593 
594 #ifdef __nvoc_kern_gmmu_h_disabled
595 static inline NV_STATUS kgmmuInstBlkPageDirBaseGet(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi) {
596     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
597     return NV_ERR_NOT_SUPPORTED;
598 }
599 #else //__nvoc_kern_gmmu_h_disabled
600 #define kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet_GV100(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
601 #endif //__nvoc_kern_gmmu_h_disabled
602 
603 #define kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
604 
605 NvU32 kgmmuGetPDBAllocSize_GP100(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1);
606 
607 
608 #ifdef __nvoc_kern_gmmu_h_disabled
609 static inline NvU32 kgmmuGetPDBAllocSize(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1) {
610     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
611     return 0;
612 }
613 #else //__nvoc_kern_gmmu_h_disabled
614 #define kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize_GP100(pKernelGmmu, arg0, arg1)
615 #endif //__nvoc_kern_gmmu_h_disabled
616 
617 #define kgmmuGetPDBAllocSize_HAL(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1)
618 
619 NvU32 kgmmuGetBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);
620 
621 
622 #ifdef __nvoc_kern_gmmu_h_disabled
623 static inline NvU32 kgmmuGetBigPageSize(struct KernelGmmu *pKernelGmmu) {
624     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
625     return 0;
626 }
627 #else //__nvoc_kern_gmmu_h_disabled
628 #define kgmmuGetBigPageSize(pKernelGmmu) kgmmuGetBigPageSize_GM107(pKernelGmmu)
629 #endif //__nvoc_kern_gmmu_h_disabled
630 
631 #define kgmmuGetBigPageSize_HAL(pKernelGmmu) kgmmuGetBigPageSize(pKernelGmmu)
632 
633 NV_STATUS kgmmuInitStaticInfo_KERNEL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo);
634 
635 
636 #ifdef __nvoc_kern_gmmu_h_disabled
637 static inline NV_STATUS kgmmuInitStaticInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo) {
638     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
639     return NV_ERR_NOT_SUPPORTED;
640 }
641 #else //__nvoc_kern_gmmu_h_disabled
642 #define kgmmuInitStaticInfo(pGpu, pKernelGmmu, pStaticInfo) kgmmuInitStaticInfo_KERNEL(pGpu, pKernelGmmu, pStaticInfo)
643 #endif //__nvoc_kern_gmmu_h_disabled
644 
645 #define kgmmuInitStaticInfo_HAL(pGpu, pKernelGmmu, pStaticInfo) kgmmuInitStaticInfo(pGpu, pKernelGmmu, pStaticInfo)
646 
647 void kgmmuFmtInitCaps_GM20X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt);
648 
649 
650 #ifdef __nvoc_kern_gmmu_h_disabled
651 static inline void kgmmuFmtInitCaps(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt) {
652     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
653 }
654 #else //__nvoc_kern_gmmu_h_disabled
655 #define kgmmuFmtInitCaps(pKernelGmmu, pFmt) kgmmuFmtInitCaps_GM20X(pKernelGmmu, pFmt)
656 #endif //__nvoc_kern_gmmu_h_disabled
657 
658 #define kgmmuFmtInitCaps_HAL(pKernelGmmu, pFmt) kgmmuFmtInitCaps(pKernelGmmu, pFmt)
659 
660 void kgmmuFmtInitPteApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);
661 
662 
663 #ifdef __nvoc_kern_gmmu_h_disabled
664 static inline void kgmmuFmtInitPteApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
665     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
666 }
667 #else //__nvoc_kern_gmmu_h_disabled
668 #define kgmmuFmtInitPteApertures(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures_GM10X(pKernelGmmu, pEntries)
669 #endif //__nvoc_kern_gmmu_h_disabled
670 
671 #define kgmmuFmtInitPteApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures(pKernelGmmu, pEntries)
672 
673 void kgmmuFmtInitPdeApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);
674 
675 
676 #ifdef __nvoc_kern_gmmu_h_disabled
677 static inline void kgmmuFmtInitPdeApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
678     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
679 }
680 #else //__nvoc_kern_gmmu_h_disabled
681 #define kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures_GM10X(pKernelGmmu, pEntries)
682 #endif //__nvoc_kern_gmmu_h_disabled
683 
684 #define kgmmuFmtInitPdeApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries)
685 
686 void kgmmuInvalidateTlb_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope);
687 
688 
689 #ifdef __nvoc_kern_gmmu_h_disabled
690 static inline void kgmmuInvalidateTlb(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope) {
691     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
692 }
693 #else //__nvoc_kern_gmmu_h_disabled
694 #define kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb_GM107(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
695 #endif //__nvoc_kern_gmmu_h_disabled
696 
697 #define kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
698 
699 NV_STATUS kgmmuCheckPendingInvalidates_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid);
700 
701 
702 #ifdef __nvoc_kern_gmmu_h_disabled
703 static inline NV_STATUS kgmmuCheckPendingInvalidates(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid) {
704     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
705     return NV_ERR_NOT_SUPPORTED;
706 }
707 #else //__nvoc_kern_gmmu_h_disabled
708 #define kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates_TU102(pGpu, pKernelGmmu, pTimeOut, gfid)
709 #endif //__nvoc_kern_gmmu_h_disabled
710 
711 #define kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid)
712 
713 NV_STATUS kgmmuCommitTlbInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
714 
715 
716 #ifdef __nvoc_kern_gmmu_h_disabled
717 static inline NV_STATUS kgmmuCommitTlbInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
718     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
719     return NV_ERR_NOT_SUPPORTED;
720 }
721 #else //__nvoc_kern_gmmu_h_disabled
722 #define kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate_TU102(pGpu, pKernelGmmu, pParams)
723 #endif //__nvoc_kern_gmmu_h_disabled
724 
725 #define kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams)
726 
727 void kgmmuSetPdbToInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
728 
729 
730 #ifdef __nvoc_kern_gmmu_h_disabled
731 static inline void kgmmuSetPdbToInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
732     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
733 }
734 #else //__nvoc_kern_gmmu_h_disabled
735 #define kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate_TU102(pGpu, pKernelGmmu, pParams)
736 #endif //__nvoc_kern_gmmu_h_disabled
737 
738 #define kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams)
739 
740 NV_STATUS kgmmuEnableComputePeerAddressing_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags);
741 
742 
743 #ifdef __nvoc_kern_gmmu_h_disabled
744 static inline NV_STATUS kgmmuEnableComputePeerAddressing(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags) {
745     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
746     return NV_ERR_NOT_SUPPORTED;
747 }
748 #else //__nvoc_kern_gmmu_h_disabled
749 #define kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing_IMPL(pGpu, pKernelGmmu, flags)
750 #endif //__nvoc_kern_gmmu_h_disabled
751 
752 #define kgmmuEnableComputePeerAddressing_HAL(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags)
753 
754 void kgmmuDetermineMaxVASize_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
755 
756 
757 #ifdef __nvoc_kern_gmmu_h_disabled
758 static inline void kgmmuDetermineMaxVASize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
759     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
760 }
761 #else //__nvoc_kern_gmmu_h_disabled
762 #define kgmmuDetermineMaxVASize(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize_GM107(pGpu, pKernelGmmu)
763 #endif //__nvoc_kern_gmmu_h_disabled
764 
765 #define kgmmuDetermineMaxVASize_HAL(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize(pGpu, pKernelGmmu)
766 
767 const char *kgmmuGetFaultTypeString_GP100(struct KernelGmmu *pKernelGmmu, NvU32 faultType);
768 
769 
770 #ifdef __nvoc_kern_gmmu_h_disabled
771 static inline const char *kgmmuGetFaultTypeString(struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
772     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
773     return NULL;
774 }
775 #else //__nvoc_kern_gmmu_h_disabled
776 #define kgmmuGetFaultTypeString(pKernelGmmu, faultType) kgmmuGetFaultTypeString_GP100(pKernelGmmu, faultType)
777 #endif //__nvoc_kern_gmmu_h_disabled
778 
779 #define kgmmuGetFaultTypeString_HAL(pKernelGmmu, faultType) kgmmuGetFaultTypeString(pKernelGmmu, faultType)
780 
781 NV_STATUS kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);
782 
783 
784 #ifdef __nvoc_kern_gmmu_h_disabled
785 static inline NV_STATUS kgmmuChangeReplayableFaultOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
786     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
787     return NV_ERR_NOT_SUPPORTED;
788 }
789 #else //__nvoc_kern_gmmu_h_disabled
790 #define kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership_GV100(pGpu, pKernelGmmu, arg0)
791 #endif //__nvoc_kern_gmmu_h_disabled
792 
793 #define kgmmuChangeReplayableFaultOwnership_HAL(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0)
794 
795 NV_STATUS kgmmuServiceReplayableFault_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
796 
797 
798 #ifdef __nvoc_kern_gmmu_h_disabled
799 static inline NV_STATUS kgmmuServiceReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
800     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
801     return NV_ERR_NOT_SUPPORTED;
802 }
803 #else //__nvoc_kern_gmmu_h_disabled
804 #define kgmmuServiceReplayableFault(pGpu, pKernelGmmu) kgmmuServiceReplayableFault_TU102(pGpu, pKernelGmmu)
805 #endif //__nvoc_kern_gmmu_h_disabled
806 
807 #define kgmmuServiceReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceReplayableFault(pGpu, pKernelGmmu)
808 
809 NV_STATUS kgmmuReportFaultBufferOverflow_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
810 
811 
812 #ifdef __nvoc_kern_gmmu_h_disabled
813 static inline NV_STATUS kgmmuReportFaultBufferOverflow(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
814     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
815     return NV_ERR_NOT_SUPPORTED;
816 }
817 #else //__nvoc_kern_gmmu_h_disabled
818 #define kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow_GV100(pGpu, pKernelGmmu)
819 #endif //__nvoc_kern_gmmu_h_disabled
820 
821 #define kgmmuReportFaultBufferOverflow_HAL(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu)
822 
823 NV_STATUS kgmmuReadFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0);
824 
825 
826 #ifdef __nvoc_kern_gmmu_h_disabled
827 static inline NV_STATUS kgmmuReadFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0) {
828     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
829     return NV_ERR_NOT_SUPPORTED;
830 }
831 #else //__nvoc_kern_gmmu_h_disabled
832 #define kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, pGetOffset, arg0)
833 #endif //__nvoc_kern_gmmu_h_disabled
834 
835 #define kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0)
836 
837 NV_STATUS kgmmuReadFaultBufferPutPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg0);
838 
839 
840 #ifdef __nvoc_kern_gmmu_h_disabled
841 static inline NV_STATUS kgmmuReadFaultBufferPutPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg0) {
842     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
843     return NV_ERR_NOT_SUPPORTED;
844 }
845 #else //__nvoc_kern_gmmu_h_disabled
846 #define kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg0) kgmmuReadFaultBufferPutPtr_TU102(pGpu, pKernelGmmu, index, pPutOffset, arg0)
847 #endif //__nvoc_kern_gmmu_h_disabled
848 
849 #define kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, index, pPutOffset, arg0) kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg0)
850 
851 NvU32 kgmmuReadMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid);
852 
853 
854 #ifdef __nvoc_kern_gmmu_h_disabled
855 static inline NvU32 kgmmuReadMmuFaultBufferSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid) {
856     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
857     return 0;
858 }
859 #else //__nvoc_kern_gmmu_h_disabled
860 #define kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize_TU102(pGpu, pKernelGmmu, arg0, gfid)
861 #endif //__nvoc_kern_gmmu_h_disabled
862 
863 #define kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid)
864 
865 NvU32 kgmmuReadMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid);
866 
867 
868 #ifdef __nvoc_kern_gmmu_h_disabled
869 static inline NvU32 kgmmuReadMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid) {
870     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
871     return 0;
872 }
873 #else //__nvoc_kern_gmmu_h_disabled
874 #define kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus_TU102(pGpu, pKernelGmmu, gfid)
875 #endif //__nvoc_kern_gmmu_h_disabled
876 
877 #define kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid)
878 
879 void kgmmuWriteMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
880 
881 
882 #ifdef __nvoc_kern_gmmu_h_disabled
883 static inline void kgmmuWriteMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
884     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
885 }
886 #else //__nvoc_kern_gmmu_h_disabled
887 #define kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus_TU102(pGpu, pKernelGmmu, arg0)
888 #endif //__nvoc_kern_gmmu_h_disabled
889 
890 #define kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0)
891 
892 NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);
893 
894 
895 #ifdef __nvoc_kern_gmmu_h_disabled
896 static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
897     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
898     return NV_FALSE;
899 }
900 #else //__nvoc_kern_gmmu_h_disabled
901 #define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu, arg0)
902 #endif //__nvoc_kern_gmmu_h_disabled
903 
904 #define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0)
905 
906 NV_STATUS kgmmuClientShadowFaultBufferAlloc_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
907 
908 
909 #ifdef __nvoc_kern_gmmu_h_disabled
910 static inline NV_STATUS kgmmuClientShadowFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
911     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
912     return NV_ERR_NOT_SUPPORTED;
913 }
914 #else //__nvoc_kern_gmmu_h_disabled
915 #define kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAlloc_GV100(pGpu, pKernelGmmu, arg0)
916 #endif //__nvoc_kern_gmmu_h_disabled
917 
918 #define kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg0)
919 
920 NV_STATUS kgmmuClientShadowFaultBufferFree_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
921 
922 
923 #ifdef __nvoc_kern_gmmu_h_disabled
924 static inline NV_STATUS kgmmuClientShadowFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
925     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
926     return NV_ERR_NOT_SUPPORTED;
927 }
928 #else //__nvoc_kern_gmmu_h_disabled
929 #define kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferFree_GV100(pGpu, pKernelGmmu, arg0)
930 #endif //__nvoc_kern_gmmu_h_disabled
931 
932 #define kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg0)
933 
934 void kgmmuEncodeSysmemAddrs_GM107(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count);
935 
936 
937 #ifdef __nvoc_kern_gmmu_h_disabled
938 static inline void kgmmuEncodeSysmemAddrs(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count) {
939     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
940 }
941 #else //__nvoc_kern_gmmu_h_disabled
942 #define kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs_GM107(pKernelGmmu, pAddresses, count)
943 #endif //__nvoc_kern_gmmu_h_disabled
944 
945 #define kgmmuEncodeSysmemAddrs_HAL(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count)
946 
947 NvU8 kgmmuGetHwPteApertureFromMemdesc_GM107(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc);
948 
949 
950 #ifdef __nvoc_kern_gmmu_h_disabled
951 static inline NvU8 kgmmuGetHwPteApertureFromMemdesc(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc) {
952     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
953     return 0;
954 }
955 #else //__nvoc_kern_gmmu_h_disabled
956 #define kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc_GM107(pKernelGmmu, pDesc)
957 #endif //__nvoc_kern_gmmu_h_disabled
958 
959 #define kgmmuGetHwPteApertureFromMemdesc_HAL(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc)
960 
961 NvBool kgmmuTestAccessCounterWriteNak_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
962 
963 
964 #ifdef __nvoc_kern_gmmu_h_disabled
965 static inline NvBool kgmmuTestAccessCounterWriteNak(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
966     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
967     return NV_FALSE;
968 }
969 #else //__nvoc_kern_gmmu_h_disabled
970 #define kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak_TU102(pGpu, pKernelGmmu)
971 #endif //__nvoc_kern_gmmu_h_disabled
972 
973 #define kgmmuTestAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu)
974 
975 NV_STATUS kgmmuEnableNvlinkComputePeerAddressing_GV100(struct KernelGmmu *pKernelGmmu);
976 
977 
978 #ifdef __nvoc_kern_gmmu_h_disabled
979 static inline NV_STATUS kgmmuEnableNvlinkComputePeerAddressing(struct KernelGmmu *pKernelGmmu) {
980     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
981     return NV_ERR_NOT_SUPPORTED;
982 }
983 #else //__nvoc_kern_gmmu_h_disabled
984 #define kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing_GV100(pKernelGmmu)
985 #endif //__nvoc_kern_gmmu_h_disabled
986 
987 #define kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu)
988 
989 void kgmmuClearNonReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);
990 
991 
992 #ifdef __nvoc_kern_gmmu_h_disabled
993 static inline void kgmmuClearNonReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
994     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
995 }
996 #else //__nvoc_kern_gmmu_h_disabled
997 #define kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg0) kgmmuClearNonReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg0)
998 #endif //__nvoc_kern_gmmu_h_disabled
999 
1000 #define kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg0) kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg0)
1001 
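//
// Virtual method dispatch: each kgmmu*_DISPATCH() helper below calls through
// the corresponding function pointer stored in the KernelGmmu object (the
// __kgmmu*__ members above), which is expected to be bound by the
// NVOC-generated constructor to the per-chip _IMPL/_HAL implementation for
// this GPU.
//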
1002 NV_STATUS kgmmuConstructEngine_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0);
1003 
1004 static inline NV_STATUS kgmmuConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0) {
1005     return pKernelGmmu->__kgmmuConstructEngine__(pGpu, pKernelGmmu, arg0);
1006 }
1007 
1008 NV_STATUS kgmmuStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1009 
1010 static inline NV_STATUS kgmmuStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1011     return pKernelGmmu->__kgmmuStateInitLocked__(pGpu, pKernelGmmu);
1012 }
1013 
1014 NV_STATUS kgmmuStatePostLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1015 
1016 static inline NV_STATUS kgmmuStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1017     return pKernelGmmu->__kgmmuStatePostLoad__(pGpu, pKernelGmmu, arg0);
1018 }
1019 
1020 NV_STATUS kgmmuStatePreUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1021 
1022 static inline NV_STATUS kgmmuStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1023     return pKernelGmmu->__kgmmuStatePreUnload__(pGpu, pKernelGmmu, arg0);
1024 }
1025 
1026 void kgmmuStateDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1027 
1028 static inline void kgmmuStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1029     pKernelGmmu->__kgmmuStateDestroy__(pGpu, pKernelGmmu);
1030 }
1031 
1032 void kgmmuRegisterIntrService_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[166]);
1033 
1034 static inline void kgmmuRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[166]) {
1035     pKernelGmmu->__kgmmuRegisterIntrService__(pGpu, pKernelGmmu, arg0);
1036 }
1037 
1038 NvU32 kgmmuServiceInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams);
1039 
1040 static inline NvU32 kgmmuServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams) {
1041     return pKernelGmmu->__kgmmuServiceInterrupt__(pGpu, pKernelGmmu, pParams);
1042 }
1043 
1044 NV_STATUS kgmmuInstBlkVaLimitGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData);
1045 
1046 static inline NV_STATUS kgmmuInstBlkVaLimitGet_f03539(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
1047     *pOffset = 0;
1048     return NV_OK;
1049 }
1050 
1051 static inline NV_STATUS kgmmuInstBlkVaLimitGet_DISPATCH(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
1052     return pKernelGmmu->__kgmmuInstBlkVaLimitGet__(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData);
1053 }
1054 
1055 NvU32 kgmmuSetTlbInvalidateMembarWarParameters_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
1056 
1057 static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1058     return 0;
1059 }
1060 
1061 static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1062     return pKernelGmmu->__kgmmuSetTlbInvalidateMembarWarParameters__(pGpu, pKernelGmmu, pParams);
1063 }
1064 
1065 NV_STATUS kgmmuSetTlbInvalidationScope_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams);
1066 
1067 static inline NV_STATUS kgmmuSetTlbInvalidationScope_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
1068     return NV_ERR_NOT_SUPPORTED;
1069 }
1070 
1071 static inline NV_STATUS kgmmuSetTlbInvalidationScope_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
1072     return pKernelGmmu->__kgmmuSetTlbInvalidationScope__(pGpu, pKernelGmmu, flags, pParams);
1073 }
1074 
1075 void kgmmuFmtInitPteComptagLine_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);
1076 
1077 static inline void kgmmuFmtInitPteComptagLine_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1078     return;
1079 }
1080 
1081 static inline void kgmmuFmtInitPteComptagLine_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1082     pKernelGmmu->__kgmmuFmtInitPteComptagLine__(pKernelGmmu, pPte, version);
1083 }
1084 
1085 void kgmmuFmtInitPeerPteFld_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);
1086 
1087 static inline void kgmmuFmtInitPeerPteFld_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1088     return;
1089 }
1090 
1091 static inline void kgmmuFmtInitPeerPteFld_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1092     pKernelGmmu->__kgmmuFmtInitPeerPteFld__(pKernelGmmu, pPte, version);
1093 }
1094 
1095 void kgmmuFmtInitPte_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);
1096 
1097 void kgmmuFmtInitPte_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);
1098 
1099 static inline void kgmmuFmtInitPte_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture) {
1100     pKernelGmmu->__kgmmuFmtInitPte__(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture);
1101 }
1102 
1103 void kgmmuFmtInitPde_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1104 
1105 void kgmmuFmtInitPde_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1106 
1107 static inline void kgmmuFmtInitPde_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
1108     pKernelGmmu->__kgmmuFmtInitPde__(pKernelGmmu, pPde, version, pPdeApertures);
1109 }
1110 
1111 NvBool kgmmuFmtIsVersionSupported_GP10X(struct KernelGmmu *pKernelGmmu, NvU32 version);
1112 
1113 NvBool kgmmuFmtIsVersionSupported_GH10X(struct KernelGmmu *pKernelGmmu, NvU32 version);
1114 
1115 static inline NvBool kgmmuFmtIsVersionSupported_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 version) {
1116     return pKernelGmmu->__kgmmuFmtIsVersionSupported__(pKernelGmmu, version);
1117 }
1118 
1119 void kgmmuFmtInitLevels_GP10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
1120 
1121 void kgmmuFmtInitLevels_GA10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
1122 
1123 void kgmmuFmtInitLevels_GH10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
1124 
1125 static inline void kgmmuFmtInitLevels_DISPATCH(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift) {
1126     pKernelGmmu->__kgmmuFmtInitLevels__(pKernelGmmu, pLevels, numLevels, version, bigPageShift);
1127 }
1128 
1129 void kgmmuFmtInitPdeMulti_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1130 
1131 void kgmmuFmtInitPdeMulti_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1132 
1133 static inline void kgmmuFmtInitPdeMulti_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
1134     pKernelGmmu->__kgmmuFmtInitPdeMulti__(pKernelGmmu, pPdeMulti, version, pPdeApertures);
1135 }
1136 
1137 NV_STATUS kgmmuFmtFamiliesInit_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1138 
1139 NV_STATUS kgmmuFmtFamiliesInit_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1140 
1141 static inline NV_STATUS kgmmuFmtFamiliesInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1142     return pKernelGmmu->__kgmmuFmtFamiliesInit__(pGpu, pKernelGmmu);
1143 }
1144 
1145 NV_STATUS kgmmuTranslatePtePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1);
1146 
1147 static inline NV_STATUS kgmmuTranslatePtePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1148     return NV_OK;
1149 }
1150 
1151 static inline NV_STATUS kgmmuTranslatePtePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1152     return pKernelGmmu->__kgmmuTranslatePtePcfFromSw__(pKernelGmmu, arg0, arg1);
1153 }
1154 
1155 NV_STATUS kgmmuTranslatePtePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2);
1156 
1157 static inline NV_STATUS kgmmuTranslatePtePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) {
1158     return NV_OK;
1159 }
1160 
1161 static inline NV_STATUS kgmmuTranslatePtePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) {
1162     return pKernelGmmu->__kgmmuTranslatePtePcfFromHw__(pKernelGmmu, arg0, arg1, arg2);
1163 }
1164 
1165 NV_STATUS kgmmuTranslatePdePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1);
1166 
1167 static inline NV_STATUS kgmmuTranslatePdePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1168     return NV_OK;
1169 }
1170 
1171 static inline NV_STATUS kgmmuTranslatePdePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1172     return pKernelGmmu->__kgmmuTranslatePdePcfFromSw__(pKernelGmmu, arg0, arg1);
1173 }
1174 
1175 NV_STATUS kgmmuTranslatePdePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2);
1176 
1177 static inline NV_STATUS kgmmuTranslatePdePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) {
1178     return NV_OK;
1179 }
1180 
1181 static inline NV_STATUS kgmmuTranslatePdePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) {
1182     return pKernelGmmu->__kgmmuTranslatePdePcfFromHw__(pKernelGmmu, arg0, arg1, arg2);
1183 }
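
/*
 * Usage sketch for the PCF translation helpers (illustrative): the _GH100
 * variants convert between the software PCF encoding and the hardware PTE/PDE
 * PCF bits, while the _56cd7a stubs (presumably bound on chips without the PCF
 * scheme) return NV_OK without touching the output.  'swPtePcf' is a
 * hypothetical caller-side value.
 *
 *   NvU32 hwPtePcf = 0;
 *   NV_STATUS status = kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, swPtePcf, &hwPtePcf);
 *   if (status != NV_OK)
 *       return status;   // swPtePcf did not map to a valid hardware PCF
 */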
1184 
1185 NV_STATUS kgmmuGetFaultRegisterMappings_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);
1186 
1187 NV_STATUS kgmmuGetFaultRegisterMappings_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);
1188 
1189 static inline NV_STATUS kgmmuGetFaultRegisterMappings_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl) {
1190     return pKernelGmmu->__kgmmuGetFaultRegisterMappings__(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl);
1191 }
1192 
1193 NV_STATUS kgmmuIssueReplayableFaultBufferFlush_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1194 
1195 static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1196     return NV_ERR_NOT_SUPPORTED;
1197 }
1198 
1199 static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1200     return pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__(pGpu, pKernelGmmu);
1201 }
1202 
1203 NV_STATUS kgmmuFaultBufferAllocSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1204 
1205 static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1206     return NV_OK;
1207 }
1208 
1209 static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1210     return pKernelGmmu->__kgmmuFaultBufferAllocSharedMemory__(pGpu, pKernelGmmu, arg0);
1211 }
1212 
1213 void kgmmuFaultBufferFreeSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1214 
1215 static inline void kgmmuFaultBufferFreeSharedMemory_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1216     return;
1217 }
1218 
1219 static inline void kgmmuFaultBufferFreeSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1220     pKernelGmmu->__kgmmuFaultBufferFreeSharedMemory__(pGpu, pKernelGmmu, arg0);
1221 }
1222 
1223 NV_STATUS kgmmuSetupWarForBug2720120_GA100(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam);
1224 
1225 static inline NV_STATUS kgmmuSetupWarForBug2720120_56cd7a(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) {
1226     return NV_OK;
1227 }
1228 
1229 static inline NV_STATUS kgmmuSetupWarForBug2720120_DISPATCH(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) {
1230     return pKernelGmmu->__kgmmuSetupWarForBug2720120__(pKernelGmmu, pFam);
1231 }
1232 
1233 NvU32 kgmmuGetGraphicsEngineId_GV100(struct KernelGmmu *pKernelGmmu);
1234 
1235 NvU32 kgmmuGetGraphicsEngineId_GH100(struct KernelGmmu *pKernelGmmu);
1236 
1237 static inline NvU32 kgmmuGetGraphicsEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
1238     return pKernelGmmu->__kgmmuGetGraphicsEngineId__(pKernelGmmu);
1239 }
1240 
1241 static inline NV_STATUS kgmmuStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
1242     return pEngstate->__kgmmuStateLoad__(pGpu, pEngstate, arg0);
1243 }
1244 
1245 static inline NV_STATUS kgmmuStateUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
1246     return pEngstate->__kgmmuStateUnload__(pGpu, pEngstate, arg0);
1247 }
1248 
1249 static inline NV_STATUS kgmmuServiceNotificationInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) {
1250     return pIntrService->__kgmmuServiceNotificationInterrupt__(pGpu, pIntrService, pParams);
1251 }
1252 
1253 static inline NV_STATUS kgmmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
1254     return pEngstate->__kgmmuStatePreLoad__(pGpu, pEngstate, arg0);
1255 }
1256 
1257 static inline NV_STATUS kgmmuStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
1258     return pEngstate->__kgmmuStatePostUnload__(pGpu, pEngstate, arg0);
1259 }
1260 
1261 static inline NV_STATUS kgmmuStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
1262     return pEngstate->__kgmmuStateInitUnlocked__(pGpu, pEngstate);
1263 }
1264 
1265 static inline void kgmmuInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
1266     pEngstate->__kgmmuInitMissing__(pGpu, pEngstate);
1267 }
1268 
1269 static inline NV_STATUS kgmmuStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
1270     return pEngstate->__kgmmuStatePreInitLocked__(pGpu, pEngstate);
1271 }
1272 
1273 static inline NV_STATUS kgmmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
1274     return pEngstate->__kgmmuStatePreInitUnlocked__(pGpu, pEngstate);
1275 }
1276 
1277 static inline NvBool kgmmuClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pIntrService, IntrServiceClearInterruptArguments *pParams) {
1278     return pIntrService->__kgmmuClearInterrupt__(pGpu, pIntrService, pParams);
1279 }
1280 
1281 static inline NvBool kgmmuIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
1282     return pEngstate->__kgmmuIsPresent__(pGpu, pEngstate);
1283 }
1284 
1285 static inline NvU32 kgmmuGetPDEAperture(struct KernelGmmu *pKernelGmmu) {
1286     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1287     return pKernelGmmu_PRIVATE->PDEAperture;
1288 }
1289 
1290 static inline NvU32 kgmmuGetPTEAperture(struct KernelGmmu *pKernelGmmu) {
1291     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1292     return pKernelGmmu_PRIVATE->PTEAperture;
1293 }
1294 
1295 static inline NvU32 kgmmuGetPDEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
1296     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1297     return pKernelGmmu_PRIVATE->PDEBAR1Aperture;
1298 }
1299 
1300 static inline NvU32 kgmmuGetPTEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
1301     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1302     return pKernelGmmu_PRIVATE->PTEBAR1Aperture;
1303 }
1304 
1305 static inline NvU32 kgmmuGetPDEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
1306     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1307     return pKernelGmmu_PRIVATE->PDEBAR1Attr;
1308 }
1309 
1310 static inline NvU32 kgmmuGetPTEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
1311     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1312     return pKernelGmmu_PRIVATE->PTEBAR1Attr;
1313 }
1314 
1315 static inline NvU32 kgmmuGetPDEAttr(struct KernelGmmu *pKernelGmmu) {
1316     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1317     return pKernelGmmu_PRIVATE->PDEAttr;
1318 }
1319 
1320 static inline NvU32 kgmmuGetPTEAttr(struct KernelGmmu *pKernelGmmu) {
1321     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1322     return pKernelGmmu_PRIVATE->PTEAttr;
1323 }
1324 
1325 static inline NvU32 kgmmuGetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu) {
1326     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1327     return pKernelGmmu_PRIVATE->overrideBigPageSize;
1328 }
1329 
1330 static inline void kgmmuSetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu, NvU32 bigPageSize) {
1331     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1332     pKernelGmmu_PRIVATE->overrideBigPageSize = bigPageSize;
1333 }
1334 
1335 static inline NvBool kgmmuIsPerVaspaceBigPageEn(struct KernelGmmu *pKernelGmmu) {
1336     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1337     return pKernelGmmu_PRIVATE->bEnablePerVaspaceBigPage;
1338 }
1339 
1340 static inline NvBool kgmmuIsIgnoreHubTlbInvalidate(struct KernelGmmu *pKernelGmmu) {
1341     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1342     return pKernelGmmu_PRIVATE->bIgnoreHubTlbInvalidate;
1343 }
1344 
1345 static inline NvBool kgmmuIsHugePageSupported(struct KernelGmmu *pKernelGmmu) {
1346     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1347     return pKernelGmmu_PRIVATE->bHugePageSupported;
1348 }
1349 
1350 static inline NvBool kgmmuIsPageSize512mbSupported(struct KernelGmmu *pKernelGmmu) {
1351     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1352     return pKernelGmmu_PRIVATE->bPageSize512mbSupported;
1353 }
1354 
1355 static inline NvBool kgmmuIsBug2720120WarEnabled(struct KernelGmmu *pKernelGmmu) {
1356     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1357     return pKernelGmmu_PRIVATE->bBug2720120WarEnabled;
1358 }
1359 
1360 static inline NvBool kgmmuIsVaspaceInteropSupported(struct KernelGmmu *pKernelGmmu) {
1361     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1362     return pKernelGmmu_PRIVATE->bVaspaceInteropSupported;
1363 }
1364 
1365 static inline NvU64 kgmmuGetMaxVASize(struct KernelGmmu *pKernelGmmu) {
1366     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1367     return pKernelGmmu_PRIVATE->maxVASize;
1368 }
1369 
1370 static inline NvU64 kgmmuGetSysBaseAddress(struct KernelGmmu *pKernelGmmu) {
1371     struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
1372     return pKernelGmmu_PRIVATE->sysmemBaseAddress;
1373 }
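
/*
 * The accessors above are the intended way to read KernelGmmu configuration
 * that lives behind PRIVATE_FIELD; the KernelGmmu_PRIVATE cast is a generated
 * detail that callers should not repeat.  A minimal sketch (illustrative only):
 *
 *   NvU32 bar1PdeAperture = kgmmuGetPDEBAR1Aperture(pKernelGmmu);
 *   NvU32 bar1PdeAttr     = kgmmuGetPDEBAR1Attr(pKernelGmmu);
 *   NvU64 maxVa           = kgmmuGetMaxVASize(pKernelGmmu);
 */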
1374 
1375 void kgmmuDestruct_IMPL(struct KernelGmmu *pKernelGmmu);
1376 
1377 #define __nvoc_kgmmuDestruct(pKernelGmmu) kgmmuDestruct_IMPL(pKernelGmmu)
1378 NV_STATUS kgmmuFmtInit_IMPL(struct KernelGmmu *pKernelGmmu);
1379 
1380 #ifdef __nvoc_kern_gmmu_h_disabled
1381 static inline NV_STATUS kgmmuFmtInit(struct KernelGmmu *pKernelGmmu) {
1382     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1383     return NV_ERR_NOT_SUPPORTED;
1384 }
1385 #else //__nvoc_kern_gmmu_h_disabled
1386 #define kgmmuFmtInit(pKernelGmmu) kgmmuFmtInit_IMPL(pKernelGmmu)
1387 #endif //__nvoc_kern_gmmu_h_disabled
1388 
1389 GMMU_APERTURE kgmmuGetMemAperture_IMPL(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc);
1390 
1391 #ifdef __nvoc_kern_gmmu_h_disabled
1392 static inline GMMU_APERTURE kgmmuGetMemAperture(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc) {
1393     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1394     GMMU_APERTURE ret;
1395     portMemSet(&ret, 0, sizeof(GMMU_APERTURE));
1396     return ret;
1397 }
1398 #else //__nvoc_kern_gmmu_h_disabled
1399 #define kgmmuGetMemAperture(pKernelGmmu, pMemDesc) kgmmuGetMemAperture_IMPL(pKernelGmmu, pMemDesc)
1400 #endif //__nvoc_kern_gmmu_h_disabled
1401 
1402 const GMMU_FMT_FAMILY *kgmmuFmtGetFamily_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version);
1403 
1404 #ifdef __nvoc_kern_gmmu_h_disabled
1405 static inline const GMMU_FMT_FAMILY *kgmmuFmtGetFamily(struct KernelGmmu *pKernelGmmu, NvU32 version) {
1406     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1407     return NULL;
1408 }
1409 #else //__nvoc_kern_gmmu_h_disabled
1410 #define kgmmuFmtGetFamily(pKernelGmmu, version) kgmmuFmtGetFamily_IMPL(pKernelGmmu, version)
1411 #endif //__nvoc_kern_gmmu_h_disabled
1412 
1413 const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1414 
1415 #ifdef __nvoc_kern_gmmu_h_disabled
1416 static inline const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1417     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1418     return NULL;
1419 }
1420 #else //__nvoc_kern_gmmu_h_disabled
1421 #define kgmmuGetStaticInfo(pGpu, pKernelGmmu) kgmmuGetStaticInfo_IMPL(pGpu, pKernelGmmu)
1422 #endif //__nvoc_kern_gmmu_h_disabled
1423 
1424 const struct GMMU_FMT *kgmmuFmtGet_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize);
1425 
1426 #ifdef __nvoc_kern_gmmu_h_disabled
1427 static inline const struct GMMU_FMT *kgmmuFmtGet(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize) {
1428     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1429     return NULL;
1430 }
1431 #else //__nvoc_kern_gmmu_h_disabled
1432 #define kgmmuFmtGet(pKernelGmmu, version, bigPageSize) kgmmuFmtGet_IMPL(pKernelGmmu, version, bigPageSize)
1433 #endif //__nvoc_kern_gmmu_h_disabled
1434 
1435 void kgmmuExtractPteInfo_IMPL(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3);
1436 
1437 #ifdef __nvoc_kern_gmmu_h_disabled
1438 static inline void kgmmuExtractPteInfo(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3) {
1439     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1440 }
1441 #else //__nvoc_kern_gmmu_h_disabled
1442 #define kgmmuExtractPteInfo(pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuExtractPteInfo_IMPL(pKernelGmmu, arg0, arg1, arg2, arg3)
1443 #endif //__nvoc_kern_gmmu_h_disabled
1444 
1445 void kgmmuFieldSetKindCompTags_IMPL(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries);
1446 
1447 #ifdef __nvoc_kern_gmmu_h_disabled
1448 static inline void kgmmuFieldSetKindCompTags(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries) {
1449     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1450 }
1451 #else //__nvoc_kern_gmmu_h_disabled
1452 #define kgmmuFieldSetKindCompTags(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries) kgmmuFieldSetKindCompTags_IMPL(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries)
1453 #endif //__nvoc_kern_gmmu_h_disabled
1454 
1455 NvBool kgmmuFmtIsBigPageSizeSupported_IMPL(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize);
1456 
1457 #ifdef __nvoc_kern_gmmu_h_disabled
1458 static inline NvBool kgmmuFmtIsBigPageSizeSupported(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
1459     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1460     return NV_FALSE;
1461 }
1462 #else //__nvoc_kern_gmmu_h_disabled
1463 #define kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, bigPageSize) kgmmuFmtIsBigPageSizeSupported_IMPL(pKernelGmmu, bigPageSize)
1464 #endif //__nvoc_kern_gmmu_h_disabled
1465 
1466 const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1467 
1468 #ifdef __nvoc_kern_gmmu_h_disabled
1469 static inline const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1470     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1471     return NULL;
1472 }
1473 #else //__nvoc_kern_gmmu_h_disabled
1474 #define kgmmuFmtGetLatestSupportedFormat(pGpu, pKernelGmmu) kgmmuFmtGetLatestSupportedFormat_IMPL(pGpu, pKernelGmmu)
1475 #endif //__nvoc_kern_gmmu_h_disabled
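
/*
 * Format lookup sketch (illustrative): pick the newest GMMU format the GPU
 * supports and check whether a 64KB big page size is usable.
 *
 *   const struct GMMU_FMT *pFmt = kgmmuFmtGetLatestSupportedFormat(pGpu, pKernelGmmu);
 *   if (pFmt == NULL || !kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, RM_PAGE_SIZE_64K))
 *       return NV_ERR_NOT_SUPPORTED;
 */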
1476 
1477 NvU32 kgmmuGetMinBigPageSize_IMPL(struct KernelGmmu *pKernelGmmu);
1478 
1479 #ifdef __nvoc_kern_gmmu_h_disabled
1480 static inline NvU32 kgmmuGetMinBigPageSize(struct KernelGmmu *pKernelGmmu) {
1481     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1482     return 0;
1483 }
1484 #else //__nvoc_kern_gmmu_h_disabled
1485 #define kgmmuGetMinBigPageSize(pKernelGmmu) kgmmuGetMinBigPageSize_IMPL(pKernelGmmu)
1486 #endif //__nvoc_kern_gmmu_h_disabled
1487 
1488 NV_STATUS kgmmuInstBlkInit_IMPL(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams);
1489 
1490 #ifdef __nvoc_kern_gmmu_h_disabled
1491 static inline NV_STATUS kgmmuInstBlkInit(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams) {
1492     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1493     return NV_ERR_NOT_SUPPORTED;
1494 }
1495 #else //__nvoc_kern_gmmu_h_disabled
1496 #define kgmmuInstBlkInit(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams) kgmmuInstBlkInit_IMPL(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams)
1497 #endif //__nvoc_kern_gmmu_h_disabled
1498 
1499 NV_STATUS kgmmuFaultBufferReplayableAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1);
1500 
1501 #ifdef __nvoc_kern_gmmu_h_disabled
1502 static inline NV_STATUS kgmmuFaultBufferReplayableAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1) {
1503     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1504     return NV_ERR_NOT_SUPPORTED;
1505 }
1506 #else //__nvoc_kern_gmmu_h_disabled
1507 #define kgmmuFaultBufferReplayableAllocate(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferReplayableAllocate_IMPL(pGpu, pKernelGmmu, arg0, arg1)
1508 #endif //__nvoc_kern_gmmu_h_disabled
1509 
1510 NV_STATUS kgmmuFaultBufferReplayableDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1511 
1512 #ifdef __nvoc_kern_gmmu_h_disabled
1513 static inline NV_STATUS kgmmuFaultBufferReplayableDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1514     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1515     return NV_ERR_NOT_SUPPORTED;
1516 }
1517 #else //__nvoc_kern_gmmu_h_disabled
1518 #define kgmmuFaultBufferReplayableDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferReplayableDestroy_IMPL(pGpu, pKernelGmmu)
1519 #endif //__nvoc_kern_gmmu_h_disabled
1520 
1521 NV_STATUS kgmmuFaultBufferAlloc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1);
1522 
1523 #ifdef __nvoc_kern_gmmu_h_disabled
1524 static inline NV_STATUS kgmmuFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
1525     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1526     return NV_ERR_NOT_SUPPORTED;
1527 }
1528 #else //__nvoc_kern_gmmu_h_disabled
1529 #define kgmmuFaultBufferAlloc(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferAlloc_IMPL(pGpu, pKernelGmmu, arg0, arg1)
1530 #endif //__nvoc_kern_gmmu_h_disabled
1531 
1532 NV_STATUS kgmmuFaultBufferCreateMemDesc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3);
1533 
1534 #ifdef __nvoc_kern_gmmu_h_disabled
1535 static inline NV_STATUS kgmmuFaultBufferCreateMemDesc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3) {
1536     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1537     return NV_ERR_NOT_SUPPORTED;
1538 }
1539 #else //__nvoc_kern_gmmu_h_disabled
1540 #define kgmmuFaultBufferCreateMemDesc(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuFaultBufferCreateMemDesc_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
1541 #endif //__nvoc_kern_gmmu_h_disabled
1542 
1543 NV_STATUS kgmmuFaultBufferGetAddressSpace_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2);
1544 
1545 #ifdef __nvoc_kern_gmmu_h_disabled
1546 static inline NV_STATUS kgmmuFaultBufferGetAddressSpace(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2) {
1547     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1548     return NV_ERR_NOT_SUPPORTED;
1549 }
1550 #else //__nvoc_kern_gmmu_h_disabled
1551 #define kgmmuFaultBufferGetAddressSpace(pGpu, pKernelGmmu, arg0, arg1, arg2) kgmmuFaultBufferGetAddressSpace_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2)
1552 #endif //__nvoc_kern_gmmu_h_disabled
1553 
1554 NV_STATUS kgmmuFaultBufferFree_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1555 
1556 #ifdef __nvoc_kern_gmmu_h_disabled
1557 static inline NV_STATUS kgmmuFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1558     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1559     return NV_ERR_NOT_SUPPORTED;
1560 }
1561 #else //__nvoc_kern_gmmu_h_disabled
1562 #define kgmmuFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFree_IMPL(pGpu, pKernelGmmu, arg0)
1563 #endif //__nvoc_kern_gmmu_h_disabled
1564 
1565 NV_STATUS kgmmuFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1566 
1567 #ifdef __nvoc_kern_gmmu_h_disabled
1568 static inline NV_STATUS kgmmuFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1569     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1570     return NV_ERR_NOT_SUPPORTED;
1571 }
1572 #else //__nvoc_kern_gmmu_h_disabled
1573 #define kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0)
1574 #endif //__nvoc_kern_gmmu_h_disabled
1575 
1576 NV_STATUS kgmmuClientShadowFaultBufferAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1577 
1578 #ifdef __nvoc_kern_gmmu_h_disabled
1579 static inline NV_STATUS kgmmuClientShadowFaultBufferAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1580     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1581     return NV_ERR_NOT_SUPPORTED;
1582 }
1583 #else //__nvoc_kern_gmmu_h_disabled
1584 #define kgmmuClientShadowFaultBufferAllocate(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAllocate_IMPL(pGpu, pKernelGmmu, arg0)
1585 #endif //__nvoc_kern_gmmu_h_disabled
1586 
1587 NV_STATUS kgmmuClientShadowFaultBufferDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1588 
1589 #ifdef __nvoc_kern_gmmu_h_disabled
1590 static inline NV_STATUS kgmmuClientShadowFaultBufferDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1591     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1592     return NV_ERR_NOT_SUPPORTED;
1593 }
1594 #else //__nvoc_kern_gmmu_h_disabled
1595 #define kgmmuClientShadowFaultBufferDestroy(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferDestroy_IMPL(pGpu, pKernelGmmu, arg0)
1596 #endif //__nvoc_kern_gmmu_h_disabled
1597 
1598 NV_STATUS kgmmuClientShadowFaultBufferRegister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1599 
1600 #ifdef __nvoc_kern_gmmu_h_disabled
1601 static inline NV_STATUS kgmmuClientShadowFaultBufferRegister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1602     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1603     return NV_ERR_NOT_SUPPORTED;
1604 }
1605 #else //__nvoc_kern_gmmu_h_disabled
1606 #define kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferRegister_IMPL(pGpu, pKernelGmmu, arg0)
1607 #endif //__nvoc_kern_gmmu_h_disabled
1608 
1609 void kgmmuClientShadowFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1610 
1611 #ifdef __nvoc_kern_gmmu_h_disabled
1612 static inline void kgmmuClientShadowFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1613     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1614 }
1615 #else //__nvoc_kern_gmmu_h_disabled
1616 #define kgmmuClientShadowFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0)
1617 #endif //__nvoc_kern_gmmu_h_disabled
1618 
1619 void kgmmuClientShadowFaultBufferPagesDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1);
1620 
1621 #ifdef __nvoc_kern_gmmu_h_disabled
1622 static inline void kgmmuClientShadowFaultBufferPagesDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1) {
1623     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1624 }
1625 #else //__nvoc_kern_gmmu_h_disabled
1626 #define kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, arg0, arg1) kgmmuClientShadowFaultBufferPagesDestroy_IMPL(pGpu, pKernelGmmu, arg0, arg1)
1627 #endif //__nvoc_kern_gmmu_h_disabled
1628 
1629 void kgmmuClientShadowFaultBufferQueueDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1);
1630 
1631 #ifdef __nvoc_kern_gmmu_h_disabled
1632 static inline void kgmmuClientShadowFaultBufferQueueDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1) {
1633     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1634 }
1635 #else //__nvoc_kern_gmmu_h_disabled
1636 #define kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, arg0, arg1) kgmmuClientShadowFaultBufferQueueDestroy_IMPL(pGpu, pKernelGmmu, arg0, arg1)
1637 #endif //__nvoc_kern_gmmu_h_disabled
1638 
1639 NvU64 kgmmuGetSizeOfPageTables_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3);
1640 
1641 #ifdef __nvoc_kern_gmmu_h_disabled
1642 static inline NvU64 kgmmuGetSizeOfPageTables(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) {
1643     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1644     return 0;
1645 }
1646 #else //__nvoc_kern_gmmu_h_disabled
1647 #define kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageTables_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
1648 #endif //__nvoc_kern_gmmu_h_disabled
1649 
1650 NvU64 kgmmuGetSizeOfPageDirs_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3);
1651 
1652 #ifdef __nvoc_kern_gmmu_h_disabled
1653 static inline NvU64 kgmmuGetSizeOfPageDirs(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) {
1654     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1655     return 0;
1656 }
1657 #else //__nvoc_kern_gmmu_h_disabled
1658 #define kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageDirs_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
1659 #endif //__nvoc_kern_gmmu_h_disabled
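
/*
 * Sizing sketch (illustrative; the meaning of the three NvU64 arguments is an
 * assumption here -- VA range base, VA range limit, and mapping page size --
 * check the _IMPL definitions before relying on it).  'pFmt' is a format
 * obtained earlier, e.g. via kgmmuFmtGetLatestSupportedFormat().
 *
 *   NvU64 vaBase   = 0;
 *   NvU64 vaLimit  = (128ULL << 30) - 1;          // hypothetical 128GB range
 *   NvU64 pageSize = RM_PAGE_SIZE_64K;
 *   NvU64 bytes    = kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, pFmt, vaBase, vaLimit, pageSize) +
 *                    kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, pFmt, vaBase, vaLimit, pageSize);
 */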
1660 
1661 GMMU_APERTURE kgmmuGetExternalAllocAperture_IMPL(NvU32 addressSpace);
1662 
1663 #define kgmmuGetExternalAllocAperture(addressSpace) kgmmuGetExternalAllocAperture_IMPL(addressSpace)
1664 void kgmmuEncodePhysAddrs_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count);
1665 
1666 #ifdef __nvoc_kern_gmmu_h_disabled
1667 static inline void kgmmuEncodePhysAddrs(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count) {
1668     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1669 }
1670 #else //__nvoc_kern_gmmu_h_disabled
1671 #define kgmmuEncodePhysAddrs(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count) kgmmuEncodePhysAddrs_IMPL(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count)
1672 #endif //__nvoc_kern_gmmu_h_disabled
1673 
1674 NvU64 kgmmuEncodePhysAddr_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress);
1675 
1676 #ifdef __nvoc_kern_gmmu_h_disabled
1677 static inline NvU64 kgmmuEncodePhysAddr(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress) {
1678     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1679     return 0;
1680 }
1681 #else //__nvoc_kern_gmmu_h_disabled
1682 #define kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, fabricBaseAddress) kgmmuEncodePhysAddr_IMPL(pKernelGmmu, aperture, physAddr, fabricBaseAddress)
1683 #endif //__nvoc_kern_gmmu_h_disabled
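
/*
 * Encoding sketch (illustrative): before a PTE is written, the raw physical
 * address is encoded for its target aperture.  GMMU_APERTURE_PEER and
 * 'fabricBaseAddress' are caller-side assumptions here (the enumerant comes
 * from the GMMU format headers; the fabric base is only meaningful for
 * NVLink/fabric peers).
 *
 *   NvU64 pteAddr = kgmmuEncodePhysAddr(pKernelGmmu, GMMU_APERTURE_PEER,
 *                                       physAddr, fabricBaseAddress);
 */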
1684 
1685 void kgmmuAccessCntrChangeIntrOwnership_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);
1686 
1687 #ifdef __nvoc_kern_gmmu_h_disabled
1688 static inline void kgmmuAccessCntrChangeIntrOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
1689     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1690 }
1691 #else //__nvoc_kern_gmmu_h_disabled
1692 #define kgmmuAccessCntrChangeIntrOwnership(pGpu, pKernelGmmu, arg0) kgmmuAccessCntrChangeIntrOwnership_IMPL(pGpu, pKernelGmmu, arg0)
1693 #endif //__nvoc_kern_gmmu_h_disabled
1694 
1695 NvS32 *kgmmuGetFatalFaultIntrPendingState_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid);
1696 
1697 #ifdef __nvoc_kern_gmmu_h_disabled
1698 static inline NvS32 *kgmmuGetFatalFaultIntrPendingState(struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
1699     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1700     return NULL;
1701 }
1702 #else //__nvoc_kern_gmmu_h_disabled
1703 #define kgmmuGetFatalFaultIntrPendingState(pKernelGmmu, gfid) kgmmuGetFatalFaultIntrPendingState_IMPL(pKernelGmmu, gfid)
1704 #endif //__nvoc_kern_gmmu_h_disabled
1705 
1706 struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex);
1707 
1708 #ifdef __nvoc_kern_gmmu_h_disabled
1709 static inline struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex) {
1710     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1711     return NULL;
1712 }
1713 #else //__nvoc_kern_gmmu_h_disabled
1714 #define kgmmuGetHwFaultBufferPtr(pKernelGmmu, gfid, faultBufferIndex) kgmmuGetHwFaultBufferPtr_IMPL(pKernelGmmu, gfid, faultBufferIndex)
1715 #endif //__nvoc_kern_gmmu_h_disabled
1716 
1717 #undef PRIVATE_FIELD
1718 
1719 
1720 // defines for TLB invalidation scope
1721 #define NV_GMMU_INVAL_SCOPE_ALL_TLBS       0x00000000
1722 #define NV_GMMU_INVAL_SCOPE_LINK_TLBS      0x00000001
1723 #define NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS  0x00000002
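
/*
 * Scope selection sketch (illustrative): the scope presumably gets recorded in
 * the TLB_INVALIDATE_PARAMS before the invalidate is issued.  On GPUs where
 * link-TLB scoping is not wired up, the dispatch hits the _46f6a7 stub and
 * returns NV_ERR_NOT_SUPPORTED, which a caller can treat as "invalidate
 * everything".
 *
 *   TLB_INVALIDATE_PARAMS params;
 *   portMemSet(&params, 0, sizeof(params));
 *   if (kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu,
 *           NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS, &params) == NV_ERR_NOT_SUPPORTED)
 *   {
 *       // Fall back to the default all-TLB invalidate.
 *   }
 */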
1724 
1725 // bit fields for uvmSharedIntrRmOwnsMask
1726 #define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY          NVBIT(0)
1727 #define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_ERROR           NVBIT(1)
1728 #define RM_UVM_SHARED_INTR_MASK_MMU_ECC_UNCORRECTED_ERROR_NOTIFY   NVBIT(2)
1729 #define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY        NVBIT(3)
1730 #define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_OVERFLOW      NVBIT(4)
1731 #define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_NOTIFY     NVBIT(5)
1732 #define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_OVERFLOW   NVBIT(6)
1733 #define RM_UVM_SHARED_INTR_MASK_MMU_OTHER_FAULT_NOTIFY             NVBIT(7)
1734 #define RM_UVM_SHARED_INTR_MASK_ALL                                (NVBIT(8) - 1)
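
/*
 * Mask composition sketch (illustrative): the bits above describe which of the
 * UVM-shared interrupts RM currently owns.  'uvmSharedIntrRmOwnsMask' below is
 * a hypothetical local holding that mask; a check that RM owns both
 * replayable-fault interrupts could look like:
 *
 *   NvU32  replayableBits    = RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY |
 *                              RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_OVERFLOW;
 *   NvBool bRmOwnsReplayable = ((uvmSharedIntrRmOwnsMask & replayableBits) == replayableBits);
 */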
1735 
1736 /*!
1737  * Constants used for UVM mirroring loops.
1738  */
1739 #define GMMU_USER_PAGE_DIR_INDEX       0
1740 #define GMMU_KERNEL_PAGE_DIR_INDEX     1
1741 #define GMMU_MAX_PAGE_DIR_INDEX_COUNT  (GMMU_KERNEL_PAGE_DIR_INDEX + 1)
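
/*
 * Mirroring loop sketch (illustrative): code that touches both the user and
 * the kernel-mirrored page directory typically iterates over the two indices.
 *
 *   NvU32 pdIdx;
 *   for (pdIdx = GMMU_USER_PAGE_DIR_INDEX; pdIdx < GMMU_MAX_PAGE_DIR_INDEX_COUNT; pdIdx++)
 *   {
 *       // ... operate on page directory pdIdx ...
 *   }
 */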
1742 
1743 /*!
1744  * Page table walker callbacks used for map/unmap operations.
1745  */
1746 extern const MMU_WALK_CALLBACKS  g_gmmuWalkCallbacks;
1747 extern const MMU_WALK_CALLBACKS  g_bar2WalkCallbacks;
1748 extern const MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks;
1749 
1750 void       gmmuMemDescCacheFree(GVAS_GPU_STATE *pGpuState);
1751 
1752 #endif // KERN_GMMU_H
1753 
1754 #ifdef __cplusplus
1755 } // extern "C"
1756 #endif
1757 #endif // _G_KERN_GMMU_NVOC_H_
1758