
#ifndef _G_KERN_GMMU_NVOC_H_
#define _G_KERN_GMMU_NVOC_H_
#include "nvoc/runtime.h"

// Version of generated metadata structures
#ifdef NVOC_METADATA_VERSION
#undef NVOC_METADATA_VERSION
#endif
#define NVOC_METADATA_VERSION 0

#ifdef __cplusplus
extern "C" {
#endif
15 
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
38 
/******************************************************************************
*
*       Kernel GMMU module header
*       Defines and structures used on CPU RM for the GMMU object.
*
******************************************************************************/

#pragma once
#include "g_kern_gmmu_nvoc.h"

#ifndef KERN_GMMU_H
#define KERN_GMMU_H
51 
#include "core/core.h"
#include "core/strict.h"
#include "nvtypes.h"
#include "nvoc/prelude.h"
#include "nvoc/object.h"
#include "gpu/mmu/mmu_trace.h"
#include "mmu/gmmu_fmt.h"
#include "class/cl90f1.h"    // FERMI_VASPACE_A

#include "gpu/gpu_timeout.h"
#include "containers/queue.h"
#include "gpu/eng_state.h"
#include "gpu/intr/intr_service.h"
#include "gpu/fifo/kernel_fifo.h"
#include "gpu/mem_mgr/virt_mem_allocator_common.h" // RM_PAGE_SIZE_64K
#include "mmu/mmu_walk.h"

#include "gpu/gpu_halspec.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"  // NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS

#include "class/clc369.h" // MMU_FAULT_BUFFER
73 
typedef struct COMPR_INFO COMPR_INFO;

typedef struct GVAS_GPU_STATE GVAS_GPU_STATE;

typedef struct _fifo_mmu_exception_data FIFO_MMU_EXCEPTION_DATA;
79 
/*!
 * Family of GMMU formats sharing the same version and PDE/PTE defines
 * but with differing big page sizes.
 * The term "family" is used here in the mathematical (set theory) sense.
 *
 * nv4kPte: GV100+ supports NV4K encoding; see @ref gmmuStateInitHal_GV100 for details.
 */
typedef struct
{
    GMMU_FMT_PDE_MULTI pdeMulti;
    GMMU_FMT_PDE       pde;
    GMMU_FMT_PTE       pte;
    GMMU_ENTRY_VALUE   sparsePte;
    GMMU_ENTRY_VALUE   sparsePde;
    GMMU_ENTRY_VALUE   sparsePdeMulti;
    GMMU_ENTRY_VALUE   nv4kPte;
    GMMU_ENTRY_VALUE   bug2720120WarPde0;
    GMMU_ENTRY_VALUE   bug2720120WarPde1;
    GMMU_FMT          *pFmts[GMMU_FMT_MAX_BIG_PAGE_SIZES];
} GMMU_FMT_FAMILY;
101 
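/*
 * Illustrative sketch only (not part of the generated NVOC interface): how a
 * format family's per-big-page-size formats can be indexed.  The helper name is
 * hypothetical; it relies only on the GMMU_FMT_FAMILY fields declared above.
 */
#if 0
static const GMMU_FMT *
exampleFmtForBigPageIndex(const GMMU_FMT_FAMILY *pFam, NvU32 bigPageIdx)
{
    // pFmts[] holds one format per supported big page size.
    if (bigPageIdx >= GMMU_FMT_MAX_BIG_PAGE_SIZES)
        return NULL;
    return pFam->pFmts[bigPageIdx];
}
#endif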
/*!
 * This structure contains information needed for issuing a commit TLB invalidate
 * through RmTest.
 */
typedef struct
{
    NvU32       gfid;
    NvBool      invalidateAll;
} COMMIT_TLB_INVALIDATE_TEST_PARAMS;
111 
/*!
 * This structure contains information needed for issuing a TLB invalidate.
 */
typedef struct
{
    RmPhysAddr pdbAddress;
    NvU32      pdbAperture;
    NvU32      gfid;
    NvU32      regVal;
    RMTIMEOUT  timeout;
} TLB_INVALIDATE_PARAMS;
123 
typedef enum
{
    NON_REPLAYABLE_FAULT_BUFFER = 0,
    REPLAYABLE_FAULT_BUFFER,
    // This should always be the last entry.
    NUM_FAULT_BUFFERS
} FAULT_BUFFER_TYPE;
131 
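/*
 * NUM_FAULT_BUFFERS is intentionally the last enumerator so it can serve as an
 * array bound and loop limit.  A minimal sketch (hypothetical helper name):
 */
#if 0
static void exampleVisitFaultBufferTypes(void)
{
    NvU32 i;
    for (i = 0; i < NUM_FAULT_BUFFERS; i++)
    {
        // ... operate on (FAULT_BUFFER_TYPE)i: non-replayable, then replayable ...
    }
}
#endif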
/*!
 * This structure holds information about a page
 * of memory backing the fault buffer.
 */
typedef struct
{
    /*! Virtual address of this page */
    NvP64 pAddress;

    /*! Cookie returned by memdescMap() */
    NvP64 pPriv;
} GMMU_FAULT_BUFFER_PAGE;
144 
/*!
 * This structure holds information about the MMU HW fault buffer, which is mapped
 * in BAR2 and is used by the MMU to report MMU faults to SW.
 */
struct HW_FAULT_BUFFER
{
    NvU64 bar2FaultBufferAddr;
    MEMORY_DESCRIPTOR *pFaultBufferMemDesc;
    /*!
     * Cookie that is stored for the CPU mapping
     */
    NvP64 hCpuFaultBuffer;
    NvP64 kernelVaddr;

    GMMU_FAULT_BUFFER_PAGE *pBufferPages;

    NvU32 cachedGetIndex;

    /*!
     * Cached fault buffer size
     */
    NvU32 faultBufferSize;
};
168 
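/*
 * Illustrative sketch: relating the cached buffer size to the page-granular CPU
 * mapping tracked by pBufferPages.  Assumes the usual RM_PAGE_SIZE definition
 * from the memory manager headers; the helper name is hypothetical.
 */
#if 0
static NvU32 exampleHwFaultBufferPageCount(const struct HW_FAULT_BUFFER *pBuf)
{
    // One GMMU_FAULT_BUFFER_PAGE entry per backing page of the fault buffer.
    return (pBuf->faultBufferSize + RM_PAGE_SIZE - 1) / RM_PAGE_SIZE;
}
#endif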
/*!
 * This structure holds information shared between CPU-RM
 * and GSP-RM.
 */
typedef struct
{
    /*!
     * The GET index of the replayable shadow buffer. This
     * is updated by the UVM driver and read by GSP-RM.
     */
    NvU32 swGetIndex;
} FAULT_BUFFER_SHARED_MEMORY;
181 
/*!
 * This structure holds information about the client shadow fault buffer.
 */
typedef struct
{
    /*!
     * Pointer to the circular queue structure shared by RM with a
     * privileged client, used as the shadow fault buffer for holding
     * non-replayable faults.
     * This structure is shared between CPU-RM and GSP-RM in a GSP-enabled
     * driver.
     */
    NvP64 pQueue;

    /*! Memory descriptors associated with the queue. */
    MEMORY_DESCRIPTOR *pQueueMemDesc;

    NvP64 pQueueAddress;

    /*!
     * Execution context for the queue. Holds environment-specific
     * data that enables queue usage.
     */
    QueueContext queueContext;

    /*! Cookie returned by memdescMap() */
    NvP64 pQueuePriv;

    /*! Memory descriptor associated with the buffer. */
    MEMORY_DESCRIPTOR *pBufferMemDesc;

    NvP64 pBufferAddress;

    /*! Cookie returned by memdescMap() */
    NvP64 pBufferPriv;

    /*! GSP-only split mapping of the buffer. */
    GMMU_FAULT_BUFFER_PAGE *pBufferPages;

    NvU32 numBufferPages;

    /*!
     * Start index of the page containing the fault buffer metadata.
     * 0 if no metadata is present.
     */
    NvU32 metadataStartIndex;

    /*!
     * Used only by the replayable fault buffer. Memory descriptor used to
     * describe shared memory between CPU-RM and GSP-RM.
     */
    MEMORY_DESCRIPTOR *pFaultBufferSharedMemDesc;

    NvP64 pFaultBufferSharedMemoryAddress;

    NvP64 pFaultBufferSharedMemoryPriv;

    NvP64 pFaultBufferMetadataAddress;

} GMMU_CLIENT_SHADOW_FAULT_BUFFER;
242 
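/*
 * Minimal sketch (hypothetical helper): per the comment on metadataStartIndex
 * above, a value of 0 means the shadow buffer carries no metadata pages.
 */
#if 0
static NvBool exampleShadowBufferHasMetadata(const GMMU_CLIENT_SHADOW_FAULT_BUFFER *pShadow)
{
    return pShadow->metadataStartIndex != 0;
}
#endif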
/*!
 * Top-level structure containing all data structures used in MMU fault handling.
 */
struct GMMU_FAULT_BUFFER
{
    struct HW_FAULT_BUFFER hwFaultBuffers[NUM_FAULT_BUFFERS];

    /*!
     * Unique client and object handles stored here.
     * On VOLTA these are for MMU_FAULT_BUFFER; on PASCAL, for MAXWELL_FAULT_BUFFER_A.
     */
    NvHandle hFaultBufferClient;
    NvHandle hFaultBufferObject;

    /*!
     * Pointer to the circular queue structure used as the shadow fault buffer for
     * holding fatal fault packets serviced by RM.
     */
    NvP64 pRmShadowFaultBuffer;

    /*!
     * Client shadow fault buffer data and pointers, protected by GPU locks.
     * A client may allocate up to 2 shadow buffers, one each for replayable and
     * non-replayable faults.
     */
    GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer[NUM_FAULT_BUFFERS];
    GMMU_CLIENT_SHADOW_FAULT_BUFFER clientShadowFaultBuffer[NUM_FAULT_BUFFERS];

    /*!
     * Spinlock protecting the shadow buffer pointers
     */
    PORT_SPINLOCK *pShadowFaultBufLock;

    /*!
     * Flag indicating that a fatal fault interrupt is pending
     */
    NvS32 fatalFaultIntrPending;

    /*! Generational counter for the fault buffer. Incremented when the fault buffer wraps around. */
    volatile NvU64 faultBufferGenerationCounter;
};
284 
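/*
 * Illustrative sketch of the locking rule stated above: the client shadow
 * buffer pointers are read under pShadowFaultBufLock.  Assumes the standard
 * NvPort spinlock API; the helper name is hypothetical.
 */
#if 0
static GMMU_CLIENT_SHADOW_FAULT_BUFFER *
exampleGetClientShadowBuffer(struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type)
{
    GMMU_CLIENT_SHADOW_FAULT_BUFFER *pShadow;

    portSyncSpinlockAcquire(pFaultBuffer->pShadowFaultBufLock);
    pShadow = pFaultBuffer->pClientShadowFaultBuffer[type];
    portSyncSpinlockRelease(pFaultBuffer->pShadowFaultBufLock);

    return pShadow;
}
#endif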
typedef struct GMMU_FAULT_PACKET
{
    // 32-byte MMU fault packet
    NvU8 faultPacket[NVC369_BUF_SIZE];
} GMMU_FAULT_PACKET;

// Define the circular queue type used for the MMU shadow fault buffer
MAKE_QUEUE_CIRCULAR(GMMU_SHADOW_FAULT_BUF, GMMU_FAULT_PACKET);

#define GMMU_FAULT_PACKET_METADATA_SIZE                32
#define GMMU_FAULT_PACKET_METADATA_AUTHTAG_IDX          0
#define GMMU_FAULT_PACKET_METADATA_AUTHTAG_SIZE        16
#define GMMU_FAULT_PACKET_METADATA_VALID_IDX           16
#define GMMU_FAULT_PACKET_METADATA_VALID_SIZE           1
#define GMMU_FAULT_PACKET_METADATA_VALID_YES      NV_TRUE
#define GMMU_FAULT_PACKET_METADATA_VALID_NO      NV_FALSE

typedef struct GMMU_FAULT_PACKET_METADATA
{
    NvU8 metadata[GMMU_FAULT_PACKET_METADATA_SIZE];
} GMMU_FAULT_PACKET_METADATA;
306 
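/*
 * Minimal sketch of reading the metadata layout defined above: bytes [0..15]
 * hold the authentication tag and byte 16 holds the valid flag.  The helper
 * name is hypothetical.
 */
#if 0
static NvBool exampleFaultPacketMetadataValid(const GMMU_FAULT_PACKET_METADATA *pMeta)
{
    return pMeta->metadata[GMMU_FAULT_PACKET_METADATA_VALID_IDX] ==
           GMMU_FAULT_PACKET_METADATA_VALID_YES;
}
#endif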
/*!
 * Structure that holds the parameters an engine passes to kgmmuInstBlkInit
 * for initializing its instance block.
 */
typedef struct
{
    NvBool               bIsClientAdmin;
    NvBool               bIsFaultReplayable;
    /*
     * Defer the bus flush during instance block init.
     * If this field is set, kgmmuInstBlkInit() will not flush after the CPU writes;
     * the caller of kgmmuInstBlkInit() has to flush explicitly.
     * This is useful if the caller does back-to-back updates to the instance block,
     * e.g. subcontext array init during channel setup.
     */
    NvBool               bDeferFlush;
    NvU64                uvmKernelPrivRegion;

    // Instance block is being updated for a zombie subcontext.
    NvBool               bIsZombieSubctx;
    NvU8                *pInstBlk;      // VA of instance block.
} INST_BLK_INIT_PARAMS, *PINST_BLK_INIT_PARAMS;
329 
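/*
 * Illustrative sketch of the deferred-flush usage described above: a caller
 * doing back-to-back instance block updates sets bDeferFlush and issues a
 * single explicit flush itself afterwards.  Assumes the NvPort portMemSet
 * helper; the function name is hypothetical.
 */
#if 0
static void exampleInitDeferredFlushParams(INST_BLK_INIT_PARAMS *pParams)
{
    portMemSet(pParams, 0, sizeof(*pParams));
    pParams->bDeferFlush = NV_TRUE;   // caller flushes once after all updates
}
#endif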
typedef enum
{
    fault_invalidPde              = 0x00000000,
    fault_invalidPdeSize          = 0x00000001,
    fault_invalidPte              = 0x00000002,
    fault_limitViolation          = 0x00000003,
    fault_unboundInstBlock        = 0x00000004,
    fault_privViolation           = 0x00000005,
    fault_write                   = 0x00000006,
    fault_read                    = 0x00000007,
    fault_pitchMaskViolation      = 0x00000008,
    fault_workCreation            = 0x00000009,
    fault_unsupportedAperture     = 0x0000000a,
    fault_compressionFailure      = 0x0000000b,
    fault_cc_violation            = 0x0000000b,
    fault_unsupportedKind         = 0x0000000c,
    fault_regionViolation         = 0x0000000d,
    fault_poison                  = 0x0000000e,
    fault_atomic                  = 0x0000000f
} FAULT_TYPE;
350 
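/*
 * A fault type is normally converted to a string through the HAL routine
 * kgmmuGetFaultTypeString (see the vtable below).  A minimal stand-alone
 * illustration of such a mapping (hypothetical, abbreviated helper):
 */
#if 0
static const char *exampleFaultTypeName(FAULT_TYPE type)
{
    switch (type)
    {
        case fault_invalidPde: return "invalid pde";
        case fault_invalidPte: return "invalid pte";
        case fault_write:      return "write";
        case fault_read:       return "read";
        default:               return "unknown";
    }
}
#endif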
typedef struct
{
    INST_BLOCK_DESC         mmuFaultInstBlock;
    NvU64                   mmuFaultAddress;
    NvU64                   mmuFaultTimestamp;
    FAULT_TYPE              mmuFaultType;
    NvU32                   mmuFaultAccessType;
    NvU32                   mmuFaultEngineId;
    NvU32                   mmuFaultClientId;
    NvU32                   mmuFaultClientType;
    NvU32                   mmuFaultGpcId;
    NvU8                    bFaultEntryValid        : 1;
    NvU8                    bFaultInProtectedMode   : 1;
    NvU8                    bFaultTypeReplayable    : 1;
    NvU8                    bReplayableFaultEn      : 1;
} MMU_FAULT_BUFFER_ENTRY;
367 
/*!
 * This structure contains information needed for a targeted fault cancel.
 * It is passed in by UVM using SW methods (cl0076.h).
 */
typedef struct
{
    NvU32 clientId;
    NvU32 gpcId;
    INST_BLOCK_DESC instBlock;
} GMMU_FAULT_CANCEL_INFO;
378 
#define VMMU_MAX_GFID 64

/*! Fake sparse table defines */
#define NV_GMMU_FAKE_SPARSE_TABLE_LEVEL_LO          38  // GH100 PDE2 virtAddrBitLo=38
#define NV_GMMU_FAKE_SPARSE_TABLE_LEVELS            3   // PDE2 to PDE4
#define NV_GMMU_FAKE_SPARSE_TABLE_BITS_PER_LEVEL    9
#define NV_GMMU_FAKE_SPARSE_TABLE_ENTRY_SIZE        8
386 
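/*
 * Size arithmetic implied by the defines above: each level holds
 * 2^NV_GMMU_FAKE_SPARSE_TABLE_BITS_PER_LEVEL (512) entries of
 * NV_GMMU_FAKE_SPARSE_TABLE_ENTRY_SIZE (8) bytes, i.e. 4 KB per level, and
 * NV_GMMU_FAKE_SPARSE_TABLE_LEVELS such levels are used.  Hypothetical helper:
 */
#if 0
static NvU64 exampleFakeSparseTableBytesPerLevel(void)
{
    return ((NvU64)1 << NV_GMMU_FAKE_SPARSE_TABLE_BITS_PER_LEVEL) *
           NV_GMMU_FAKE_SPARSE_TABLE_ENTRY_SIZE;   // 512 * 8 = 4096 bytes
}
#endif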
387 
// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
396 
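/*
 * Illustrative sketch of how a translation unit opts into private access: the
 * implementing source file defines the guard before including this header, so
 * PRIVATE_FIELD(x) expands to the plain field name.
 */
#if 0
#define NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
#include "g_kern_gmmu_nvoc.h"
// PRIVATE_FIELD(maxVASize) now expands to maxVASize, so the implementing file
// can reference pKernelGmmu->maxVASize directly; other files get a diagnostic.
#endif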
397 
398 struct KernelGmmu {
399 
400     // Metadata
401     const struct NVOC_RTTI *__nvoc_rtti;
402 
403     // Parent (i.e. superclass or base class) object pointers
404     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
405     struct IntrService __nvoc_base_IntrService;
406 
407     // Ancestor object pointers for `staticCast` feature
408     struct Object *__nvoc_pbase_Object;    // obj super^2
409     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;    // engstate super
410     struct IntrService *__nvoc_pbase_IntrService;    // intrserv super
411     struct KernelGmmu *__nvoc_pbase_KernelGmmu;    // kgmmu
412 
413     // Vtable with 90 per-object function pointers
414     NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu * /*this*/, ENGDESCRIPTOR);  // virtual override (engstate) base (engstate)
415     NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu * /*this*/);  // virtual override (engstate) base (engstate)
416     NV_STATUS (*__kgmmuStateLoad__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual override (engstate) base (engstate)
417     NV_STATUS (*__kgmmuStateUnload__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual override (engstate) base (engstate)
418     NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
419     NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
420     void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu * /*this*/);  // virtual override (engstate) base (engstate)
421     void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceRecord *);  // virtual override (intrserv) base (intrserv)
422     NvBool (*__kgmmuClearInterrupt__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceClearInterruptArguments *);  // virtual override (intrserv) base (intrserv)
423     NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceServiceInterruptArguments *);  // virtual override (intrserv) base (intrserv)
424     NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceServiceNotificationInterruptArguments *);  // virtual halified (singleton optimized) override (intrserv) base (intrserv) body
425     NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu * /*this*/, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);  // halified (2 hals) body
426     NV_STATUS (*__kgmmuCommitTlbInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *);  // halified (2 hals) body
427     NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *);  // halified (2 hals) body
428     NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, TLB_INVALIDATE_PARAMS *);  // halified (2 hals) body
429     void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PTE *, const NvU32);  // halified (2 hals) body
430     void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PTE *, const NvU32);  // halified (2 hals) body
431     void (*__kgmmuFmtInitPte__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);  // halified (2 hals) body
432     void (*__kgmmuFmtInitPde__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);  // halified (2 hals) body
433     NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
434     void (*__kgmmuFmtInitLevels__)(struct KernelGmmu * /*this*/, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);  // halified (4 hals) body
435     void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);  // halified (2 hals) body
436     NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
437     NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu * /*this*/, NvU32, NvU32 *);  // halified (2 hals) body
438     NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu * /*this*/, NvU32, NvBool, NvU32 *);  // halified (2 hals) body
439     NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu * /*this*/, NvU32, NvU32 *);  // halified (2 hals) body
440     NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu * /*this*/, NvU32, GMMU_APERTURE, NvU32 *);  // halified (2 hals) body
441     NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);  // halified (2 hals) body
442     const char * (*__kgmmuGetFaultTypeString__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
443     NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu * /*this*/, NvBool);  // halified (2 hals) body
444     NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu * /*this*/, NvBool);  // halified (2 hals) body
445     NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE);  // halified (2 hals) body
446     void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE);  // halified (2 hals) body
447     NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu * /*this*/);  // halified (2 hals) body
448     NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu * /*this*/);  // halified (2 hals) body
449     NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE);  // halified (2 hals) body
450     NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
451     NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
452     NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
453     NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu * /*this*/, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE, NvBool);  // halified (2 hals) body
454     NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu * /*this*/, NvP64, NvP64);  // halified (2 hals) body
455     void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu * /*this*/, struct HW_FAULT_BUFFER *, NvU32, NvU32);  // halified (2 hals) body
456     GMMU_FAULT_PACKET * (*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu * /*this*/, struct HW_FAULT_BUFFER *, NvU32);  // halified (2 hals) body
457     NvU32 (*__kgmmuCopyFaultPacketToClientShadowBuffer__)(OBJGPU *, struct KernelGmmu * /*this*/, struct GMMU_FAULT_BUFFER *, FAULT_BUFFER_TYPE, NvU32, NvU32, NvU32, struct THREAD_STATE_NODE *, NvU32 *);  // halified (3 hals) body
458     NvBool (*__kgmmuIsReplayableShadowFaultBufferFull__)(OBJGPU *, struct KernelGmmu * /*this*/, GMMU_CLIENT_SHADOW_FAULT_BUFFER *, NvU32, NvU32);  // halified (3 hals) body
459     NvU32 (*__kgmmuReadClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, FAULT_BUFFER_TYPE);  // halified (4 hals) body
460     void (*__kgmmuWriteClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, FAULT_BUFFER_TYPE, NvU32);  // halified (4 hals) body
461     NV_STATUS (*__kgmmuInitCeMmuFaultIdRange__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
462     NvU32 (*__kgmmuGetMinCeEngineId__)(struct KernelGmmu * /*this*/);  // halified (3 hals) body
463     NvU32 (*__kgmmuGetMaxCeEngineId__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (5 hals) body
464     NV_STATUS (*__kgmmuFaultBufferMap__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
465     NV_STATUS (*__kgmmuFaultBufferUnmap__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
466     NV_STATUS (*__kgmmuFaultBufferInit__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
467     NV_STATUS (*__kgmmuFaultBufferDestroy__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
468     NV_STATUS (*__kgmmuFaultBufferLoad__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
469     NV_STATUS (*__kgmmuFaultBufferUnload__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
470     NV_STATUS (*__kgmmuEnableFaultBuffer__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvBool, NvU32);  // halified (2 hals) body
471     NV_STATUS (*__kgmmuDisableFaultBuffer__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvBool, NvU32);  // halified (2 hals) body
472     NvU32 (*__kgmmuSetAndGetDefaultFaultBufferSize__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE, NvU32);  // halified (2 hals) body
473     void (*__kgmmuReadMmuFaultInstHiLo__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
474     void (*__kgmmuReadMmuFaultAddrHiLo__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
475     NvU32 (*__kgmmuReadMmuFaultInfo__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
476     void (*__kgmmuWriteMmuFaultBufferSize__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32, NvU32);  // halified (2 hals) body
477     void (*__kgmmuWriteMmuFaultBufferHiLo__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32, NvU32, NvU32);  // halified (2 hals) body
478     NV_STATUS (*__kgmmuEnableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
479     NV_STATUS (*__kgmmuDisableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
480     NV_STATUS (*__kgmmuEnableMmuFaultOverflowIntr__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
481     void (*__kgmmuSignExtendFaultAddress__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU64 *);  // halified (3 hals) body
482     NV_STATUS (*__kgmmuGetFaultType__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, FAULT_TYPE *);  // halified (3 hals) body
483     NvBool (*__kgmmuIsP2PUnboundInstFault__)(struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (3 hals) body
484     NV_STATUS (*__kgmmuServiceVfPriFaults__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
485     NvBool (*__kgmmuTestVidmemAccessBitBufferError__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
486     void (*__kgmmuDisableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
487     NV_STATUS (*__kgmmuEnableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
488     void (*__kgmmuClearAccessCounterWriteNak__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
489     NV_STATUS (*__kgmmuServiceMthdBuffFaultInBar2Fault__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
490     NV_STATUS (*__kgmmuFaultCancelTargeted__)(OBJGPU *, struct KernelGmmu * /*this*/, GMMU_FAULT_CANCEL_INFO *);  // halified (2 hals) body
491     NV_STATUS (*__kgmmuFaultCancelIssueInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, GMMU_FAULT_CANCEL_INFO *, TLB_INVALIDATE_PARAMS *, NvBool);  // halified (2 hals) body
492     NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu * /*this*/, NvP64, FIFO_MMU_EXCEPTION_DATA *);  // halified (2 hals) body
493     NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu * /*this*/, NvP64, FIFO_MMU_EXCEPTION_DATA *);  // halified (2 hals) body
494     NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
495     NV_STATUS (*__kgmmuCreateFakeSparseTables__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals)
496     NvU8 * (*__kgmmuGetFakeSparseEntry__)(OBJGPU *, struct KernelGmmu * /*this*/, const MMU_FMT_LEVEL *);  // halified (2 hals)
497     void (*__kgmmuInitMissing__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
498     NV_STATUS (*__kgmmuStatePreInitLocked__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
499     NV_STATUS (*__kgmmuStatePreInitUnlocked__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
500     NV_STATUS (*__kgmmuStateInitUnlocked__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
501     NV_STATUS (*__kgmmuStatePreLoad__)(struct OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
502     NV_STATUS (*__kgmmuStatePostUnload__)(struct OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
503     NvBool (*__kgmmuIsPresent__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
504 
505     // 4 PDB properties
506     NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
507     NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
508     NvBool PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE;
509     NvBool PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE;
510 
511     // Data members
512     NvBool bReportFlaTranslationXid;
513     MEMORY_DESCRIPTOR *pFakeSparseBuffer;
514     NvU64 fakeSparseEntry[3];
515     NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
516     NvU64 defaultBigPageSize;
517     NvU32 uvmSharedIntrRmOwnsMask;
518     GMMU_FMT_FAMILY *PRIVATE_FIELD(pFmtFamilies)[3];
519     NvU32 PRIVATE_FIELD(PDEAperture);
520     NvU32 PRIVATE_FIELD(PDEAttr);
521     NvU32 PRIVATE_FIELD(PDEBAR1Aperture);
522     NvU32 PRIVATE_FIELD(PDEBAR1Attr);
523     NvU32 PRIVATE_FIELD(PTEAperture);
524     NvU32 PRIVATE_FIELD(PTEAttr);
525     NvU32 PRIVATE_FIELD(PTEBAR1Aperture);
526     NvU32 PRIVATE_FIELD(PTEBAR1Attr);
527     NvU64 PRIVATE_FIELD(overrideBigPageSize);
528     NvBool PRIVATE_FIELD(bEnablePerVaspaceBigPage);
529     NvBool PRIVATE_FIELD(bIgnoreHubTlbInvalidate);
530     NvU64 PRIVATE_FIELD(maxVASize);
531     struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pdeApertures)[5];
532     struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pteApertures)[5];
533     MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarSmallPageTable);
534     MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarPageDirectory0);
535     struct GMMU_FAULT_BUFFER PRIVATE_FIELD(mmuFaultBuffer)[64];
536     NvU64 PRIVATE_FIELD(sysmemBaseAddress);
537     NvU32 PRIVATE_FIELD(minCeMmuFaultId);
538     NvU32 PRIVATE_FIELD(maxCeMmuFaultId);
539     NvBool PRIVATE_FIELD(bHugePageSupported);
540     NvBool PRIVATE_FIELD(bPageSize512mbSupported);
541     NvBool PRIVATE_FIELD(bPageSize256gbSupported);
542     NvBool PRIVATE_FIELD(bBug2720120WarEnabled);
543     NvBool PRIVATE_FIELD(bVaspaceInteropSupported);
544 };
545 
546 
547 struct KernelGmmu_PRIVATE {
548 
549     // Metadata
550     const struct NVOC_RTTI *__nvoc_rtti;
551 
552     // Parent (i.e. superclass or base class) object pointers
553     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
554     struct IntrService __nvoc_base_IntrService;
555 
556     // Ancestor object pointers for `staticCast` feature
557     struct Object *__nvoc_pbase_Object;    // obj super^2
558     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;    // engstate super
559     struct IntrService *__nvoc_pbase_IntrService;    // intrserv super
560     struct KernelGmmu *__nvoc_pbase_KernelGmmu;    // kgmmu
561 
562     // Vtable with 90 per-object function pointers
563     NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu * /*this*/, ENGDESCRIPTOR);  // virtual override (engstate) base (engstate)
564     NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu * /*this*/);  // virtual override (engstate) base (engstate)
565     NV_STATUS (*__kgmmuStateLoad__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual override (engstate) base (engstate)
566     NV_STATUS (*__kgmmuStateUnload__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual override (engstate) base (engstate)
567     NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
568     NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
569     void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu * /*this*/);  // virtual override (engstate) base (engstate)
570     void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceRecord *);  // virtual override (intrserv) base (intrserv)
571     NvBool (*__kgmmuClearInterrupt__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceClearInterruptArguments *);  // virtual override (intrserv) base (intrserv)
572     NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceServiceInterruptArguments *);  // virtual override (intrserv) base (intrserv)
573     NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(OBJGPU *, struct KernelGmmu * /*this*/, IntrServiceServiceNotificationInterruptArguments *);  // virtual halified (singleton optimized) override (intrserv) base (intrserv) body
574     NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu * /*this*/, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);  // halified (2 hals) body
575     NV_STATUS (*__kgmmuCommitTlbInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *);  // halified (2 hals) body
576     NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *);  // halified (2 hals) body
577     NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, TLB_INVALIDATE_PARAMS *);  // halified (2 hals) body
578     void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PTE *, const NvU32);  // halified (2 hals) body
579     void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PTE *, const NvU32);  // halified (2 hals) body
580     void (*__kgmmuFmtInitPte__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);  // halified (2 hals) body
581     void (*__kgmmuFmtInitPde__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);  // halified (2 hals) body
582     NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
583     void (*__kgmmuFmtInitLevels__)(struct KernelGmmu * /*this*/, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);  // halified (4 hals) body
584     void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu * /*this*/, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);  // halified (2 hals) body
585     NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
586     NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu * /*this*/, NvU32, NvU32 *);  // halified (2 hals) body
587     NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu * /*this*/, NvU32, NvBool, NvU32 *);  // halified (2 hals) body
588     NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu * /*this*/, NvU32, NvU32 *);  // halified (2 hals) body
589     NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu * /*this*/, NvU32, GMMU_APERTURE, NvU32 *);  // halified (2 hals) body
590     NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);  // halified (2 hals) body
591     const char * (*__kgmmuGetFaultTypeString__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
592     NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu * /*this*/, NvBool);  // halified (2 hals) body
593     NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu * /*this*/, NvBool);  // halified (2 hals) body
594     NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE);  // halified (2 hals) body
595     void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE);  // halified (2 hals) body
596     NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu * /*this*/);  // halified (2 hals) body
597     NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu * /*this*/);  // halified (2 hals) body
598     NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE);  // halified (2 hals) body
599     NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
600     NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
601     NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
602     NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu * /*this*/, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE, NvBool);  // halified (2 hals) body
603     NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu * /*this*/, NvP64, NvP64);  // halified (2 hals) body
604     void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu * /*this*/, struct HW_FAULT_BUFFER *, NvU32, NvU32);  // halified (2 hals) body
605     GMMU_FAULT_PACKET * (*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu * /*this*/, struct HW_FAULT_BUFFER *, NvU32);  // halified (2 hals) body
606     NvU32 (*__kgmmuCopyFaultPacketToClientShadowBuffer__)(OBJGPU *, struct KernelGmmu * /*this*/, struct GMMU_FAULT_BUFFER *, FAULT_BUFFER_TYPE, NvU32, NvU32, NvU32, struct THREAD_STATE_NODE *, NvU32 *);  // halified (3 hals) body
607     NvBool (*__kgmmuIsReplayableShadowFaultBufferFull__)(OBJGPU *, struct KernelGmmu * /*this*/, GMMU_CLIENT_SHADOW_FAULT_BUFFER *, NvU32, NvU32);  // halified (3 hals) body
608     NvU32 (*__kgmmuReadClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, FAULT_BUFFER_TYPE);  // halified (4 hals) body
609     void (*__kgmmuWriteClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, FAULT_BUFFER_TYPE, NvU32);  // halified (4 hals) body
610     NV_STATUS (*__kgmmuInitCeMmuFaultIdRange__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
611     NvU32 (*__kgmmuGetMinCeEngineId__)(struct KernelGmmu * /*this*/);  // halified (3 hals) body
612     NvU32 (*__kgmmuGetMaxCeEngineId__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (5 hals) body
613     NV_STATUS (*__kgmmuFaultBufferMap__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
614     NV_STATUS (*__kgmmuFaultBufferUnmap__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
615     NV_STATUS (*__kgmmuFaultBufferInit__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
616     NV_STATUS (*__kgmmuFaultBufferDestroy__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
617     NV_STATUS (*__kgmmuFaultBufferLoad__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
618     NV_STATUS (*__kgmmuFaultBufferUnload__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (2 hals) body
619     NV_STATUS (*__kgmmuEnableFaultBuffer__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvBool, NvU32);  // halified (2 hals) body
620     NV_STATUS (*__kgmmuDisableFaultBuffer__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvBool, NvU32);  // halified (2 hals) body
621     NvU32 (*__kgmmuSetAndGetDefaultFaultBufferSize__)(OBJGPU *, struct KernelGmmu * /*this*/, FAULT_BUFFER_TYPE, NvU32);  // halified (2 hals) body
622     void (*__kgmmuReadMmuFaultInstHiLo__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
623     void (*__kgmmuReadMmuFaultAddrHiLo__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
624     NvU32 (*__kgmmuReadMmuFaultInfo__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
625     void (*__kgmmuWriteMmuFaultBufferSize__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32, NvU32);  // halified (2 hals) body
626     void (*__kgmmuWriteMmuFaultBufferHiLo__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, NvU32, NvU32, NvU32);  // halified (2 hals) body
627     NV_STATUS (*__kgmmuEnableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
628     NV_STATUS (*__kgmmuDisableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
629     NV_STATUS (*__kgmmuEnableMmuFaultOverflowIntr__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
630     void (*__kgmmuSignExtendFaultAddress__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU64 *);  // halified (3 hals) body
631     NV_STATUS (*__kgmmuGetFaultType__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32, FAULT_TYPE *);  // halified (3 hals) body
632     NvBool (*__kgmmuIsP2PUnboundInstFault__)(struct KernelGmmu * /*this*/, NvU32, NvU32);  // halified (3 hals) body
633     NV_STATUS (*__kgmmuServiceVfPriFaults__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
634     NvBool (*__kgmmuTestVidmemAccessBitBufferError__)(OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // halified (2 hals) body
635     void (*__kgmmuDisableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
636     NV_STATUS (*__kgmmuEnableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
637     void (*__kgmmuClearAccessCounterWriteNak__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
638     NV_STATUS (*__kgmmuServiceMthdBuffFaultInBar2Fault__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
639     NV_STATUS (*__kgmmuFaultCancelTargeted__)(OBJGPU *, struct KernelGmmu * /*this*/, GMMU_FAULT_CANCEL_INFO *);  // halified (2 hals) body
640     NV_STATUS (*__kgmmuFaultCancelIssueInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, GMMU_FAULT_CANCEL_INFO *, TLB_INVALIDATE_PARAMS *, NvBool);  // halified (2 hals) body
641     NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu * /*this*/, NvP64, FIFO_MMU_EXCEPTION_DATA *);  // halified (2 hals) body
642     NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu * /*this*/, NvP64, FIFO_MMU_EXCEPTION_DATA *);  // halified (2 hals) body
643     NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals) body
644     NV_STATUS (*__kgmmuCreateFakeSparseTables__)(OBJGPU *, struct KernelGmmu * /*this*/);  // halified (2 hals)
645     NvU8 * (*__kgmmuGetFakeSparseEntry__)(OBJGPU *, struct KernelGmmu * /*this*/, const MMU_FMT_LEVEL *);  // halified (2 hals)
646     void (*__kgmmuInitMissing__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
647     NV_STATUS (*__kgmmuStatePreInitLocked__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
648     NV_STATUS (*__kgmmuStatePreInitUnlocked__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
649     NV_STATUS (*__kgmmuStateInitUnlocked__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
650     NV_STATUS (*__kgmmuStatePreLoad__)(struct OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
651     NV_STATUS (*__kgmmuStatePostUnload__)(struct OBJGPU *, struct KernelGmmu * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
652     NvBool (*__kgmmuIsPresent__)(struct OBJGPU *, struct KernelGmmu * /*this*/);  // virtual inherited (engstate) base (engstate)
653 
654     // 4 PDB properties
655     NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
656     NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
657     NvBool PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE;
658     NvBool PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE;
659 
660     // Data members
661     NvBool bReportFlaTranslationXid;
662     MEMORY_DESCRIPTOR *pFakeSparseBuffer;
663     NvU64 fakeSparseEntry[3];
664     NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
665     NvU64 defaultBigPageSize;
666     NvU32 uvmSharedIntrRmOwnsMask;
667     GMMU_FMT_FAMILY *pFmtFamilies[3];
668     NvU32 PDEAperture;
669     NvU32 PDEAttr;
670     NvU32 PDEBAR1Aperture;
671     NvU32 PDEBAR1Attr;
672     NvU32 PTEAperture;
673     NvU32 PTEAttr;
674     NvU32 PTEBAR1Aperture;
675     NvU32 PTEBAR1Attr;
676     NvU64 overrideBigPageSize;
677     NvBool bEnablePerVaspaceBigPage;
678     NvBool bIgnoreHubTlbInvalidate;
679     NvU64 maxVASize;
680     struct NV_FIELD_ENUM_ENTRY pdeApertures[5];
681     struct NV_FIELD_ENUM_ENTRY pteApertures[5];
682     MEMORY_DESCRIPTOR *pWarSmallPageTable;
683     MEMORY_DESCRIPTOR *pWarPageDirectory0;
684     struct GMMU_FAULT_BUFFER mmuFaultBuffer[64];
685     NvU64 sysmemBaseAddress;
686     NvU32 minCeMmuFaultId;
687     NvU32 maxCeMmuFaultId;
688     NvBool bHugePageSupported;
689     NvBool bPageSize512mbSupported;
690     NvBool bPageSize256gbSupported;
691     NvBool bBug2720120WarEnabled;
692     NvBool bVaspaceInteropSupported;
693 };
694 
695 #ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__
696 #define __NVOC_CLASS_KernelGmmu_TYPEDEF__
697 typedef struct KernelGmmu KernelGmmu;
698 #endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */
699 
700 #ifndef __nvoc_class_id_KernelGmmu
701 #define __nvoc_class_id_KernelGmmu 0x29362f
702 #endif /* __nvoc_class_id_KernelGmmu */
703 
704 // Casting support
705 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu;
706 
707 #define __staticCast_KernelGmmu(pThis) \
708     ((pThis)->__nvoc_pbase_KernelGmmu)
709 
710 #ifdef __nvoc_kern_gmmu_h_disabled
711 #define __dynamicCast_KernelGmmu(pThis) ((KernelGmmu*)NULL)
712 #else //__nvoc_kern_gmmu_h_disabled
713 #define __dynamicCast_KernelGmmu(pThis) \
714     ((KernelGmmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGmmu)))
715 #endif //__nvoc_kern_gmmu_h_disabled
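/*
 * Illustrative use of the casting helpers above (variable names are
 * hypothetical): the static cast is unconditional, while the dynamic cast
 * consults the NVOC RTTI data and yields NULL when the object is not a
 * KernelGmmu.
 */
#if 0
KernelGmmu *pKernelGmmu = __staticCast_KernelGmmu(pDerivedObject);
KernelGmmu *pMaybeGmmu  = __dynamicCast_KernelGmmu(pSomeDynamicObject);
if (pMaybeGmmu == NULL)
{
    // pSomeDynamicObject is not (derived from) a KernelGmmu.
}
#endif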
716 
717 // Property macros
718 #define PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE_BASE_CAST
719 #define PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE_BASE_NAME PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE
720 #define PDB_PROP_KGMMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
721 #define PDB_PROP_KGMMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
722 #define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_CAST
723 #define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_NAME PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED
724 #define PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE_BASE_CAST
725 #define PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE_BASE_NAME PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE
726 #define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_CAST
727 #define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_NAME PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED
728 
729 NV_STATUS __nvoc_objCreateDynamic_KernelGmmu(KernelGmmu**, Dynamic*, NvU32, va_list);
730 
731 NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
732 #define __objCreate_KernelGmmu(ppNewObj, pParent, createFlags) \
733     __nvoc_objCreate_KernelGmmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
734 
735 
736 // Wrapper macros
737 #define kgmmuConstructEngine_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuConstructEngine__
738 #define kgmmuConstructEngine(pGpu, pKernelGmmu, arg3) kgmmuConstructEngine_DISPATCH(pGpu, pKernelGmmu, arg3)
739 #define kgmmuStateInitLocked_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuStateInitLocked__
740 #define kgmmuStateInitLocked(pGpu, pKernelGmmu) kgmmuStateInitLocked_DISPATCH(pGpu, pKernelGmmu)
741 #define kgmmuStateLoad_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuStateLoad__
742 #define kgmmuStateLoad(pGpu, pKernelGmmu, arg3) kgmmuStateLoad_DISPATCH(pGpu, pKernelGmmu, arg3)
743 #define kgmmuStateUnload_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuStateUnload__
744 #define kgmmuStateUnload(pGpu, pKernelGmmu, arg3) kgmmuStateUnload_DISPATCH(pGpu, pKernelGmmu, arg3)
745 #define kgmmuStatePostLoad_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuStatePostLoad__
746 #define kgmmuStatePostLoad(pGpu, pKernelGmmu, arg3) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg3)
747 #define kgmmuStatePostLoad_HAL(pGpu, pKernelGmmu, arg3) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg3)
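/*
 * Conceptual illustration of the wrapper-macro pattern: each kgmmu* wrapper
 * expands to a *_DISPATCH helper (generated later in this header) that calls
 * through the per-object function pointer, roughly as sketched here.
 */
#if 0
static NV_STATUS kgmmuStatePostLoad_DISPATCH_sketch(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags)
{
    // Invoke whichever HAL/override implementation was installed for this object.
    return pKernelGmmu->__kgmmuStatePostLoad__(pGpu, pKernelGmmu, flags);
}
#endif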
748 #define kgmmuStatePreUnload_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuStatePreUnload__
749 #define kgmmuStatePreUnload(pGpu, pKernelGmmu, arg3) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg3)
750 #define kgmmuStatePreUnload_HAL(pGpu, pKernelGmmu, arg3) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg3)
751 #define kgmmuStateDestroy_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuStateDestroy__
752 #define kgmmuStateDestroy(pGpu, pKernelGmmu) kgmmuStateDestroy_DISPATCH(pGpu, pKernelGmmu)
753 #define kgmmuRegisterIntrService_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuRegisterIntrService__
754 #define kgmmuRegisterIntrService(pGpu, pKernelGmmu, arg3) kgmmuRegisterIntrService_DISPATCH(pGpu, pKernelGmmu, arg3)
755 #define kgmmuClearInterrupt_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuClearInterrupt__
756 #define kgmmuClearInterrupt(pGpu, pKernelGmmu, pParams) kgmmuClearInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
757 #define kgmmuServiceInterrupt_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuServiceInterrupt__
758 #define kgmmuServiceInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
759 #define kgmmuServiceNotificationInterrupt_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuServiceNotificationInterrupt__
760 #define kgmmuServiceNotificationInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
761 #define kgmmuServiceNotificationInterrupt_HAL(pGpu, pKernelGmmu, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
762 #define kgmmuInstBlkVaLimitGet_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuInstBlkVaLimitGet__
763 #define kgmmuInstBlkVaLimitGet(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
764 #define kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
765 #define kgmmuCommitTlbInvalidate_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuCommitTlbInvalidate__
766 #define kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate_DISPATCH(pGpu, pKernelGmmu, pParams)
767 #define kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate_DISPATCH(pGpu, pKernelGmmu, pParams)
768 #define kgmmuSetTlbInvalidateMembarWarParameters_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuSetTlbInvalidateMembarWarParameters__
769 #define kgmmuSetTlbInvalidateMembarWarParameters(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
770 #define kgmmuSetTlbInvalidateMembarWarParameters_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
771 #define kgmmuSetTlbInvalidationScope_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuSetTlbInvalidationScope__
772 #define kgmmuSetTlbInvalidationScope(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
773 #define kgmmuSetTlbInvalidationScope_HAL(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
774 #define kgmmuFmtInitPteComptagLine_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtInitPteComptagLine__
775 #define kgmmuFmtInitPteComptagLine(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
776 #define kgmmuFmtInitPteComptagLine_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
777 #define kgmmuFmtInitPeerPteFld_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtInitPeerPteFld__
778 #define kgmmuFmtInitPeerPteFld(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
779 #define kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
780 #define kgmmuFmtInitPte_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtInitPte__
781 #define kgmmuFmtInitPte(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
782 #define kgmmuFmtInitPte_HAL(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
783 #define kgmmuFmtInitPde_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtInitPde__
784 #define kgmmuFmtInitPde(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
785 #define kgmmuFmtInitPde_HAL(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
786 #define kgmmuFmtIsVersionSupported_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtIsVersionSupported__
787 #define kgmmuFmtIsVersionSupported(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
788 #define kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
789 #define kgmmuFmtInitLevels_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtInitLevels__
790 #define kgmmuFmtInitLevels(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
791 #define kgmmuFmtInitLevels_HAL(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
792 #define kgmmuFmtInitPdeMulti_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtInitPdeMulti__
793 #define kgmmuFmtInitPdeMulti(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
794 #define kgmmuFmtInitPdeMulti_HAL(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
795 #define kgmmuFmtFamiliesInit_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFmtFamiliesInit__
796 #define kgmmuFmtFamiliesInit(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
797 #define kgmmuFmtFamiliesInit_HAL(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
798 #define kgmmuTranslatePtePcfFromSw_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuTranslatePtePcfFromSw__
799 #define kgmmuTranslatePtePcfFromSw(pKernelGmmu, arg2, arg3) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg2, arg3)
800 #define kgmmuTranslatePtePcfFromSw_HAL(pKernelGmmu, arg2, arg3) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg2, arg3)
#define kgmmuTranslatePtePcfFromHw_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuTranslatePtePcfFromHw__
#define kgmmuTranslatePtePcfFromHw(pKernelGmmu, arg2, arg3, arg4) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg2, arg3, arg4)
#define kgmmuTranslatePtePcfFromHw_HAL(pKernelGmmu, arg2, arg3, arg4) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg2, arg3, arg4)
#define kgmmuTranslatePdePcfFromSw_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuTranslatePdePcfFromSw__
#define kgmmuTranslatePdePcfFromSw(pKernelGmmu, arg2, arg3) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg2, arg3)
#define kgmmuTranslatePdePcfFromSw_HAL(pKernelGmmu, arg2, arg3) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg2, arg3)
#define kgmmuTranslatePdePcfFromHw_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuTranslatePdePcfFromHw__
#define kgmmuTranslatePdePcfFromHw(pKernelGmmu, arg2, arg3, arg4) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg2, arg3, arg4)
#define kgmmuTranslatePdePcfFromHw_HAL(pKernelGmmu, arg2, arg3, arg4) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg2, arg3, arg4)
#define kgmmuGetFaultRegisterMappings_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetFaultRegisterMappings__
#define kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
#define kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
#define kgmmuGetFaultTypeString_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetFaultTypeString__
#define kgmmuGetFaultTypeString(pKernelGmmu, faultType) kgmmuGetFaultTypeString_DISPATCH(pKernelGmmu, faultType)
#define kgmmuGetFaultTypeString_HAL(pKernelGmmu, faultType) kgmmuGetFaultTypeString_DISPATCH(pKernelGmmu, faultType)
#define kgmmuIssueReplayableFaultBufferFlush_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__
#define kgmmuIssueReplayableFaultBufferFlush(pGpu, pKernelGmmu, bCopyAndFlush) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu, bCopyAndFlush)
#define kgmmuIssueReplayableFaultBufferFlush_HAL(pGpu, pKernelGmmu, bCopyAndFlush) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu, bCopyAndFlush)
#define kgmmuToggleFaultOnPrefetch_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuToggleFaultOnPrefetch__
#define kgmmuToggleFaultOnPrefetch(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
#define kgmmuToggleFaultOnPrefetch_HAL(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
#define kgmmuFaultBufferAllocSharedMemory_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferAllocSharedMemory__
#define kgmmuFaultBufferAllocSharedMemory(pGpu, pKernelGmmu, arg3) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuFaultBufferAllocSharedMemory_HAL(pGpu, pKernelGmmu, arg3) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuFaultBufferFreeSharedMemory_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferFreeSharedMemory__
#define kgmmuFaultBufferFreeSharedMemory(pGpu, pKernelGmmu, arg3) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuFaultBufferFreeSharedMemory_HAL(pGpu, pKernelGmmu, arg3) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuSetupWarForBug2720120_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuSetupWarForBug2720120__
#define kgmmuSetupWarForBug2720120(pKernelGmmu) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu)
#define kgmmuSetupWarForBug2720120_HAL(pKernelGmmu) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu)
#define kgmmuGetGraphicsEngineId_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetGraphicsEngineId__
#define kgmmuGetGraphicsEngineId(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
#define kgmmuGetGraphicsEngineId_HAL(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
#define kgmmuReadShadowBufPutIndex_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuReadShadowBufPutIndex__
#define kgmmuReadShadowBufPutIndex(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
#define kgmmuReadShadowBufPutIndex_HAL(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
#define kgmmuIsFaultEngineBar1_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuIsFaultEngineBar1__
#define kgmmuIsFaultEngineBar1(pKernelGmmu, arg2) kgmmuIsFaultEngineBar1_DISPATCH(pKernelGmmu, arg2)
#define kgmmuIsFaultEngineBar1_HAL(pKernelGmmu, arg2) kgmmuIsFaultEngineBar1_DISPATCH(pKernelGmmu, arg2)
#define kgmmuIsFaultEngineBar2_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuIsFaultEngineBar2__
#define kgmmuIsFaultEngineBar2(pKernelGmmu, arg2) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg2)
#define kgmmuIsFaultEngineBar2_HAL(pKernelGmmu, arg2) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg2)
#define kgmmuIsFaultEnginePhysical_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuIsFaultEnginePhysical__
#define kgmmuIsFaultEnginePhysical(pKernelGmmu, arg2) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg2)
#define kgmmuIsFaultEnginePhysical_HAL(pKernelGmmu, arg2) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg2)
#define kgmmuCopyMmuFaults_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuCopyMmuFaults__
#define kgmmuCopyMmuFaults(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit)
#define kgmmuCopyMmuFaults_HAL(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit)
#define kgmmuParseFaultPacket_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuParseFaultPacket__
#define kgmmuParseFaultPacket(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
#define kgmmuParseFaultPacket_HAL(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
#define kgmmuFaultBufferClearPackets_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferClearPackets__
#define kgmmuFaultBufferClearPackets(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
#define kgmmuFaultBufferClearPackets_HAL(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
#define kgmmuFaultBufferGetFault_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferGetFault__
#define kgmmuFaultBufferGetFault(pGpu, pKernelGmmu, pFaultBuffer, idx) kgmmuFaultBufferGetFault_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, idx)
#define kgmmuFaultBufferGetFault_HAL(pGpu, pKernelGmmu, pFaultBuffer, idx) kgmmuFaultBufferGetFault_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, idx)
#define kgmmuCopyFaultPacketToClientShadowBuffer_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuCopyFaultPacketToClientShadowBuffer__
#define kgmmuCopyFaultPacketToClientShadowBuffer(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied) kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied)
#define kgmmuCopyFaultPacketToClientShadowBuffer_HAL(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied) kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied)
#define kgmmuIsReplayableShadowFaultBufferFull_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuIsReplayableShadowFaultBufferFull__
#define kgmmuIsReplayableShadowFaultBufferFull(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries) kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries)
#define kgmmuIsReplayableShadowFaultBufferFull_HAL(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries) kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries)
#define kgmmuReadClientShadowBufPutIndex_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuReadClientShadowBufPutIndex__
#define kgmmuReadClientShadowBufPutIndex(pGpu, pKernelGmmu, gfid, type) kgmmuReadClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type)
#define kgmmuReadClientShadowBufPutIndex_HAL(pGpu, pKernelGmmu, gfid, type) kgmmuReadClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type)
#define kgmmuWriteClientShadowBufPutIndex_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuWriteClientShadowBufPutIndex__
#define kgmmuWriteClientShadowBufPutIndex(pGpu, pKernelGmmu, gfid, type, putIndex) kgmmuWriteClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type, putIndex)
#define kgmmuWriteClientShadowBufPutIndex_HAL(pGpu, pKernelGmmu, gfid, type, putIndex) kgmmuWriteClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type, putIndex)
#define kgmmuInitCeMmuFaultIdRange_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuInitCeMmuFaultIdRange__
#define kgmmuInitCeMmuFaultIdRange(pGpu, pKernelGmmu) kgmmuInitCeMmuFaultIdRange_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuInitCeMmuFaultIdRange_HAL(pGpu, pKernelGmmu) kgmmuInitCeMmuFaultIdRange_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetMinCeEngineId_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetMinCeEngineId__
#define kgmmuGetMinCeEngineId(pKernelGmmu) kgmmuGetMinCeEngineId_DISPATCH(pKernelGmmu)
#define kgmmuGetMinCeEngineId_HAL(pKernelGmmu) kgmmuGetMinCeEngineId_DISPATCH(pKernelGmmu)
#define kgmmuGetMaxCeEngineId_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetMaxCeEngineId__
#define kgmmuGetMaxCeEngineId(pGpu, pKernelGmmu) kgmmuGetMaxCeEngineId_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetMaxCeEngineId_HAL(pGpu, pKernelGmmu) kgmmuGetMaxCeEngineId_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferMap_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferMap__
#define kgmmuFaultBufferMap(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferMap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferMap_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferMap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnmap_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferUnmap__
#define kgmmuFaultBufferUnmap(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnmap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnmap_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnmap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferInit_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferInit__
#define kgmmuFaultBufferInit(pGpu, pKernelGmmu) kgmmuFaultBufferInit_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferInit_HAL(pGpu, pKernelGmmu) kgmmuFaultBufferInit_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferDestroy_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferDestroy__
#define kgmmuFaultBufferDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferDestroy_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferDestroy_HAL(pGpu, pKernelGmmu) kgmmuFaultBufferDestroy_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferLoad_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferLoad__
#define kgmmuFaultBufferLoad(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferLoad_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferLoad_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferLoad_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnload_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultBufferUnload__
#define kgmmuFaultBufferUnload(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnload_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnload_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnload_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuEnableFaultBuffer_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuEnableFaultBuffer__
#define kgmmuEnableFaultBuffer(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuEnableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuEnableFaultBuffer_HAL(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuEnableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuDisableFaultBuffer_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuDisableFaultBuffer__
#define kgmmuDisableFaultBuffer(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuDisableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuDisableFaultBuffer_HAL(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuDisableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuSetAndGetDefaultFaultBufferSize_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuSetAndGetDefaultFaultBufferSize__
#define kgmmuSetAndGetDefaultFaultBufferSize(pGpu, pKernelGmmu, index, gfid) kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuSetAndGetDefaultFaultBufferSize_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuReadMmuFaultInstHiLo_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuReadMmuFaultInstHiLo__
#define kgmmuReadMmuFaultInstHiLo(pGpu, pKernelGmmu, arg3, arg4) kgmmuReadMmuFaultInstHiLo_DISPATCH(pGpu, pKernelGmmu, arg3, arg4)
#define kgmmuReadMmuFaultInstHiLo_HAL(pGpu, pKernelGmmu, arg3, arg4) kgmmuReadMmuFaultInstHiLo_DISPATCH(pGpu, pKernelGmmu, arg3, arg4)
#define kgmmuReadMmuFaultAddrHiLo_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuReadMmuFaultAddrHiLo__
#define kgmmuReadMmuFaultAddrHiLo(pGpu, pKernelGmmu, arg3, arg4) kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, arg3, arg4)
#define kgmmuReadMmuFaultAddrHiLo_HAL(pGpu, pKernelGmmu, arg3, arg4) kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, arg3, arg4)
#define kgmmuReadMmuFaultInfo_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuReadMmuFaultInfo__
#define kgmmuReadMmuFaultInfo(pGpu, pKernelGmmu) kgmmuReadMmuFaultInfo_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuReadMmuFaultInfo_HAL(pGpu, pKernelGmmu) kgmmuReadMmuFaultInfo_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuWriteMmuFaultBufferSize_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuWriteMmuFaultBufferSize__
#define kgmmuWriteMmuFaultBufferSize(pGpu, pKernelGmmu, arg3, arg4, gfid) kgmmuWriteMmuFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, arg3, arg4, gfid)
#define kgmmuWriteMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg3, arg4, gfid) kgmmuWriteMmuFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, arg3, arg4, gfid)
#define kgmmuWriteMmuFaultBufferHiLo_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuWriteMmuFaultBufferHiLo__
#define kgmmuWriteMmuFaultBufferHiLo(pGpu, pKernelGmmu, arg3, arg4, arg5, gfid) kgmmuWriteMmuFaultBufferHiLo_DISPATCH(pGpu, pKernelGmmu, arg3, arg4, arg5, gfid)
#define kgmmuWriteMmuFaultBufferHiLo_HAL(pGpu, pKernelGmmu, arg3, arg4, arg5, gfid) kgmmuWriteMmuFaultBufferHiLo_DISPATCH(pGpu, pKernelGmmu, arg3, arg4, arg5, gfid)
#define kgmmuEnableMmuFaultInterrupts_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuEnableMmuFaultInterrupts__
#define kgmmuEnableMmuFaultInterrupts(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuEnableMmuFaultInterrupts_HAL(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuDisableMmuFaultInterrupts_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuDisableMmuFaultInterrupts__
#define kgmmuDisableMmuFaultInterrupts(pGpu, pKernelGmmu, index) kgmmuDisableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuDisableMmuFaultInterrupts_HAL(pGpu, pKernelGmmu, index) kgmmuDisableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuEnableMmuFaultOverflowIntr_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuEnableMmuFaultOverflowIntr__
#define kgmmuEnableMmuFaultOverflowIntr(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultOverflowIntr_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuEnableMmuFaultOverflowIntr_HAL(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultOverflowIntr_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuSignExtendFaultAddress_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuSignExtendFaultAddress__
#define kgmmuSignExtendFaultAddress(pGpu, pKernelGmmu, pMmuFaultAddress) kgmmuSignExtendFaultAddress_DISPATCH(pGpu, pKernelGmmu, pMmuFaultAddress)
#define kgmmuSignExtendFaultAddress_HAL(pGpu, pKernelGmmu, pMmuFaultAddress) kgmmuSignExtendFaultAddress_DISPATCH(pGpu, pKernelGmmu, pMmuFaultAddress)
#define kgmmuGetFaultType_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetFaultType__
#define kgmmuGetFaultType(pGpu, pKernelGmmu, fault, pMmuFaultType) kgmmuGetFaultType_DISPATCH(pGpu, pKernelGmmu, fault, pMmuFaultType)
#define kgmmuGetFaultType_HAL(pGpu, pKernelGmmu, fault, pMmuFaultType) kgmmuGetFaultType_DISPATCH(pGpu, pKernelGmmu, fault, pMmuFaultType)
#define kgmmuIsP2PUnboundInstFault_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuIsP2PUnboundInstFault__
#define kgmmuIsP2PUnboundInstFault(pKernelGmmu, arg2, arg3) kgmmuIsP2PUnboundInstFault_DISPATCH(pKernelGmmu, arg2, arg3)
#define kgmmuIsP2PUnboundInstFault_HAL(pKernelGmmu, arg2, arg3) kgmmuIsP2PUnboundInstFault_DISPATCH(pKernelGmmu, arg2, arg3)
#define kgmmuServiceVfPriFaults_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuServiceVfPriFaults__
#define kgmmuServiceVfPriFaults(pGpu, pKernelGmmu, faultType) kgmmuServiceVfPriFaults_DISPATCH(pGpu, pKernelGmmu, faultType)
#define kgmmuServiceVfPriFaults_HAL(pGpu, pKernelGmmu, faultType) kgmmuServiceVfPriFaults_DISPATCH(pGpu, pKernelGmmu, faultType)
#define kgmmuTestVidmemAccessBitBufferError_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuTestVidmemAccessBitBufferError__
#define kgmmuTestVidmemAccessBitBufferError(pGpu, pKernelGmmu, arg3) kgmmuTestVidmemAccessBitBufferError_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuTestVidmemAccessBitBufferError_HAL(pGpu, pKernelGmmu, arg3) kgmmuTestVidmemAccessBitBufferError_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuDisableVidmemAccessBitBuf_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuDisableVidmemAccessBitBuf__
#define kgmmuDisableVidmemAccessBitBuf(pGpu, pKernelGmmu) kgmmuDisableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuDisableVidmemAccessBitBuf_HAL(pGpu, pKernelGmmu) kgmmuDisableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuEnableVidmemAccessBitBuf_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuEnableVidmemAccessBitBuf__
#define kgmmuEnableVidmemAccessBitBuf(pGpu, pKernelGmmu) kgmmuEnableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuEnableVidmemAccessBitBuf_HAL(pGpu, pKernelGmmu) kgmmuEnableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearAccessCounterWriteNak_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuClearAccessCounterWriteNak__
#define kgmmuClearAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuClearAccessCounterWriteNak_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuClearAccessCounterWriteNak_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuServiceMthdBuffFaultInBar2Fault_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuServiceMthdBuffFaultInBar2Fault__
#define kgmmuServiceMthdBuffFaultInBar2Fault(pGpu, pKernelGmmu) kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuServiceMthdBuffFaultInBar2Fault_HAL(pGpu, pKernelGmmu) kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultCancelTargeted_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultCancelTargeted__
#define kgmmuFaultCancelTargeted(pGpu, pKernelGmmu, arg3) kgmmuFaultCancelTargeted_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuFaultCancelTargeted_HAL(pGpu, pKernelGmmu, arg3) kgmmuFaultCancelTargeted_DISPATCH(pGpu, pKernelGmmu, arg3)
#define kgmmuFaultCancelIssueInvalidate_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuFaultCancelIssueInvalidate__
#define kgmmuFaultCancelIssueInvalidate(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal) kgmmuFaultCancelIssueInvalidate_DISPATCH(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal)
#define kgmmuFaultCancelIssueInvalidate_HAL(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal) kgmmuFaultCancelIssueInvalidate_DISPATCH(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal)
#define kgmmuServiceMmuFault_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuServiceMmuFault__
#define kgmmuServiceMmuFault(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData) kgmmuServiceMmuFault_DISPATCH(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData)
#define kgmmuServiceMmuFault_HAL(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData) kgmmuServiceMmuFault_DISPATCH(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData)
#define kgmmuServiceUnboundInstBlockFault_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuServiceUnboundInstBlockFault__
#define kgmmuServiceUnboundInstBlockFault(pGpu, pKernelGmmu, arg3, arg4) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg3, arg4)
#define kgmmuServiceUnboundInstBlockFault_HAL(pGpu, pKernelGmmu, arg3, arg4) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg3, arg4)
#define kgmmuGetEccCounts_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuGetEccCounts__
#define kgmmuGetEccCounts(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetEccCounts_HAL(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuCreateFakeSparseTables_FNPTR(arg_this) arg_this->__kgmmuCreateFakeSparseTables__
#define kgmmuCreateFakeSparseTables(arg1, arg_this) kgmmuCreateFakeSparseTables_DISPATCH(arg1, arg_this)
#define kgmmuCreateFakeSparseTables_HAL(arg1, arg_this) kgmmuCreateFakeSparseTables_DISPATCH(arg1, arg_this)
#define kgmmuGetFakeSparseEntry_FNPTR(arg_this) arg_this->__kgmmuGetFakeSparseEntry__
#define kgmmuGetFakeSparseEntry(arg1, arg_this, arg3) kgmmuGetFakeSparseEntry_DISPATCH(arg1, arg_this, arg3)
#define kgmmuGetFakeSparseEntry_HAL(arg1, arg_this, arg3) kgmmuGetFakeSparseEntry_DISPATCH(arg1, arg_this, arg3)
#define kgmmuInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateInitMissing__
#define kgmmuInitMissing(pGpu, pEngstate) kgmmuInitMissing_DISPATCH(pGpu, pEngstate)
#define kgmmuStatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__
#define kgmmuStatePreInitLocked(pGpu, pEngstate) kgmmuStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kgmmuStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreInitUnlocked__
#define kgmmuStatePreInitUnlocked(pGpu, pEngstate) kgmmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgmmuStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStateInitUnlocked__
#define kgmmuStateInitUnlocked(pGpu, pEngstate) kgmmuStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgmmuStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreLoad__
#define kgmmuStatePreLoad(pGpu, pEngstate, arg3) kgmmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg3)
#define kgmmuStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePostUnload__
#define kgmmuStatePostUnload(pGpu, pEngstate, arg3) kgmmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg3)
#define kgmmuIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateIsPresent__
#define kgmmuIsPresent(pGpu, pEngstate) kgmmuIsPresent_DISPATCH(pGpu, pEngstate)

// Dispatch functions
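//
// Note: each _DISPATCH helper below is a thin forwarder that calls through the
// per-object function pointer of the same name (e.g. __kgmmuConstructEngine__)
// installed by NVOC when the KernelGmmu object is constructed; the kgmmu* and
// kgmmu*_HAL macros above resolve to these helpers.
//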
static inline NV_STATUS kgmmuConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg3) {
    return pKernelGmmu->__kgmmuConstructEngine__(pGpu, pKernelGmmu, arg3);
}

static inline NV_STATUS kgmmuStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuStateInitLocked__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    return pKernelGmmu->__kgmmuStateLoad__(pGpu, pKernelGmmu, arg3);
}

static inline NV_STATUS kgmmuStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    return pKernelGmmu->__kgmmuStateUnload__(pGpu, pKernelGmmu, arg3);
}

static inline NV_STATUS kgmmuStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    return pKernelGmmu->__kgmmuStatePostLoad__(pGpu, pKernelGmmu, arg3);
}

static inline NV_STATUS kgmmuStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    return pKernelGmmu->__kgmmuStatePreUnload__(pGpu, pKernelGmmu, arg3);
}

static inline void kgmmuStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuStateDestroy__(pGpu, pKernelGmmu);
}

static inline void kgmmuRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg3[175]) {
    pKernelGmmu->__kgmmuRegisterIntrService__(pGpu, pKernelGmmu, arg3);
}

static inline NvBool kgmmuClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceClearInterruptArguments *pParams) {
    return pKernelGmmu->__kgmmuClearInterrupt__(pGpu, pKernelGmmu, pParams);
}

static inline NvU32 kgmmuServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams) {
    return pKernelGmmu->__kgmmuServiceInterrupt__(pGpu, pKernelGmmu, pParams);
}

static inline NV_STATUS kgmmuServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceNotificationInterruptArguments *pParams) {
    return pKernelGmmu->__kgmmuServiceNotificationInterrupt__(pGpu, pKernelGmmu, pParams);
}

static inline NV_STATUS kgmmuInstBlkVaLimitGet_DISPATCH(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
    return pKernelGmmu->__kgmmuInstBlkVaLimitGet__(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData);
}

static inline NV_STATUS kgmmuCommitTlbInvalidate_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
    return pKernelGmmu->__kgmmuCommitTlbInvalidate__(pGpu, pKernelGmmu, pParams);
}

static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
    return pKernelGmmu->__kgmmuSetTlbInvalidateMembarWarParameters__(pGpu, pKernelGmmu, pParams);
}

static inline NV_STATUS kgmmuSetTlbInvalidationScope_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
    return pKernelGmmu->__kgmmuSetTlbInvalidationScope__(pGpu, pKernelGmmu, flags, pParams);
}

static inline void kgmmuFmtInitPteComptagLine_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
    pKernelGmmu->__kgmmuFmtInitPteComptagLine__(pKernelGmmu, pPte, version);
}

static inline void kgmmuFmtInitPeerPteFld_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
    pKernelGmmu->__kgmmuFmtInitPeerPteFld__(pKernelGmmu, pPte, version);
}

static inline void kgmmuFmtInitPte_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture) {
    pKernelGmmu->__kgmmuFmtInitPte__(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture);
}

static inline void kgmmuFmtInitPde_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
    pKernelGmmu->__kgmmuFmtInitPde__(pKernelGmmu, pPde, version, pPdeApertures);
}

static inline NvBool kgmmuFmtIsVersionSupported_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 version) {
    return pKernelGmmu->__kgmmuFmtIsVersionSupported__(pKernelGmmu, version);
}

static inline void kgmmuFmtInitLevels_DISPATCH(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift) {
    pKernelGmmu->__kgmmuFmtInitLevels__(pKernelGmmu, pLevels, numLevels, version, bigPageShift);
}

static inline void kgmmuFmtInitPdeMulti_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
    pKernelGmmu->__kgmmuFmtInitPdeMulti__(pKernelGmmu, pPdeMulti, version, pPdeApertures);
}

static inline NV_STATUS kgmmuFmtFamiliesInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuFmtFamiliesInit__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuTranslatePtePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 *arg3) {
    return pKernelGmmu->__kgmmuTranslatePtePcfFromSw__(pKernelGmmu, arg2, arg3);
}

static inline NV_STATUS kgmmuTranslatePtePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvBool arg3, NvU32 *arg4) {
    return pKernelGmmu->__kgmmuTranslatePtePcfFromHw__(pKernelGmmu, arg2, arg3, arg4);
}

static inline NV_STATUS kgmmuTranslatePdePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 *arg3) {
    return pKernelGmmu->__kgmmuTranslatePdePcfFromSw__(pKernelGmmu, arg2, arg3);
}

static inline NV_STATUS kgmmuTranslatePdePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2, GMMU_APERTURE arg3, NvU32 *arg4) {
    return pKernelGmmu->__kgmmuTranslatePdePcfFromHw__(pKernelGmmu, arg2, arg3, arg4);
}

static inline NV_STATUS kgmmuGetFaultRegisterMappings_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl) {
    return pKernelGmmu->__kgmmuGetFaultRegisterMappings__(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl);
}

static inline const char * kgmmuGetFaultTypeString_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
    return pKernelGmmu->__kgmmuGetFaultTypeString__(pKernelGmmu, faultType);
}

static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush) {
    return pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__(pGpu, pKernelGmmu, bCopyAndFlush);
}

static inline NV_STATUS kgmmuToggleFaultOnPrefetch_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable) {
    return pKernelGmmu->__kgmmuToggleFaultOnPrefetch__(pGpu, pKernelGmmu, bEnable);
}

static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
    return pKernelGmmu->__kgmmuFaultBufferAllocSharedMemory__(pGpu, pKernelGmmu, arg3);
}

static inline void kgmmuFaultBufferFreeSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
    pKernelGmmu->__kgmmuFaultBufferFreeSharedMemory__(pGpu, pKernelGmmu, arg3);
}

static inline NV_STATUS kgmmuSetupWarForBug2720120_DISPATCH(struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuSetupWarForBug2720120__(pKernelGmmu);
}

static inline NvU32 kgmmuGetGraphicsEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetGraphicsEngineId__(pKernelGmmu);
}

static inline NvU32 kgmmuReadShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
    return pKernelGmmu->__kgmmuReadShadowBufPutIndex__(pGpu, pKernelGmmu, type);
}

static inline NvBool kgmmuIsFaultEngineBar1_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2) {
    return pKernelGmmu->__kgmmuIsFaultEngineBar1__(pKernelGmmu, arg2);
}

static inline NvBool kgmmuIsFaultEngineBar2_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2) {
    return pKernelGmmu->__kgmmuIsFaultEngineBar2__(pKernelGmmu, arg2);
}

static inline NvBool kgmmuIsFaultEnginePhysical_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2) {
    return pKernelGmmu->__kgmmuIsFaultEnginePhysical__(pKernelGmmu, arg2);
}

static inline NV_STATUS kgmmuCopyMmuFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit) {
    return pKernelGmmu->__kgmmuCopyMmuFaults__(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit);
}

static inline NV_STATUS kgmmuParseFaultPacket_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
    return pKernelGmmu->__kgmmuParseFaultPacket__(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry);
}

static inline void kgmmuFaultBufferClearPackets_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets) {
    pKernelGmmu->__kgmmuFaultBufferClearPackets__(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets);
}

static inline GMMU_FAULT_PACKET * kgmmuFaultBufferGetFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx) {
    return pKernelGmmu->__kgmmuFaultBufferGetFault__(pGpu, pKernelGmmu, pFaultBuffer, idx);
}

static inline NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied) {
    return pKernelGmmu->__kgmmuCopyFaultPacketToClientShadowBuffer__(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied);
}

static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
    return pKernelGmmu->__kgmmuIsReplayableShadowFaultBufferFull__(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries);
}

static inline NvU32 kgmmuReadClientShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
    return pKernelGmmu->__kgmmuReadClientShadowBufPutIndex__(pGpu, pKernelGmmu, gfid, type);
}

static inline void kgmmuWriteClientShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
    pKernelGmmu->__kgmmuWriteClientShadowBufPutIndex__(pGpu, pKernelGmmu, gfid, type, putIndex);
}

static inline NV_STATUS kgmmuInitCeMmuFaultIdRange_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuInitCeMmuFaultIdRange__(pGpu, pKernelGmmu);
}

static inline NvU32 kgmmuGetMinCeEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetMinCeEngineId__(pKernelGmmu);
}

static inline NvU32 kgmmuGetMaxCeEngineId_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetMaxCeEngineId__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultBufferMap_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferMap__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuFaultBufferUnmap_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferUnmap__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuFaultBufferInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuFaultBufferInit__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultBufferDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuFaultBufferDestroy__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultBufferLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferLoad__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuFaultBufferUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferUnload__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuEnableFaultBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
    return pKernelGmmu->__kgmmuEnableFaultBuffer__(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid);
}

static inline NV_STATUS kgmmuDisableFaultBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
    return pKernelGmmu->__kgmmuDisableFaultBuffer__(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid);
}

static inline NvU32 kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuSetAndGetDefaultFaultBufferSize__(pGpu, pKernelGmmu, index, gfid);
}

static inline void kgmmuReadMmuFaultInstHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg3, NvU32 *arg4) {
    pKernelGmmu->__kgmmuReadMmuFaultInstHiLo__(pGpu, pKernelGmmu, arg3, arg4);
}

static inline void kgmmuReadMmuFaultAddrHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg3, NvU32 *arg4) {
    pKernelGmmu->__kgmmuReadMmuFaultAddrHiLo__(pGpu, pKernelGmmu, arg3, arg4);
}

static inline NvU32 kgmmuReadMmuFaultInfo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuReadMmuFaultInfo__(pGpu, pKernelGmmu);
}

static inline void kgmmuWriteMmuFaultBufferSize_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU32 gfid) {
    pKernelGmmu->__kgmmuWriteMmuFaultBufferSize__(pGpu, pKernelGmmu, arg3, arg4, gfid);
}

static inline void kgmmuWriteMmuFaultBufferHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU32 arg5, NvU32 gfid) {
    pKernelGmmu->__kgmmuWriteMmuFaultBufferHiLo__(pGpu, pKernelGmmu, arg3, arg4, arg5, gfid);
}

static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return pKernelGmmu->__kgmmuEnableMmuFaultInterrupts__(pGpu, pKernelGmmu, index);
}

static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return pKernelGmmu->__kgmmuDisableMmuFaultInterrupts__(pGpu, pKernelGmmu, index);
}

static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return pKernelGmmu->__kgmmuEnableMmuFaultOverflowIntr__(pGpu, pKernelGmmu, index);
}

static inline void kgmmuSignExtendFaultAddress_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress) {
    pKernelGmmu->__kgmmuSignExtendFaultAddress__(pGpu, pKernelGmmu, pMmuFaultAddress);
}

static inline NV_STATUS kgmmuGetFaultType_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType) {
    return pKernelGmmu->__kgmmuGetFaultType__(pGpu, pKernelGmmu, fault, pMmuFaultType);
}

static inline NvBool kgmmuIsP2PUnboundInstFault_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 arg3) {
    return pKernelGmmu->__kgmmuIsP2PUnboundInstFault__(pKernelGmmu, arg2, arg3);
}

static inline NV_STATUS kgmmuServiceVfPriFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
    return pKernelGmmu->__kgmmuServiceVfPriFaults__(pGpu, pKernelGmmu, faultType);
}

static inline NvBool kgmmuTestVidmemAccessBitBufferError_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    return pKernelGmmu->__kgmmuTestVidmemAccessBitBufferError__(pGpu, pKernelGmmu, arg3);
}

static inline void kgmmuDisableVidmemAccessBitBuf_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuDisableVidmemAccessBitBuf__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuEnableVidmemAccessBitBuf__(pGpu, pKernelGmmu);
}

static inline void kgmmuClearAccessCounterWriteNak_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuClearAccessCounterWriteNak__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuServiceMthdBuffFaultInBar2Fault__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultCancelTargeted_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg3) {
    return pKernelGmmu->__kgmmuFaultCancelTargeted__(pGpu, pKernelGmmu, arg3);
}

static inline NV_STATUS kgmmuFaultCancelIssueInvalidate_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal) {
    return pKernelGmmu->__kgmmuFaultCancelIssueInvalidate__(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal);
}

static inline NV_STATUS kgmmuServiceMmuFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData) {
    return pKernelGmmu->__kgmmuServiceMmuFault__(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData);
}

static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg3, FIFO_MMU_EXCEPTION_DATA *arg4) {
    return pKernelGmmu->__kgmmuServiceUnboundInstBlockFault__(pGpu, pKernelGmmu, arg3, arg4);
}

static inline NvU32 kgmmuGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetEccCounts__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuCreateFakeSparseTables_DISPATCH(OBJGPU *arg1, struct KernelGmmu *arg_this) {
    return arg_this->__kgmmuCreateFakeSparseTables__(arg1, arg_this);
}

static inline NvU8 * kgmmuGetFakeSparseEntry_DISPATCH(OBJGPU *arg1, struct KernelGmmu *arg_this, const MMU_FMT_LEVEL *arg3) {
    return arg_this->__kgmmuGetFakeSparseEntry__(arg1, arg_this, arg3);
}

static inline void kgmmuInitMissing_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate) {
    pEngstate->__kgmmuInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kgmmuStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kgmmuStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS kgmmuStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuStateInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS kgmmuStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate, NvU32 arg3) {
    return pEngstate->__kgmmuStatePreLoad__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS kgmmuStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate, NvU32 arg3) {
    return pEngstate->__kgmmuStatePostUnload__(pGpu, pEngstate, arg3);
}

static inline NvBool kgmmuIsPresent_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuIsPresent__(pGpu, pEngstate);
}

static inline NvU32 kgmmuService_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return 0;
}

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuService(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuService(pGpu, pKernelGmmu) kgmmuService_4a4dee(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuService_HAL(pGpu, pKernelGmmu) kgmmuService(pGpu, pKernelGmmu)
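//
// Illustrative usage sketch (not part of the generated interface): callers are
// expected to go through the unsuffixed or _HAL macros rather than the
// chip-suffixed implementations, e.g.
//
//     NvU32 pending = kgmmuService_HAL(pGpu, pKernelGmmu);
//
// When __nvoc_kern_gmmu_h_disabled is defined, the same macro resolves to the
// assert-and-return stub above instead of the per-chip implementation.
//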
1369 
1370 NvU64 kgmmuGetMaxBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);
1371 
1372 
1373 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuGetMaxBigPageSize(struct KernelGmmu * pKernelGmmu)1374 static inline NvU64 kgmmuGetMaxBigPageSize(struct KernelGmmu *pKernelGmmu) {
1375     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1376     return 0;
1377 }
1378 #else //__nvoc_kern_gmmu_h_disabled
1379 #define kgmmuGetMaxBigPageSize(pKernelGmmu) kgmmuGetMaxBigPageSize_GM107(pKernelGmmu)
1380 #endif //__nvoc_kern_gmmu_h_disabled
1381 
1382 #define kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) kgmmuGetMaxBigPageSize(pKernelGmmu)
1383 
kgmmuGetVaspaceClass_f515df(struct KernelGmmu * pKernelGmmu)1384 static inline NvU32 kgmmuGetVaspaceClass_f515df(struct KernelGmmu *pKernelGmmu) {
1385     return (37105);
1386 }
1387 
1388 
1389 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuGetVaspaceClass(struct KernelGmmu * pKernelGmmu)1390 static inline NvU32 kgmmuGetVaspaceClass(struct KernelGmmu *pKernelGmmu) {
1391     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1392     return 0;
1393 }
1394 #else //__nvoc_kern_gmmu_h_disabled
1395 #define kgmmuGetVaspaceClass(pKernelGmmu) kgmmuGetVaspaceClass_f515df(pKernelGmmu)
1396 #endif //__nvoc_kern_gmmu_h_disabled
1397 
1398 #define kgmmuGetVaspaceClass_HAL(pKernelGmmu) kgmmuGetVaspaceClass(pKernelGmmu)
1399 
1400 NV_STATUS kgmmuInstBlkAtsGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData);
1401 
1402 
1403 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuInstBlkAtsGet(struct KernelGmmu * pKernelGmmu,struct OBJVASPACE * pVAS,NvU32 subctxid,NvU32 * pOffset,NvU32 * pData)1404 static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData) {
1405     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1406     return NV_ERR_NOT_SUPPORTED;
1407 }
1408 #else //__nvoc_kern_gmmu_h_disabled
1409 #define kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet_GV100(pKernelGmmu, pVAS, subctxid, pOffset, pData)
1410 #endif //__nvoc_kern_gmmu_h_disabled
1411 
1412 #define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData)
1413 
kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu * pKernelGmmu,NvU32 * pOffset,NvU32 * pData)1414 static inline NV_STATUS kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
1415     return NV_ERR_NOT_SUPPORTED;
1416 }
1417 
1418 
1419 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuInstBlkMagicValueGet(struct KernelGmmu * pKernelGmmu,NvU32 * pOffset,NvU32 * pData)1420 static inline NV_STATUS kgmmuInstBlkMagicValueGet(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
1421     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1422     return NV_ERR_NOT_SUPPORTED;
1423 }
1424 #else //__nvoc_kern_gmmu_h_disabled
1425 #define kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_46f6a7(pKernelGmmu, pOffset, pData)
1426 #endif //__nvoc_kern_gmmu_h_disabled
1427 
1428 #define kgmmuInstBlkMagicValueGet_HAL(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData)
1429 
1430 NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);
1431 
1432 
1433 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkPageDirBaseGet(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi) {
1435     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1436     return NV_ERR_NOT_SUPPORTED;
1437 }
1438 #else //__nvoc_kern_gmmu_h_disabled
1439 #define kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet_GV100(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
1440 #endif //__nvoc_kern_gmmu_h_disabled
1441 
1442 #define kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
1443 
1444 NvU32 kgmmuGetPDBAllocSize_GP100(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg2, NvU64 arg3);
1445 
1446 
1447 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetPDBAllocSize(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg2, NvU64 arg3) {
1449     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1450     return 0;
1451 }
1452 #else //__nvoc_kern_gmmu_h_disabled
1453 #define kgmmuGetPDBAllocSize(pKernelGmmu, arg2, arg3) kgmmuGetPDBAllocSize_GP100(pKernelGmmu, arg2, arg3)
1454 #endif //__nvoc_kern_gmmu_h_disabled
1455 
1456 #define kgmmuGetPDBAllocSize_HAL(pKernelGmmu, arg2, arg3) kgmmuGetPDBAllocSize(pKernelGmmu, arg2, arg3)
1457 
1458 NvU64 kgmmuGetBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);
1459 
1460 
1461 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetBigPageSize(struct KernelGmmu *pKernelGmmu) {
1463     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1464     return 0;
1465 }
1466 #else //__nvoc_kern_gmmu_h_disabled
1467 #define kgmmuGetBigPageSize(pKernelGmmu) kgmmuGetBigPageSize_GM107(pKernelGmmu)
1468 #endif //__nvoc_kern_gmmu_h_disabled
1469 
1470 #define kgmmuGetBigPageSize_HAL(pKernelGmmu) kgmmuGetBigPageSize(pKernelGmmu)
1471 
1472 void kgmmuFmtInitCaps_GM20X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt);
1473 
1474 
1475 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFmtInitCaps(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt) {
1477     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1478 }
1479 #else //__nvoc_kern_gmmu_h_disabled
1480 #define kgmmuFmtInitCaps(pKernelGmmu, pFmt) kgmmuFmtInitCaps_GM20X(pKernelGmmu, pFmt)
1481 #endif //__nvoc_kern_gmmu_h_disabled
1482 
1483 #define kgmmuFmtInitCaps_HAL(pKernelGmmu, pFmt) kgmmuFmtInitCaps(pKernelGmmu, pFmt)
1484 
1485 void kgmmuFmtInitPteApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);
1486 
1487 
1488 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFmtInitPteApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
1490     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1491 }
1492 #else //__nvoc_kern_gmmu_h_disabled
1493 #define kgmmuFmtInitPteApertures(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures_GM10X(pKernelGmmu, pEntries)
1494 #endif //__nvoc_kern_gmmu_h_disabled
1495 
1496 #define kgmmuFmtInitPteApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures(pKernelGmmu, pEntries)
1497 
1498 void kgmmuFmtInitPdeApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);
1499 
1500 
1501 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFmtInitPdeApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
1503     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1504 }
1505 #else //__nvoc_kern_gmmu_h_disabled
1506 #define kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures_GM10X(pKernelGmmu, pEntries)
1507 #endif //__nvoc_kern_gmmu_h_disabled
1508 
1509 #define kgmmuFmtInitPdeApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries)
1510 
1511 void kgmmuInvalidateTlb_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope);
1512 
1513 
1514 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuInvalidateTlb(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope) {
1516     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1517 }
1518 #else //__nvoc_kern_gmmu_h_disabled
1519 #define kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb_GM107(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
1520 #endif //__nvoc_kern_gmmu_h_disabled
1521 
1522 #define kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
1523 
1524 NV_STATUS kgmmuCommitInvalidateTlbTest_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, COMMIT_TLB_INVALIDATE_TEST_PARAMS *pTestParams);
1525 
1526 
1527 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCommitInvalidateTlbTest(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, COMMIT_TLB_INVALIDATE_TEST_PARAMS *pTestParams) {
1529     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1530     return NV_ERR_NOT_SUPPORTED;
1531 }
1532 #else //__nvoc_kern_gmmu_h_disabled
1533 #define kgmmuCommitInvalidateTlbTest(pGpu, pKernelGmmu, pTestParams) kgmmuCommitInvalidateTlbTest_GM107(pGpu, pKernelGmmu, pTestParams)
1534 #endif //__nvoc_kern_gmmu_h_disabled
1535 
1536 #define kgmmuCommitInvalidateTlbTest_HAL(pGpu, pKernelGmmu, pTestParams) kgmmuCommitInvalidateTlbTest(pGpu, pKernelGmmu, pTestParams)
1537 
1538 NV_STATUS kgmmuCheckPendingInvalidates_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid);
1539 
1540 
1541 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCheckPendingInvalidates(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid) {
1543     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1544     return NV_ERR_NOT_SUPPORTED;
1545 }
1546 #else //__nvoc_kern_gmmu_h_disabled
1547 #define kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates_TU102(pGpu, pKernelGmmu, pTimeOut, gfid)
1548 #endif //__nvoc_kern_gmmu_h_disabled
1549 
1550 #define kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid)
1551 
1552 void kgmmuSetPdbToInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
1553 
1554 
1555 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuSetPdbToInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1557     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1558 }
1559 #else //__nvoc_kern_gmmu_h_disabled
1560 #define kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate_TU102(pGpu, pKernelGmmu, pParams)
1561 #endif //__nvoc_kern_gmmu_h_disabled
1562 
1563 #define kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams)
1564 
1565 NV_STATUS kgmmuEnableComputePeerAddressing_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags);
1566 
1567 
1568 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuEnableComputePeerAddressing(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags) {
1570     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1571     return NV_ERR_NOT_SUPPORTED;
1572 }
1573 #else //__nvoc_kern_gmmu_h_disabled
1574 #define kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing_IMPL(pGpu, pKernelGmmu, flags)
1575 #endif //__nvoc_kern_gmmu_h_disabled
1576 
1577 #define kgmmuEnableComputePeerAddressing_HAL(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags)
1578 
1579 void kgmmuDetermineMaxVASize_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1580 
1581 
1582 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuDetermineMaxVASize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1584     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1585 }
1586 #else //__nvoc_kern_gmmu_h_disabled
1587 #define kgmmuDetermineMaxVASize(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize_GM107(pGpu, pKernelGmmu)
1588 #endif //__nvoc_kern_gmmu_h_disabled
1589 
1590 #define kgmmuDetermineMaxVASize_HAL(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize(pGpu, pKernelGmmu)
1591 
1592 NV_STATUS kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3);
1593 
1594 
1595 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuChangeReplayableFaultOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3) {
1597     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1598     return NV_ERR_NOT_SUPPORTED;
1599 }
1600 #else //__nvoc_kern_gmmu_h_disabled
1601 #define kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg3) kgmmuChangeReplayableFaultOwnership_GV100(pGpu, pKernelGmmu, arg3)
1602 #endif //__nvoc_kern_gmmu_h_disabled
1603 
1604 #define kgmmuChangeReplayableFaultOwnership_HAL(pGpu, pKernelGmmu, arg3) kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg3)
1605 
1606 NV_STATUS kgmmuServiceReplayableFault_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1607 
1608 
1609 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServiceReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1611     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1612     return NV_ERR_NOT_SUPPORTED;
1613 }
1614 #else //__nvoc_kern_gmmu_h_disabled
1615 #define kgmmuServiceReplayableFault(pGpu, pKernelGmmu) kgmmuServiceReplayableFault_TU102(pGpu, pKernelGmmu)
1616 #endif //__nvoc_kern_gmmu_h_disabled
1617 
1618 #define kgmmuServiceReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceReplayableFault(pGpu, pKernelGmmu)
1619 
1620 NV_STATUS kgmmuReportFaultBufferOverflow_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1621 
1622 
1623 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuReportFaultBufferOverflow(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1625     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1626     return NV_ERR_NOT_SUPPORTED;
1627 }
1628 #else //__nvoc_kern_gmmu_h_disabled
1629 #define kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow_GV100(pGpu, pKernelGmmu)
1630 #endif //__nvoc_kern_gmmu_h_disabled
1631 
1632 #define kgmmuReportFaultBufferOverflow_HAL(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu)
1633 
1634 NV_STATUS kgmmuReadFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg5);
1635 
1636 
1637 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuReadFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg5) {
1639     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1640     return NV_ERR_NOT_SUPPORTED;
1641 }
1642 #else //__nvoc_kern_gmmu_h_disabled
1643 #define kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg5) kgmmuReadFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, pGetOffset, arg5)
1644 #endif //__nvoc_kern_gmmu_h_disabled
1645 
1646 #define kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, pGetOffset, arg5) kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg5)
1647 
1648 NV_STATUS kgmmuWriteFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 getValue, struct THREAD_STATE_NODE *arg5);
1649 
1650 
1651 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuWriteFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 getValue, struct THREAD_STATE_NODE *arg5) {
1653     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1654     return NV_ERR_NOT_SUPPORTED;
1655 }
1656 #else //__nvoc_kern_gmmu_h_disabled
1657 #define kgmmuWriteFaultBufferGetPtr(pGpu, pKernelGmmu, index, getValue, arg5) kgmmuWriteFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, getValue, arg5)
1658 #endif //__nvoc_kern_gmmu_h_disabled
1659 
1660 #define kgmmuWriteFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, getValue, arg5) kgmmuWriteFaultBufferGetPtr(pGpu, pKernelGmmu, index, getValue, arg5)
1661 
1662 NV_STATUS kgmmuReadFaultBufferPutPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg5);
1663 
1664 
1665 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuReadFaultBufferPutPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg5) {
1667     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1668     return NV_ERR_NOT_SUPPORTED;
1669 }
1670 #else //__nvoc_kern_gmmu_h_disabled
1671 #define kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg5) kgmmuReadFaultBufferPutPtr_TU102(pGpu, pKernelGmmu, index, pPutOffset, arg5)
1672 #endif //__nvoc_kern_gmmu_h_disabled
1673 
1674 #define kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, index, pPutOffset, arg5) kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg5)
1675 
1676 NvU32 kgmmuReadMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 gfid);
1677 
1678 
1679 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuReadMmuFaultBufferSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 gfid) {
1681     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1682     return 0;
1683 }
1684 #else //__nvoc_kern_gmmu_h_disabled
1685 #define kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg3, gfid) kgmmuReadMmuFaultBufferSize_TU102(pGpu, pKernelGmmu, arg3, gfid)
1686 #endif //__nvoc_kern_gmmu_h_disabled
1687 
1688 #define kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg3, gfid) kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg3, gfid)
1689 
1690 NvU32 kgmmuReadMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid);
1691 
1692 
1693 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuReadMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid) {
1695     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1696     return 0;
1697 }
1698 #else //__nvoc_kern_gmmu_h_disabled
1699 #define kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus_TU102(pGpu, pKernelGmmu, gfid)
1700 #endif //__nvoc_kern_gmmu_h_disabled
1701 
1702 #define kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid)
1703 
1704 void kgmmuWriteMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);
1705 
1706 
1707 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuWriteMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
1709     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1710 }
1711 #else //__nvoc_kern_gmmu_h_disabled
1712 #define kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg3) kgmmuWriteMmuFaultStatus_TU102(pGpu, pKernelGmmu, arg3)
1713 #endif //__nvoc_kern_gmmu_h_disabled
1714 
1715 #define kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, arg3) kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg3)
1716 
1717 NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg3);
1718 
1719 
1720 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg3) {
1722     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1723     return NV_FALSE;
1724 }
1725 #else //__nvoc_kern_gmmu_h_disabled
1726 #define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg3) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu, arg3)
1727 #endif //__nvoc_kern_gmmu_h_disabled
1728 
1729 #define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, arg3) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg3)
1730 
1731 NV_STATUS kgmmuClientShadowFaultBufferAlloc_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);
1732 
1733 
1734 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
1736     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1737     return NV_ERR_NOT_SUPPORTED;
1738 }
1739 #else //__nvoc_kern_gmmu_h_disabled
1740 #define kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferAlloc_GV100(pGpu, pKernelGmmu, arg3)
1741 #endif //__nvoc_kern_gmmu_h_disabled
1742 
1743 #define kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg3)
1744 
1745 NV_STATUS kgmmuClientShadowFaultBufferFree_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);
1746 
1747 
1748 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
1750     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1751     return NV_ERR_NOT_SUPPORTED;
1752 }
1753 #else //__nvoc_kern_gmmu_h_disabled
1754 #define kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferFree_GV100(pGpu, pKernelGmmu, arg3)
1755 #endif //__nvoc_kern_gmmu_h_disabled
1756 
1757 #define kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg3)
1758 
1759 void kgmmuEncodeSysmemAddrs_GM107(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count);
1760 
1761 
1762 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuEncodeSysmemAddrs(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count) {
1764     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1765 }
1766 #else //__nvoc_kern_gmmu_h_disabled
1767 #define kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs_GM107(pKernelGmmu, pAddresses, count)
1768 #endif //__nvoc_kern_gmmu_h_disabled
1769 
1770 #define kgmmuEncodeSysmemAddrs_HAL(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count)
1771 
1772 NvU8 kgmmuGetHwPteApertureFromMemdesc_GM107(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc);
1773 
1774 
1775 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU8 kgmmuGetHwPteApertureFromMemdesc(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc) {
1777     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1778     return 0;
1779 }
1780 #else //__nvoc_kern_gmmu_h_disabled
1781 #define kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc_GM107(pKernelGmmu, pDesc)
1782 #endif //__nvoc_kern_gmmu_h_disabled
1783 
1784 #define kgmmuGetHwPteApertureFromMemdesc_HAL(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc)
1785 
1786 NvBool kgmmuTestAccessCounterWriteNak_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1787 
1788 
1789 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuTestAccessCounterWriteNak(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1791     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1792     return NV_FALSE;
1793 }
1794 #else //__nvoc_kern_gmmu_h_disabled
1795 #define kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak_TU102(pGpu, pKernelGmmu)
1796 #endif //__nvoc_kern_gmmu_h_disabled
1797 
1798 #define kgmmuTestAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu)
1799 
1800 NV_STATUS kgmmuEnableNvlinkComputePeerAddressing_GV100(struct KernelGmmu *pKernelGmmu);
1801 
1802 
1803 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuEnableNvlinkComputePeerAddressing(struct KernelGmmu *pKernelGmmu) {
1805     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1806     return NV_ERR_NOT_SUPPORTED;
1807 }
1808 #else //__nvoc_kern_gmmu_h_disabled
1809 #define kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing_GV100(pKernelGmmu)
1810 #endif //__nvoc_kern_gmmu_h_disabled
1811 
1812 #define kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu)
1813 
1814 void kgmmuClearNonReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg3);
1815 
1816 
1817 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClearNonReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg3) {
1819     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1820 }
1821 #else //__nvoc_kern_gmmu_h_disabled
1822 #define kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg3) kgmmuClearNonReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg3)
1823 #endif //__nvoc_kern_gmmu_h_disabled
1824 
1825 #define kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg3) kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg3)
1826 
1827 void kgmmuClearReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg3);
1828 
1829 
1830 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClearReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg3) {
1832     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1833 }
1834 #else //__nvoc_kern_gmmu_h_disabled
1835 #define kgmmuClearReplayableFaultIntr(pGpu, pKernelGmmu, arg3) kgmmuClearReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg3)
1836 #endif //__nvoc_kern_gmmu_h_disabled
1837 
1838 #define kgmmuClearReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg3) kgmmuClearReplayableFaultIntr(pGpu, pKernelGmmu, arg3)
1839 
1840 void kgmmuPrintFaultInfo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, FIFO_MMU_EXCEPTION_DATA *arg4);
1841 
1842 
1843 #ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuPrintFaultInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, FIFO_MMU_EXCEPTION_DATA *arg4) {
1845     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1846 }
1847 #else //__nvoc_kern_gmmu_h_disabled
1848 #define kgmmuPrintFaultInfo(pGpu, pKernelGmmu, arg3, arg4) kgmmuPrintFaultInfo_TU102(pGpu, pKernelGmmu, arg3, arg4)
1849 #endif //__nvoc_kern_gmmu_h_disabled
1850 
1851 #define kgmmuPrintFaultInfo_HAL(pGpu, pKernelGmmu, arg3, arg4) kgmmuPrintFaultInfo(pGpu, pKernelGmmu, arg3, arg4)
1852 
1853 NV_STATUS kgmmuServiceNonReplayableFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1854 
1855 
1856 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServiceNonReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1858     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1859     return NV_ERR_NOT_SUPPORTED;
1860 }
1861 #else //__nvoc_kern_gmmu_h_disabled
1862 #define kgmmuServiceNonReplayableFault(pGpu, pKernelGmmu) kgmmuServiceNonReplayableFault_GV100(pGpu, pKernelGmmu)
1863 #endif //__nvoc_kern_gmmu_h_disabled
1864 
1865 #define kgmmuServiceNonReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceNonReplayableFault(pGpu, pKernelGmmu)
1866 
1867 NV_STATUS kgmmuHandleNonReplayableFaultPacket_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_PACKET *arg3);
1868 
1869 
1870 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuHandleNonReplayableFaultPacket(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_PACKET *arg3) {
1872     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1873     return NV_ERR_NOT_SUPPORTED;
1874 }
1875 #else //__nvoc_kern_gmmu_h_disabled
1876 #define kgmmuHandleNonReplayableFaultPacket(pGpu, pKernelGmmu, arg3) kgmmuHandleNonReplayableFaultPacket_GV100(pGpu, pKernelGmmu, arg3)
1877 #endif //__nvoc_kern_gmmu_h_disabled
1878 
1879 #define kgmmuHandleNonReplayableFaultPacket_HAL(pGpu, pKernelGmmu, arg3) kgmmuHandleNonReplayableFaultPacket(pGpu, pKernelGmmu, arg3)
1880 
1881 NV_STATUS kgmmuNotifyNonReplayableFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3);
1882 
1883 
1884 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuNotifyNonReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3) {
1886     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1887     return NV_ERR_NOT_SUPPORTED;
1888 }
1889 #else //__nvoc_kern_gmmu_h_disabled
1890 #define kgmmuNotifyNonReplayableFault(pGpu, pKernelGmmu, arg3) kgmmuNotifyNonReplayableFault_GV100(pGpu, pKernelGmmu, arg3)
1891 #endif //__nvoc_kern_gmmu_h_disabled
1892 
1893 #define kgmmuNotifyNonReplayableFault_HAL(pGpu, pKernelGmmu, arg3) kgmmuNotifyNonReplayableFault(pGpu, pKernelGmmu, arg3)
1894 
1895 NvU32 kgmmuGetFaultInfoFromFaultPckt_GV100(struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry);
1896 
1897 
1898 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetFaultInfoFromFaultPckt(struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry) {
1900     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1901     return 0;
1902 }
1903 #else //__nvoc_kern_gmmu_h_disabled
1904 #define kgmmuGetFaultInfoFromFaultPckt(pKernelGmmu, pParsedFaultEntry) kgmmuGetFaultInfoFromFaultPckt_GV100(pKernelGmmu, pParsedFaultEntry)
1905 #endif //__nvoc_kern_gmmu_h_disabled
1906 
1907 #define kgmmuGetFaultInfoFromFaultPckt_HAL(pKernelGmmu, pParsedFaultEntry) kgmmuGetFaultInfoFromFaultPckt(pKernelGmmu, pParsedFaultEntry)
1908 
static inline NV_STATUS kgmmuServiceChannelMmuFault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel) {
1910     NV_ASSERT_PRECOMP(0);
1911     return NV_ERR_NOT_SUPPORTED;
1912 }
1913 
1914 NV_STATUS kgmmuServiceChannelMmuFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel);
1915 
1916 
1917 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServiceChannelMmuFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel) {
1919     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1920     return NV_ERR_NOT_SUPPORTED;
1921 }
1922 #else //__nvoc_kern_gmmu_h_disabled
1923 #define kgmmuServiceChannelMmuFault(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel) kgmmuServiceChannelMmuFault_92bfc3(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel)
1924 #endif //__nvoc_kern_gmmu_h_disabled
1925 
1926 #define kgmmuServiceChannelMmuFault_HAL(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel) kgmmuServiceChannelMmuFault(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel)
1927 
1928 NV_STATUS kgmmuServicePriFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1929 
1930 
1931 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServicePriFaults(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1933     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1934     return NV_ERR_NOT_SUPPORTED;
1935 }
1936 #else //__nvoc_kern_gmmu_h_disabled
1937 #define kgmmuServicePriFaults(pGpu, pKernelGmmu) kgmmuServicePriFaults_GV100(pGpu, pKernelGmmu)
1938 #endif //__nvoc_kern_gmmu_h_disabled
1939 
1940 #define kgmmuServicePriFaults_HAL(pGpu, pKernelGmmu) kgmmuServicePriFaults(pGpu, pKernelGmmu)
1941 
1942 NV_STATUS kgmmuCheckAndDecideBigPageSize_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1943 
1944 
1945 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCheckAndDecideBigPageSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1947     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1948     return NV_ERR_NOT_SUPPORTED;
1949 }
1950 #else //__nvoc_kern_gmmu_h_disabled
1951 #define kgmmuCheckAndDecideBigPageSize(pGpu, pKernelGmmu) kgmmuCheckAndDecideBigPageSize_GP100(pGpu, pKernelGmmu)
1952 #endif //__nvoc_kern_gmmu_h_disabled
1953 
1954 #define kgmmuCheckAndDecideBigPageSize_HAL(pGpu, pKernelGmmu) kgmmuCheckAndDecideBigPageSize(pGpu, pKernelGmmu)
1955 
1956 NV_STATUS kgmmuCreateFakeSparseTablesInternal_KERNEL(OBJGPU *arg1, struct KernelGmmu *arg2);
1957 
1958 
1959 #ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCreateFakeSparseTablesInternal(OBJGPU *arg1, struct KernelGmmu *arg2) {
1961     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1962     return NV_ERR_NOT_SUPPORTED;
1963 }
1964 #else //__nvoc_kern_gmmu_h_disabled
1965 #define kgmmuCreateFakeSparseTablesInternal(arg1, arg2) kgmmuCreateFakeSparseTablesInternal_KERNEL(arg1, arg2)
1966 #endif //__nvoc_kern_gmmu_h_disabled
1967 
1968 #define kgmmuCreateFakeSparseTablesInternal_HAL(arg1, arg2) kgmmuCreateFakeSparseTablesInternal(arg1, arg2)
1969 
1970 NV_STATUS kgmmuConstructEngine_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg3);
1971 
1972 NV_STATUS kgmmuStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1973 
1974 NV_STATUS kgmmuStateLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);
1975 
1976 NV_STATUS kgmmuStateUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);
1977 
1978 NV_STATUS kgmmuStatePostLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);
1979 
1980 NV_STATUS kgmmuStatePreUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);
1981 
1982 void kgmmuStateDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1983 
1984 void kgmmuRegisterIntrService_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg3[175]);
1985 
1986 NvBool kgmmuClearInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceClearInterruptArguments *pParams);
1987 
1988 NvU32 kgmmuServiceInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams);
1989 
static inline NV_STATUS kgmmuServiceNotificationInterrupt_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceNotificationInterruptArguments *pParams) {
1991     return NV_OK;
1992 }
1993 
1994 NV_STATUS kgmmuInstBlkVaLimitGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData);
1995 
static inline NV_STATUS kgmmuInstBlkVaLimitGet_f03539(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
1997     *pOffset = 0;
1998     return NV_OK;
1999 }
2000 
2001 NV_STATUS kgmmuCommitTlbInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
2002 
2003 NV_STATUS kgmmuCommitTlbInvalidate_GB100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
2004 
2005 NvU32 kgmmuSetTlbInvalidateMembarWarParameters_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
2006 
static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
2008     return 0;
2009 }
2010 
2011 NV_STATUS kgmmuSetTlbInvalidationScope_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams);
2012 
static inline NV_STATUS kgmmuSetTlbInvalidationScope_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
2014     return NV_ERR_NOT_SUPPORTED;
2015 }
2016 
2017 void kgmmuFmtInitPteComptagLine_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);
2018 
static inline void kgmmuFmtInitPteComptagLine_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
2020     return;
2021 }
2022 
2023 void kgmmuFmtInitPeerPteFld_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);
2024 
static inline void kgmmuFmtInitPeerPteFld_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
2026     return;
2027 }
2028 
2029 void kgmmuFmtInitPte_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);
2030 
2031 void kgmmuFmtInitPte_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);
2032 
2033 void kgmmuFmtInitPde_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
2034 
2035 void kgmmuFmtInitPde_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
2036 
2037 NvBool kgmmuFmtIsVersionSupported_GP10X(struct KernelGmmu *pKernelGmmu, NvU32 version);
2038 
2039 NvBool kgmmuFmtIsVersionSupported_GH10X(struct KernelGmmu *pKernelGmmu, NvU32 version);
2040 
2041 void kgmmuFmtInitLevels_GP10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
2042 
2043 void kgmmuFmtInitLevels_GA10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
2044 
2045 void kgmmuFmtInitLevels_GH10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
2046 
2047 void kgmmuFmtInitLevels_GB10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
2048 
2049 void kgmmuFmtInitPdeMulti_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
2050 
2051 void kgmmuFmtInitPdeMulti_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
2052 
2053 NV_STATUS kgmmuFmtFamiliesInit_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2054 
2055 NV_STATUS kgmmuFmtFamiliesInit_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2056 
2057 NV_STATUS kgmmuTranslatePtePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 *arg3);
2058 
static inline NV_STATUS kgmmuTranslatePtePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 *arg3) {
2060     return NV_OK;
2061 }
2062 
2063 NV_STATUS kgmmuTranslatePtePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvBool arg3, NvU32 *arg4);
2064 
static inline NV_STATUS kgmmuTranslatePtePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvBool arg3, NvU32 *arg4) {
2066     return NV_OK;
2067 }
2068 
2069 NV_STATUS kgmmuTranslatePdePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 *arg3);
2070 
static inline NV_STATUS kgmmuTranslatePdePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 *arg3) {
2072     return NV_OK;
2073 }
2074 
2075 NV_STATUS kgmmuTranslatePdePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2, GMMU_APERTURE arg3, NvU32 *arg4);
2076 
static inline NV_STATUS kgmmuTranslatePdePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg2, GMMU_APERTURE arg3, NvU32 *arg4) {
2078     return NV_OK;
2079 }
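
//
// Illustrative sketch (assumption; variable names are hypothetical): the
// GH100 variants above appear to perform the actual SW<->HW page control
// flags (PCF) translation, while the _56cd7a defaults are no-ops that
// simply return NV_OK for formats without a PCF field.
//
//     NvU32 ptePcfHw = 0;
//     NV_STATUS status = kgmmuTranslatePtePcfFromSw_GH100(pKernelGmmu, ptePcfSw, &ptePcfHw);
//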
2080 
2081 NV_STATUS kgmmuGetFaultRegisterMappings_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);
2082 
2083 NV_STATUS kgmmuGetFaultRegisterMappings_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);
2084 
2085 const char *kgmmuGetFaultTypeString_GP100(struct KernelGmmu *pKernelGmmu, NvU32 faultType);
2086 
2087 const char *kgmmuGetFaultTypeString_GB100(struct KernelGmmu *pKernelGmmu, NvU32 faultType);
2088 
2089 NV_STATUS kgmmuIssueReplayableFaultBufferFlush_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush);
2090 
static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush) {
2092     return NV_ERR_NOT_SUPPORTED;
2093 }
2094 
2095 NV_STATUS kgmmuToggleFaultOnPrefetch_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable);
2096 
static inline NV_STATUS kgmmuToggleFaultOnPrefetch_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable) {
2098     return NV_ERR_NOT_SUPPORTED;
2099 }
2100 
2101 NV_STATUS kgmmuFaultBufferAllocSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);
2102 
static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
2104     return NV_OK;
2105 }
2106 
2107 void kgmmuFaultBufferFreeSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);
2108 
static inline void kgmmuFaultBufferFreeSharedMemory_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
2110     return;
2111 }
2112 
2113 NV_STATUS kgmmuSetupWarForBug2720120_GA100(struct KernelGmmu *pKernelGmmu);
2114 
static inline NV_STATUS kgmmuSetupWarForBug2720120_56cd7a(struct KernelGmmu *pKernelGmmu) {
2116     return NV_OK;
2117 }
2118 
2119 NvU32 kgmmuGetGraphicsEngineId_GV100(struct KernelGmmu *pKernelGmmu);
2120 
2121 NvU32 kgmmuGetGraphicsEngineId_GH100(struct KernelGmmu *pKernelGmmu);
2122 
2123 NvU32 kgmmuReadShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type);
2124 
static inline NvU32 kgmmuReadShadowBufPutIndex_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
2126     return 0;
2127 }
2128 
2129 NvBool kgmmuIsFaultEngineBar1_TU102(struct KernelGmmu *pKernelGmmu, NvU32 arg2);
2130 
2131 NvBool kgmmuIsFaultEngineBar1_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2);
2132 
2133 NvBool kgmmuIsFaultEngineBar2_TU102(struct KernelGmmu *pKernelGmmu, NvU32 arg2);
2134 
2135 NvBool kgmmuIsFaultEngineBar2_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2);
2136 
2137 NvBool kgmmuIsFaultEnginePhysical_GV100(struct KernelGmmu *pKernelGmmu, NvU32 arg2);
2138 
2139 NvBool kgmmuIsFaultEnginePhysical_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg2);
2140 
static inline NV_STATUS kgmmuCopyMmuFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit) {
2142     NV_ASSERT_PRECOMP(0);
2143     return NV_ERR_NOT_SUPPORTED;
2144 }
2145 
2146 NV_STATUS kgmmuCopyMmuFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit);
2147 
static inline NV_STATUS kgmmuParseFaultPacket_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
2149     NV_ASSERT_PRECOMP(0);
2150     return NV_ERR_NOT_SUPPORTED;
2151 }
2152 
2153 NV_STATUS kgmmuParseFaultPacket_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry);
2154 
static inline void kgmmuFaultBufferClearPackets_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets) {
2156     NV_ASSERT_PRECOMP(0);
2157 }
2158 
2159 void kgmmuFaultBufferClearPackets_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets);
2160 
static inline GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_dc3e6c(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx) {
2162     NV_ASSERT_PRECOMP(0);
2163     return ((void *)0);
2164 }
2165 
2166 GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx);
2167 
static inline NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied) {
2169     NV_ASSERT_PRECOMP(0);
2170     return 0;
2171 }
2172 
2173 NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied);
2174 
2175 NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied);
2176 
static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_ceaee8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
2178     NV_ASSERT_PRECOMP(0);
2179     return ((NvBool)(0 != 0));
2180 }
2181 
2182 NvBool kgmmuIsReplayableShadowFaultBufferFull_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries);
2183 
static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_491d52(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
2185     return ((NvBool)(0 != 0));
2186 }
2187 
static inline NvU32 kgmmuReadClientShadowBufPutIndex_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
2189     NV_ASSERT_PRECOMP(0);
2190     return 0;
2191 }
2192 
2193 NvU32 kgmmuReadClientShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type);
2194 
2195 NvU32 kgmmuReadClientShadowBufPutIndex_GB100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type);
2196 
static inline NvU32 kgmmuReadClientShadowBufPutIndex_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
2198     return 0;
2199 }
2200 
static inline void kgmmuWriteClientShadowBufPutIndex_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
2202     NV_ASSERT_PRECOMP(0);
2203 }
2204 
2205 void kgmmuWriteClientShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex);
2206 
2207 void kgmmuWriteClientShadowBufPutIndex_GB100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex);
2208 
static inline void kgmmuWriteClientShadowBufPutIndex_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
2210     return;
2211 }
2212 
2213 NV_STATUS kgmmuInitCeMmuFaultIdRange_GB100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2214 
static inline NV_STATUS kgmmuInitCeMmuFaultIdRange_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2216     return NV_OK;
2217 }
2218 
2219 NvU32 kgmmuGetMinCeEngineId_GV100(struct KernelGmmu *pKernelGmmu);
2220 
2221 NvU32 kgmmuGetMinCeEngineId_GH100(struct KernelGmmu *pKernelGmmu);
2222 
2223 NvU32 kgmmuGetMinCeEngineId_GB100(struct KernelGmmu *pKernelGmmu);
2224 
2225 NvU32 kgmmuGetMaxCeEngineId_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2226 
2227 NvU32 kgmmuGetMaxCeEngineId_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2228 
2229 NvU32 kgmmuGetMaxCeEngineId_AD102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2230 
2231 NvU32 kgmmuGetMaxCeEngineId_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2232 
2233 NvU32 kgmmuGetMaxCeEngineId_GB100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2234 
static inline NV_STATUS kgmmuFaultBufferMap_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
2236     NV_ASSERT_PRECOMP(0);
2237     return NV_ERR_NOT_SUPPORTED;
2238 }
2239 
2240 NV_STATUS kgmmuFaultBufferMap_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
2241 
static inline NV_STATUS kgmmuFaultBufferUnmap_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
2243     NV_ASSERT_PRECOMP(0);
2244     return NV_ERR_NOT_SUPPORTED;
2245 }
2246 
2247 NV_STATUS kgmmuFaultBufferUnmap_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
2248 
static inline NV_STATUS kgmmuFaultBufferInit_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2250     return NV_OK;
2251 }
2252 
2253 NV_STATUS kgmmuFaultBufferInit_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2254 
static inline NV_STATUS kgmmuFaultBufferDestroy_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2256     return NV_OK;
2257 }
2258 
2259 NV_STATUS kgmmuFaultBufferDestroy_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2260 
static inline NV_STATUS kgmmuFaultBufferLoad_ac1694(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
2262     return NV_OK;
2263 }
2264 
2265 NV_STATUS kgmmuFaultBufferLoad_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
2266 
static inline NV_STATUS kgmmuFaultBufferUnload_ac1694(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
2268     return NV_OK;
2269 }
2270 
2271 NV_STATUS kgmmuFaultBufferUnload_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
2272 
static inline NV_STATUS kgmmuEnableFaultBuffer_395e98(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
2274     return NV_ERR_NOT_SUPPORTED;
2275 }
2276 
2277 NV_STATUS kgmmuEnableFaultBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid);
2278 
static inline NV_STATUS kgmmuDisableFaultBuffer_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
2280     NV_ASSERT_PRECOMP(0);
2281     return NV_ERR_NOT_SUPPORTED;
2282 }
2283 
2284 NV_STATUS kgmmuDisableFaultBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid);
2285 
static inline NvU32 kgmmuSetAndGetDefaultFaultBufferSize_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE index, NvU32 gfid) {
2287     NV_ASSERT_PRECOMP(0);
2288     return 0;
2289 }
2290 
2291 NvU32 kgmmuSetAndGetDefaultFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE index, NvU32 gfid);
2292 
static inline void kgmmuReadMmuFaultInstHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg3, NvU32 *arg4) {
2294     NV_ASSERT_PRECOMP(0);
2295 }
2296 
2297 void kgmmuReadMmuFaultInstHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg3, NvU32 *arg4);
2298 
kgmmuReadMmuFaultAddrHiLo_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 * arg3,NvU32 * arg4)2299 static inline void kgmmuReadMmuFaultAddrHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg3, NvU32 *arg4) {
2300     NV_ASSERT_PRECOMP(0);
2301 }
2302 
2303 void kgmmuReadMmuFaultAddrHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg3, NvU32 *arg4);
2304 
kgmmuReadMmuFaultInfo_a547a8(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2305 static inline NvU32 kgmmuReadMmuFaultInfo_a547a8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2306     NV_ASSERT_PRECOMP(0);
2307     return -1;
2308 }
2309 
NvU32 kgmmuReadMmuFaultInfo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline void kgmmuWriteMmuFaultBufferSize_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuWriteMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU32 gfid);

static inline void kgmmuWriteMmuFaultBufferHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU32 arg5, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuWriteMmuFaultBufferHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU32 arg5, NvU32 gfid);

static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline void kgmmuSignExtendFaultAddress_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuSignExtendFaultAddress_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress);

void kgmmuSignExtendFaultAddress_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress);

static inline NV_STATUS kgmmuGetFaultType_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuGetFaultType_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType);

NV_STATUS kgmmuGetFaultType_GB100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType);

static inline NvBool kgmmuIsP2PUnboundInstFault_92bfc3(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 arg3) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NvBool kgmmuIsP2PUnboundInstFault_GA100(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 arg3);

static inline NvBool kgmmuIsP2PUnboundInstFault_491d52(struct KernelGmmu *pKernelGmmu, NvU32 arg2, NvU32 arg3) {
    return ((NvBool)(0 != 0));
}

NV_STATUS kgmmuServiceVfPriFaults_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType);

static inline NV_STATUS kgmmuServiceVfPriFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NvBool kgmmuTestVidmemAccessBitBufferError_491d52(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    return ((NvBool)(0 != 0));
}

static inline NvBool kgmmuTestVidmemAccessBitBufferError_ceaee8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    NV_ASSERT_PRECOMP(0);
    return ((NvBool)(0 != 0));
}

static inline void kgmmuDisableVidmemAccessBitBuf_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return;
}

static inline void kgmmuDisableVidmemAccessBitBuf_e426af(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return;
}

static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline void kgmmuClearAccessCounterWriteNak_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return;
}

static inline void kgmmuClearAccessCounterWriteNak_e426af(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return;
}

static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_OK;
}

static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultCancelTargeted_VF(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg3);

static inline NV_STATUS kgmmuFaultCancelTargeted_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg3) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultCancelTargeted_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg3);

static inline NV_STATUS kgmmuFaultCancelIssueInvalidate_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultCancelIssueInvalidate_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal);

NV_STATUS kgmmuServiceMmuFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData);

NV_STATUS kgmmuServiceMmuFault_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData);

static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg3, FIFO_MMU_EXCEPTION_DATA *arg4) {
    return NV_OK;
}

static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg3, FIFO_MMU_EXCEPTION_DATA *arg4) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NvU32 kgmmuGetEccCounts_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NvU32 kgmmuGetEccCounts_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return 0;
}

NV_STATUS kgmmuCreateFakeSparseTables_GH100(OBJGPU *arg1, struct KernelGmmu *arg2);

static inline NV_STATUS kgmmuCreateFakeSparseTables_56cd7a(OBJGPU *arg1, struct KernelGmmu *arg2) {
    return NV_OK;
}

NvU8 *kgmmuGetFakeSparseEntry_GH100(OBJGPU *arg1, struct KernelGmmu *arg2, const MMU_FMT_LEVEL *arg3);

static inline NvU8 *kgmmuGetFakeSparseEntry_fa6e19(OBJGPU *arg1, struct KernelGmmu *arg2, const MMU_FMT_LEVEL *arg3) {
    return ((void *)0);
}

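//
// The inline accessors below read fields of the private KernelGmmu state by
// casting the public KernelGmmu pointer to KernelGmmu_PRIVATE; they assume a
// fully constructed KernelGmmu instance. Code outside the class is expected to
// go through these accessors rather than touching PRIVATE_FIELD members
// directly.
//
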
static inline NvU32 kgmmuGetPDEAperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEAperture;
}

static inline NvU32 kgmmuGetPTEAperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEAperture;
}

static inline NvU32 kgmmuGetPDEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEBAR1Aperture;
}

static inline NvU32 kgmmuGetPTEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEBAR1Aperture;
}

static inline NvU32 kgmmuGetPDEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEBAR1Attr;
}

static inline NvU32 kgmmuGetPTEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEBAR1Attr;
}

static inline NvU32 kgmmuGetPDEAttr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEAttr;
}

static inline NvU32 kgmmuGetPTEAttr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEAttr;
}

static inline NvU64 kgmmuGetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->overrideBigPageSize;
}

static inline void kgmmuSetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    pKernelGmmu_PRIVATE->overrideBigPageSize = bigPageSize;
}

static inline NvBool kgmmuIsPerVaspaceBigPageEn(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bEnablePerVaspaceBigPage;
}

static inline NvBool kgmmuIsIgnoreHubTlbInvalidate(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bIgnoreHubTlbInvalidate;
}

static inline NvBool kgmmuIsHugePageSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bHugePageSupported;
}

static inline NvBool kgmmuIsPageSize512mbSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bPageSize512mbSupported;
}

static inline NvBool kgmmuIsPageSize256gbSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bPageSize256gbSupported;
}

static inline NvBool kgmmuIsBug2720120WarEnabled(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bBug2720120WarEnabled;
}

static inline NvBool kgmmuIsVaspaceInteropSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bVaspaceInteropSupported;
}

static inline NvU64 kgmmuGetMaxVASize(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->maxVASize;
}

static inline NvU64 kgmmuGetSysBaseAddress(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->sysmemBaseAddress;
}

void kgmmuDestruct_IMPL(struct KernelGmmu *pKernelGmmu);

#define __nvoc_kgmmuDestruct(pKernelGmmu) kgmmuDestruct_IMPL(pKernelGmmu)
NV_STATUS kgmmuFmtInit_IMPL(struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFmtInit(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtInit(pKernelGmmu) kgmmuFmtInit_IMPL(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled
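
//
// Note on the __nvoc_kern_gmmu_h_disabled pattern used by the entry points
// below: when the KernelGmmu engine is compiled out, each kgmmuXxx() call
// resolves to an inline stub that asserts and fails (NV_ERR_NOT_SUPPORTED, or
// a zero/NULL value for non-status returns); otherwise the macro forwards
// directly to the corresponding _IMPL function. A minimal, hedged usage sketch
// (caller context assumed):
//
//     NV_STATUS status = kgmmuFmtInit(pKernelGmmu);
//     if (status != NV_OK)
//         return status;
//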

GMMU_APERTURE kgmmuGetMemAperture_IMPL(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline GMMU_APERTURE kgmmuGetMemAperture(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    GMMU_APERTURE ret;
    portMemSet(&ret, 0, sizeof(GMMU_APERTURE));
    return ret;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetMemAperture(pKernelGmmu, pMemDesc) kgmmuGetMemAperture_IMPL(pKernelGmmu, pMemDesc)
#endif //__nvoc_kern_gmmu_h_disabled

const GMMU_FMT_FAMILY *kgmmuFmtGetFamily_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const GMMU_FMT_FAMILY *kgmmuFmtGetFamily(struct KernelGmmu *pKernelGmmu, NvU32 version) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGetFamily(pKernelGmmu, version) kgmmuFmtGetFamily_IMPL(pKernelGmmu, version)
#endif //__nvoc_kern_gmmu_h_disabled

const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetStaticInfo(pGpu, pKernelGmmu) kgmmuGetStaticInfo_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

const struct GMMU_FMT *kgmmuFmtGet_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const struct GMMU_FMT *kgmmuFmtGet(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGet(pKernelGmmu, version, bigPageSize) kgmmuFmtGet_IMPL(pKernelGmmu, version, bigPageSize)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuExtractPteInfo_IMPL(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg2, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg3, const struct GMMU_FMT *arg4, const MMU_FMT_LEVEL *arg5);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuExtractPteInfo(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg2, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg3, const struct GMMU_FMT *arg4, const MMU_FMT_LEVEL *arg5) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuExtractPteInfo(pKernelGmmu, arg2, arg3, arg4, arg5) kgmmuExtractPteInfo_IMPL(pKernelGmmu, arg2, arg3, arg4, arg5)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuFieldSetKindCompTags_IMPL(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFieldSetKindCompTags(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFieldSetKindCompTags(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries) kgmmuFieldSetKindCompTags_IMPL(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries)
#endif //__nvoc_kern_gmmu_h_disabled

NvBool kgmmuFmtIsBigPageSizeSupported_IMPL(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuFmtIsBigPageSizeSupported(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, bigPageSize) kgmmuFmtIsBigPageSizeSupported_IMPL(pKernelGmmu, bigPageSize)
#endif //__nvoc_kern_gmmu_h_disabled

const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGetLatestSupportedFormat(pGpu, pKernelGmmu) kgmmuFmtGetLatestSupportedFormat_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NvU32 kgmmuGetFaultBufferReservedFbSpaceSize_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetFaultBufferReservedFbSpaceSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultBufferReservedFbSpaceSize(pGpu, pKernelGmmu) kgmmuGetFaultBufferReservedFbSpaceSize_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableSetup_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg3, NvHandle arg4, NvU32 arg5, RmPhysAddr *arg6);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableSetup(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg3, NvHandle arg4, NvU32 arg5, RmPhysAddr *arg6) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableSetup(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6) kgmmuFaultBufferReplayableSetup_IMPL(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetMinBigPageSize_IMPL(struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetMinBigPageSize(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetMinBigPageSize(pKernelGmmu) kgmmuGetMinBigPageSize_IMPL(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuInstBlkInit_IMPL(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkInit(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkInit(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams) kgmmuInstBlkInit_IMPL(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg3, NvHandle arg4);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg3, NvHandle arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableAllocate(pGpu, pKernelGmmu, arg3, arg4) kgmmuFaultBufferReplayableAllocate_IMPL(pGpu, pKernelGmmu, arg3, arg4)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferReplayableDestroy_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferAlloc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferAlloc(pGpu, pKernelGmmu, arg3, arg4) kgmmuFaultBufferAlloc_IMPL(pGpu, pKernelGmmu, arg3, arg4)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferCreateMemDesc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU64 arg5, MEMORY_DESCRIPTOR **arg6);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferCreateMemDesc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 arg4, NvU64 arg5, MEMORY_DESCRIPTOR **arg6) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferCreateMemDesc(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6) kgmmuFaultBufferCreateMemDesc_IMPL(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferGetAddressSpace_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 *arg4, NvU32 *arg5);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferGetAddressSpace(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3, NvU32 *arg4, NvU32 *arg5) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferGetAddressSpace(pGpu, pKernelGmmu, arg3, arg4, arg5) kgmmuFaultBufferGetAddressSpace_IMPL(pGpu, pKernelGmmu, arg3, arg4, arg5)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferFree_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferFree(pGpu, pKernelGmmu, arg3) kgmmuFaultBufferFree_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, arg3) kgmmuFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferAllocate(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferAllocate_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferDestroy(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferDestroy_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferRegister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferRegister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferRegister_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferUnregister(pGpu, pKernelGmmu, arg3) kgmmuClientShadowFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferPagesDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3, FAULT_BUFFER_TYPE arg4);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferPagesDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3, FAULT_BUFFER_TYPE arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, arg3, arg4) kgmmuClientShadowFaultBufferPagesDestroy_IMPL(pGpu, pKernelGmmu, arg3, arg4)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferQueueDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3, FAULT_BUFFER_TYPE arg4);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferQueueDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3, FAULT_BUFFER_TYPE arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, arg3, arg4) kgmmuClientShadowFaultBufferQueueDestroy_IMPL(pGpu, pKernelGmmu, arg3, arg4)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetSizeOfPageTables_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg3, NvU64 arg4, NvU64 arg5, NvU64 arg6);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetSizeOfPageTables(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg3, NvU64 arg4, NvU64 arg5, NvU64 arg6) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6) kgmmuGetSizeOfPageTables_IMPL(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetSizeOfPageDirs_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg3, NvU64 arg4, NvU64 arg5, NvU64 arg6);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetSizeOfPageDirs(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg3, NvU64 arg4, NvU64 arg5, NvU64 arg6) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6) kgmmuGetSizeOfPageDirs_IMPL(pGpu, pKernelGmmu, arg3, arg4, arg5, arg6)
#endif //__nvoc_kern_gmmu_h_disabled

GMMU_APERTURE kgmmuGetExternalAllocAperture_IMPL(NvU32 addressSpace);

#define kgmmuGetExternalAllocAperture(addressSpace) kgmmuGetExternalAllocAperture_IMPL(addressSpace)
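
//
// kgmmuGetExternalAllocAperture() takes only an address-space value (no GPU or
// KernelGmmu state), presumably mapping an RM address space to the
// GMMU_APERTURE used when encoding PTEs for externally allocated memory. A
// hedged sketch, assuming the ADDR_* address-space constants from the RM
// headers:
//
//     GMMU_APERTURE aperture = kgmmuGetExternalAllocAperture(ADDR_SYSMEM);
//
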
void kgmmuEncodePhysAddrs_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuEncodePhysAddrs(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodePhysAddrs(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count) kgmmuEncodePhysAddrs_IMPL(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuEncodePhysAddr_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuEncodePhysAddr(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, fabricBaseAddress) kgmmuEncodePhysAddr_IMPL(pKernelGmmu, aperture, physAddr, fabricBaseAddress)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuAccessCntrChangeIntrOwnership_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuAccessCntrChangeIntrOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuAccessCntrChangeIntrOwnership(pGpu, pKernelGmmu, arg3) kgmmuAccessCntrChangeIntrOwnership_IMPL(pGpu, pKernelGmmu, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

void *kgmmuGetShadowFaultBufferCslContext_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void *kgmmuGetShadowFaultBufferCslContext(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetShadowFaultBufferCslContext(pGpu, pKernelGmmu, type) kgmmuGetShadowFaultBufferCslContext_IMPL(pGpu, pKernelGmmu, type)
#endif //__nvoc_kern_gmmu_h_disabled

NvS32 *kgmmuGetFatalFaultIntrPendingState_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvS32 *kgmmuGetFatalFaultIntrPendingState(struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFatalFaultIntrPendingState(pKernelGmmu, gfid) kgmmuGetFatalFaultIntrPendingState_IMPL(pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetHwFaultBufferPtr(pKernelGmmu, gfid, faultBufferIndex) kgmmuGetHwFaultBufferPtr_IMPL(pKernelGmmu, gfid, faultBufferIndex)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetFaultBufferGenCnt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU8 gfid);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetFaultBufferGenCnt(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultBufferGenCnt(pGpu, pKernelGmmu, gfid) kgmmuGetFaultBufferGenCnt_IMPL(pGpu, pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

#undef PRIVATE_FIELD


// defines for TLB Invalidation scope
#define NV_GMMU_INVAL_SCOPE_ALL_TLBS       0x00000000
#define NV_GMMU_INVAL_SCOPE_LINK_TLBS      0x00000001
#define NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS  0x00000002

// bit fields for uvmSharedIntrRmOwnsMask
#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY          NVBIT(0)
#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_ERROR           NVBIT(1)
#define RM_UVM_SHARED_INTR_MASK_MMU_ECC_UNCORRECTED_ERROR_NOTIFY   NVBIT(2)
#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY        NVBIT(3)
#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_OVERFLOW      NVBIT(4)
#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_NOTIFY     NVBIT(5)
#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_OVERFLOW   NVBIT(6)
#define RM_UVM_SHARED_INTR_MASK_MMU_OTHER_FAULT_NOTIFY             NVBIT(7)
#define RM_UVM_SHARED_INTR_MASK_ALL                                (NVBIT(8) - 1)
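
//
// A hedged sketch using the masks above: RM_UVM_SHARED_INTR_MASK_ALL is
// (NVBIT(8) - 1), i.e. the OR of all eight individual masks, so testing
// whether RM owns the replayable-fault notify interrupt might look like:
//
//     if (uvmSharedIntrRmOwnsMask & RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY)
//     {
//         // RM, rather than UVM, services this interrupt
//     }
//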

/*!
 * Constants used for UVM mirroring loops.
 */
#define GMMU_USER_PAGE_DIR_INDEX       0
#define GMMU_KERNEL_PAGE_DIR_INDEX     1
#define GMMU_MAX_PAGE_DIR_INDEX_COUNT  (GMMU_KERNEL_PAGE_DIR_INDEX + 1)
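
//
// A minimal, hedged sketch of the mirroring loop these constants are intended
// for (loop body illustrative only):
//
//     for (NvU32 pdIdx = GMMU_USER_PAGE_DIR_INDEX;
//          pdIdx < GMMU_MAX_PAGE_DIR_INDEX_COUNT;
//          pdIdx++)
//     {
//         // visit the user (0) and kernel (1) page directory copies
//     }
//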

/*!
 * Page table walker callbacks used for map/unmap operations.
 */
extern const MMU_WALK_CALLBACKS  g_gmmuWalkCallbacks;
extern const MMU_WALK_CALLBACKS  g_bar2WalkCallbacks;
extern const MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks;

void       gmmuMemDescCacheFree(GVAS_GPU_STATE *pGpuState);

#endif // KERN_GMMU_H

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_KERN_GMMU_NVOC_H_