#ifndef _G_KERN_GMMU_NVOC_H_
#define _G_KERN_GMMU_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
*
*       Kernel GMMU module header
*       Defines and structures used on CPU RM for the GMMU object.
*
******************************************************************************/

#include "g_kern_gmmu_nvoc.h"

#ifndef KERN_GMMU_H
#define KERN_GMMU_H

#include "core/core.h"
#include "core/strict.h"
#include "nvtypes.h"
#include "nvoc/prelude.h"
#include "nvoc/object.h"
#include "gpu/mmu/mmu_trace.h"
#include "mmu/gmmu_fmt.h"
#include "class/cl90f1.h"    // FERMI_VASPACE_A

#include "gpu/gpu_timeout.h"
#include "containers/queue.h"
#include "gpu/eng_state.h"
#include "gpu/intr/intr_service.h"
#include "gpu/fifo/kernel_fifo.h"
#include "gpu/mem_mgr/virt_mem_allocator_common.h" // RM_PAGE_SIZE_64K
#include "mmu/mmu_walk.h"

#include "gpu/gpu_halspec.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"  // NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS

#include "class/clc369.h" // MMU_FAULT_BUFFER

typedef struct COMPR_INFO COMPR_INFO;

typedef struct GVAS_GPU_STATE GVAS_GPU_STATE;

typedef struct _fifo_mmu_exception_data FIFO_MMU_EXCEPTION_DATA;

/*!
 * Family of GMMU formats sharing the same version and PDE/PTE defines
 * but with differing big page sizes.
 * The term "family" is used here in the mathematical (set theory) sense.
 *
 * nv4kPte: GV100+ supports NV4K encoding; see @ref gmmuStateInitHal_GV100 for details.
 *
 */
typedef struct
{
    GMMU_FMT_PDE_MULTI pdeMulti;
    GMMU_FMT_PDE       pde;
    GMMU_FMT_PTE       pte;
    GMMU_ENTRY_VALUE   sparsePte;
    GMMU_ENTRY_VALUE   sparsePde;
    GMMU_ENTRY_VALUE   sparsePdeMulti;
    GMMU_ENTRY_VALUE   nv4kPte;
    GMMU_ENTRY_VALUE   bug2720120WarPde0;
    GMMU_ENTRY_VALUE   bug2720120WarPde1;
    GMMU_FMT          *pFmts[GMMU_FMT_MAX_BIG_PAGE_SIZES];
} GMMU_FMT_FAMILY;
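
/*
 * Illustrative sketch, not part of the generated interface: pFmts[] holds one
 * GMMU_FMT per supported big page size, all sharing the family's version and
 * PDE/PTE defines. A hypothetical caller holding pFamily (a GMMU_FMT_FAMILY*)
 * could walk the per-big-page-size formats like this:
 *
 *     for (NvU32 i = 0; i < GMMU_FMT_MAX_BIG_PAGE_SIZES; i++)
 *     {
 *         GMMU_FMT *pFmt = pFamily->pFmts[i];
 *         if (pFmt != NULL)
 *         {
 *             // Entries differ only in big page size; the format version and
 *             // PDE/PTE defines are shared across the family.
 *         }
 *     }
 */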

/*!
 * This structure contains information needed for issuing a TLB invalidate.
 */
typedef struct
{
    RmPhysAddr pdbAddress;
    NvU32      pdbAperture;
    NvU32      gfid;
    NvU32      regVal;
    RMTIMEOUT  timeout;
} TLB_INVALIDATE_PARAMS;

typedef enum
{
    NON_REPLAYABLE_FAULT_BUFFER = 0,
    REPLAYABLE_FAULT_BUFFER,
    // This should always be the last entry.
    NUM_FAULT_BUFFERS
} FAULT_BUFFER_TYPE;
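
/*
 * Assumed usage note: NUM_FAULT_BUFFERS is kept as the last enumerator so that
 * code can size per-buffer-type arrays and iterate over both buffer types,
 * e.g. with a loop of the form:
 *
 *     for (NvU32 i = 0; i < NUM_FAULT_BUFFERS; i++)
 *     {
 *         // operate on the i-th (non-replayable or replayable) fault buffer
 *     }
 */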

/*!
 * This structure holds information about a page
 * of memory backing the fault buffer.
 */
typedef struct
{
    /*! Virtual address of this page */
    NvP64 pAddress;

    /*! Cookie returned by memdescMap() */
    NvP64 pPriv;
} GMMU_FAULT_BUFFER_PAGE;

/*!
 * This structure holds information about the MMU HW fault buffer, which is mapped on BAR2
 * and is used by the MMU to report MMU faults to SW.
 */
struct HW_FAULT_BUFFER
{
    NvU64 bar2FaultBufferAddr;
    MEMORY_DESCRIPTOR *pFaultBufferMemDesc;
    /*!
     * Cookie that is stored for the CPU mapping
     */
    NvP64 hCpuFaultBuffer;
    NvP64 kernelVaddr;

    GMMU_FAULT_BUFFER_PAGE *pBufferPages;

    NvU32 cachedGetIndex;

    /*!
     * Cached fault buffer size
     */
    NvU32 faultBufferSize;
};

/*!
 * This structure holds information shared between CPU-RM
 * and GSP-RM.
 */
typedef struct
{
    /*!
     * The GET index of the replayable shadow buffer. This
     * is updated by the UVM driver and read by GSP-RM.
     */
    NvU32 swGetIndex;
} FAULT_BUFFER_SHARED_MEMORY;

/*!
 * This structure holds information about the client shadow fault buffer.
 */
typedef struct
{
    /*!
     * Pointer to the circular queue structure shared by the RM with a
     * privileged client, used as the shadow fault buffer for holding
     * non-replayable faults.
     * This structure is shared between CPU-RM and GSP-RM in a
     * GSP-enabled driver.
     */
    NvP64 pQueue;

    /*! Memory descriptors associated with the queue. */
    MEMORY_DESCRIPTOR *pQueueMemDesc;

    NvP64 pQueueAddress;

    /*!
     * Execution context for the queue. Holds environment-specific
     * data that enables queue usage.
     */
    QueueContext queueContext;

    /*! Cookie returned by memdescMap() */
    NvP64 pQueuePriv;

    /*! Memory descriptor associated with the buffer. */
    MEMORY_DESCRIPTOR *pBufferMemDesc;

    NvP64 pBufferAddress;

    /*! Cookie returned by memdescMap() */
    NvP64 pBufferPriv;

    /*! GSP-only split mapping of the buffer. */
    GMMU_FAULT_BUFFER_PAGE *pBufferPages;

    NvU32 numBufferPages;

    /*!
     * Start index of the page containing the fault buffer metadata.
     * 0 if no metadata is present.
     */
    NvU32 metadataStartIndex;

    /*!
     * Used only by the replayable fault buffer. Memory descriptor used to
     * describe shared memory between CPU-RM and GSP-RM.
     */
    MEMORY_DESCRIPTOR *pFaultBufferSharedMemDesc;

    NvP64 pFaultBufferSharedMemoryAddress;

    NvP64 pFaultBufferSharedMemoryPriv;

    NvP64 pFaultBufferMetadataAddress;

} GMMU_CLIENT_SHADOW_FAULT_BUFFER;

/*!
 * Top-level structure containing all data structures used in MMU fault handling.
 */
struct GMMU_FAULT_BUFFER
{
    struct HW_FAULT_BUFFER hwFaultBuffers[NUM_FAULT_BUFFERS];

    /*!
     * Unique client and object handles stored here.
     * On VOLTA these are for MMU_FAULT_BUFFER; on PASCAL for MAXWELL_FAULT_BUFFER_A.
     */
    NvHandle hFaultBufferClient;
    NvHandle hFaultBufferObject;

    /*!
     * Pointer to the circular queue structure used as the shadow fault buffer for
     * holding fatal fault packets serviced by RM.
     */
    NvP64 pRmShadowFaultBuffer;

    /*!
     * Client shadow fault buffer data and pointers, protected by GPU locks.
     * A client may allocate up to 2 shadow buffers, one each for replayable and
     * non-replayable faults.
     */
    GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer[NUM_FAULT_BUFFERS];
    GMMU_CLIENT_SHADOW_FAULT_BUFFER clientShadowFaultBuffer[NUM_FAULT_BUFFERS];

    /*!
     * Spinlock to protect the shadow buffer pointers
     */
    PORT_SPINLOCK *pShadowFaultBufLock;

    /*!
     * Flag indicating a fatal fault interrupt is pending
     */
    NvS32 fatalFaultIntrPending;

    /*! Generational counter for fault buffer. Incremented when the fault buffer wraps around. */
    volatile NvU64 faultBufferGenerationCounter;
};

typedef struct GMMU_FAULT_PACKET
{
    // 32-byte MMU fault packet
    NvU8 faultPacket[NVC369_BUF_SIZE];
} GMMU_FAULT_PACKET;

// Circular queue type (GMMU_SHADOW_FAULT_BUF) used as the MMU shadow fault buffer.
MAKE_QUEUE_CIRCULAR(GMMU_SHADOW_FAULT_BUF, GMMU_FAULT_PACKET);

#define GMMU_FAULT_PACKET_METADATA_SIZE                32
#define GMMU_FAULT_PACKET_METADATA_AUTHTAG_IDX          0
#define GMMU_FAULT_PACKET_METADATA_AUTHTAG_SIZE        16
#define GMMU_FAULT_PACKET_METADATA_VALID_IDX           16
#define GMMU_FAULT_PACKET_METADATA_VALID_SIZE           1
#define GMMU_FAULT_PACKET_METADATA_VALID_YES      NV_TRUE
#define GMMU_FAULT_PACKET_METADATA_VALID_NO      NV_FALSE
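
/*
 * Implied layout of the 32-byte metadata blob, derived purely from the defines
 * above (bytes not covered by a define are unspecified in this header):
 *
 *     bytes  0..15 : authentication tag (AUTHTAG_IDX, AUTHTAG_SIZE)
 *     byte  16     : valid flag, NV_TRUE/NV_FALSE (VALID_IDX, VALID_SIZE)
 *     bytes 17..31 : not described here
 */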

typedef struct GMMU_FAULT_PACKET_METADATA
{
    NvU8 metadata[GMMU_FAULT_PACKET_METADATA_SIZE];
} GMMU_FAULT_PACKET_METADATA;

/*!
 * Structure that holds the different parameters passed by an engine to
 * kgmmuInstBlkInit() for initializing its instance block.
 */
typedef struct
{
    NvBool               bIsClientAdmin;
    NvBool               bIsFaultReplayable;
    /*
     * Defer the bus flush during instance block init.
     * If this field is set, the kgmmuInstBlkInit() routine won't flush after the CPU writes;
     * the caller of kgmmuInstBlkInit() has to flush explicitly.
     * This is useful if the caller does back-to-back updates to the instance block,
     * e.g. subcontext array init during channel setup.
     */
    NvBool               bDeferFlush;
    NvU64                uvmKernelPrivRegion;

    // Instance block is being updated for a zombie subcontext.
    NvBool               bIsZombieSubctx;
    NvU8                *pInstBlk;      // VA of instance block.
} INST_BLK_INIT_PARAMS, *PINST_BLK_INIT_PARAMS;
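
/*
 * Hypothetical usage sketch (everything other than the struct fields is a
 * placeholder): a caller doing back-to-back instance block updates, e.g.
 * initializing a subcontext array during channel setup, can defer the bus
 * flush on each call and issue a single explicit flush once all CPU writes
 * are done.
 *
 *     INST_BLK_INIT_PARAMS params = {0};
 *     params.bIsClientAdmin = NV_TRUE;   // caller-specific
 *     params.bDeferFlush    = NV_TRUE;   // skip the per-call flush
 *     // ... call kgmmuInstBlkInit() once per subcontext with &params ...
 *     // ... then perform one explicit bus flush covering all the writes ...
 */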

typedef enum
{
    fault_invalidPde              = 0x00000000,
    fault_invalidPdeSize          = 0x00000001,
    fault_invalidPte              = 0x00000002,
    fault_limitViolation          = 0x00000003,
    fault_unboundInstBlock        = 0x00000004,
    fault_privViolation           = 0x00000005,
    fault_write                   = 0x00000006,
    fault_read                    = 0x00000007,
    fault_pitchMaskViolation      = 0x00000008,
    fault_workCreation            = 0x00000009,
    fault_unsupportedAperture     = 0x0000000a,
    fault_compressionFailure      = 0x0000000b,
    fault_cc_violation            = 0x0000000b,
    fault_unsupportedKind         = 0x0000000c,
    fault_regionViolation         = 0x0000000d,
    fault_poison                  = 0x0000000e,
    fault_atomic                  = 0x0000000f
} FAULT_TYPE;
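
// Note: fault_compressionFailure and fault_cc_violation share the HW encoding
// 0x0000000b; which name applies presumably depends on the GPU architecture.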

typedef struct
{
    INST_BLOCK_DESC         mmuFaultInstBlock;
    NvU64                   mmuFaultAddress;
    NvU64                   mmuFaultTimestamp;
    FAULT_TYPE              mmuFaultType;
    NvU32                   mmuFaultAccessType;
    NvU32                   mmuFaultEngineId;
    NvU32                   mmuFaultClientId;
    NvU32                   mmuFaultClientType;
    NvU32                   mmuFaultGpcId;
    NvU8                    bFaultEntryValid        : 1;
    NvU8                    bFaultInProtectedMode   : 1;
    NvU8                    bFaultTypeReplayable    : 1;
    NvU8                    bReplayableFaultEn      : 1;
} MMU_FAULT_BUFFER_ENTRY;

/*!
 * This structure contains information needed for a targeted fault cancel.
 * It is passed in by UVM using SW methods (cl0076.h).
 */
typedef struct
{
    NvU32 clientId;
    NvU32 gpcId;
    INST_BLOCK_DESC instBlock;
} GMMU_FAULT_CANCEL_INFO;

#define VMMU_MAX_GFID 64


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
372 
373 struct KernelGmmu {
374     const struct NVOC_RTTI *__nvoc_rtti;
375     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
376     struct IntrService __nvoc_base_IntrService;
377     struct Object *__nvoc_pbase_Object;
378     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
379     struct IntrService *__nvoc_pbase_IntrService;
380     struct KernelGmmu *__nvoc_pbase_KernelGmmu;
381     NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR);
382     NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *);
383     NV_STATUS (*__kgmmuStateLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
384     NV_STATUS (*__kgmmuStateUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
385     NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
386     NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
387     void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *);
388     void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *);
389     NvBool (*__kgmmuClearInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *);
390     NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *);
391     NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
392     NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);
393     NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *);
394     NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *);
395     void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
396     void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
397     void (*__kgmmuFmtInitPte__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);
398     void (*__kgmmuFmtInitPde__)(struct KernelGmmu *, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
399     NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu *, NvU32);
400     void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);
401     void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu *, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
402     NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu *);
403     NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
404     NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu *, NvU32, NvBool, NvU32 *);
405     NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
406     NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
407     NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
408     NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *, NvBool);
409     NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu *, NvBool);
410     NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
411     void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
412     NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *);
413     NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
414     NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
415     NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu *, NvU32);
416     NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu *, NvU32);
417     NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu *, NvU32);
418     NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE, NvBool);
419     NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu *, NvP64, NvP64);
420     void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32, NvU32);
421     GMMU_FAULT_PACKET *(*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32);
422     NvU32 (*__kgmmuCopyFaultPacketToClientShadowBuffer__)(OBJGPU *, struct KernelGmmu *, struct GMMU_FAULT_BUFFER *, FAULT_BUFFER_TYPE, NvU32, NvU32, NvU32, struct THREAD_STATE_NODE *, NvU32 *);
423     NvBool (*__kgmmuIsReplayableShadowFaultBufferFull__)(OBJGPU *, struct KernelGmmu *, GMMU_CLIENT_SHADOW_FAULT_BUFFER *, NvU32, NvU32);
424     NvU32 (*__kgmmuReadClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE);
425     void (*__kgmmuWriteClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE, NvU32);
426     NvU32 (*__kgmmuGetMinCeEngineId__)(struct KernelGmmu *);
427     NvU32 (*__kgmmuGetMaxCeEngineId__)(OBJGPU *, struct KernelGmmu *);
428     NV_STATUS (*__kgmmuFaultBufferMap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
429     NV_STATUS (*__kgmmuFaultBufferUnmap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
430     NV_STATUS (*__kgmmuFaultBufferInit__)(OBJGPU *, struct KernelGmmu *);
431     NV_STATUS (*__kgmmuFaultBufferDestroy__)(OBJGPU *, struct KernelGmmu *);
432     NV_STATUS (*__kgmmuFaultBufferLoad__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
433     NV_STATUS (*__kgmmuFaultBufferUnload__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
434     NV_STATUS (*__kgmmuEnableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
435     NV_STATUS (*__kgmmuDisableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
436     NvU32 (*__kgmmuSetAndGetDefaultFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE, NvU32);
437     void (*__kgmmuReadMmuFaultInstHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
438     void (*__kgmmuReadMmuFaultAddrHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
439     NvU32 (*__kgmmuReadMmuFaultInfo__)(OBJGPU *, struct KernelGmmu *);
440     void (*__kgmmuWriteMmuFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32);
441     void (*__kgmmuWriteMmuFaultBufferHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32, NvU32);
442     NV_STATUS (*__kgmmuEnableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
443     NV_STATUS (*__kgmmuDisableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
444     NV_STATUS (*__kgmmuEnableMmuFaultOverflowIntr__)(OBJGPU *, struct KernelGmmu *, NvU32);
445     void (*__kgmmuSignExtendFaultAddress__)(OBJGPU *, struct KernelGmmu *, NvU64 *);
446     NV_STATUS (*__kgmmuGetFaultType__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_TYPE *);
447     NvBool (*__kgmmuIsP2PUnboundInstFault__)(struct KernelGmmu *, NvU32, NvU32);
448     NV_STATUS (*__kgmmuServiceVfPriFaults__)(OBJGPU *, struct KernelGmmu *, NvU32);
449     NvBool (*__kgmmuTestVidmemAccessBitBufferError__)(OBJGPU *, struct KernelGmmu *, NvU32);
450     void (*__kgmmuDisableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
451     NV_STATUS (*__kgmmuEnableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
452     void (*__kgmmuClearAccessCounterWriteNak__)(OBJGPU *, struct KernelGmmu *);
453     NV_STATUS (*__kgmmuServiceMthdBuffFaultInBar2Fault__)(OBJGPU *, struct KernelGmmu *);
454     NV_STATUS (*__kgmmuFaultCancelTargeted__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *);
455     NV_STATUS (*__kgmmuFaultCancelIssueInvalidate__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *, TLB_INVALIDATE_PARAMS *, NvBool);
456     NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
457     NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
458     NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
459     NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
460     NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
461     NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
462     void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *);
463     NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *);
464     NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *);
465     NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *);
466     NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
467     NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
468     NvBool PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE;
469     NvBool PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE;
470     NvBool bReportFlaTranslationXid;
471     NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
472     NvU64 defaultBigPageSize;
473     NvU32 uvmSharedIntrRmOwnsMask;
474     GMMU_FMT_FAMILY *PRIVATE_FIELD(pFmtFamilies)[3];
475     NvU32 PRIVATE_FIELD(PDEAperture);
476     NvU32 PRIVATE_FIELD(PDEAttr);
477     NvU32 PRIVATE_FIELD(PDEBAR1Aperture);
478     NvU32 PRIVATE_FIELD(PDEBAR1Attr);
479     NvU32 PRIVATE_FIELD(PTEAperture);
480     NvU32 PRIVATE_FIELD(PTEAttr);
481     NvU32 PRIVATE_FIELD(PTEBAR1Aperture);
482     NvU32 PRIVATE_FIELD(PTEBAR1Attr);
483     NvU64 PRIVATE_FIELD(overrideBigPageSize);
484     NvBool PRIVATE_FIELD(bEnablePerVaspaceBigPage);
485     NvBool PRIVATE_FIELD(bIgnoreHubTlbInvalidate);
486     NvU64 PRIVATE_FIELD(maxVASize);
487     struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pdeApertures)[5];
488     struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pteApertures)[5];
489     MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarSmallPageTable);
490     MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarPageDirectory0);
491     struct GMMU_FAULT_BUFFER PRIVATE_FIELD(mmuFaultBuffer)[64];
492     NvU64 PRIVATE_FIELD(sysmemBaseAddress);
493     NvU32 PRIVATE_FIELD(minCeMmuFaultId);
494     NvU32 PRIVATE_FIELD(maxCeMmuFaultId);
495     NvBool PRIVATE_FIELD(bHugePageSupported);
496     NvBool PRIVATE_FIELD(bPageSize512mbSupported);
497     NvBool PRIVATE_FIELD(bBug2720120WarEnabled);
498     NvBool PRIVATE_FIELD(bVaspaceInteropSupported);
499 };
500 
501 struct KernelGmmu_PRIVATE {
502     const struct NVOC_RTTI *__nvoc_rtti;
503     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
504     struct IntrService __nvoc_base_IntrService;
505     struct Object *__nvoc_pbase_Object;
506     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
507     struct IntrService *__nvoc_pbase_IntrService;
508     struct KernelGmmu *__nvoc_pbase_KernelGmmu;
509     NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR);
510     NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *);
511     NV_STATUS (*__kgmmuStateLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
512     NV_STATUS (*__kgmmuStateUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
513     NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
514     NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
515     void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *);
516     void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *);
517     NvBool (*__kgmmuClearInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *);
518     NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *);
519     NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
520     NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);
521     NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *);
522     NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *);
523     void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
524     void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
525     void (*__kgmmuFmtInitPte__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);
526     void (*__kgmmuFmtInitPde__)(struct KernelGmmu *, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
527     NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu *, NvU32);
528     void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);
529     void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu *, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
530     NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu *);
531     NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
532     NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu *, NvU32, NvBool, NvU32 *);
533     NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
534     NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
535     NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
536     NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *, NvBool);
537     NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu *, NvBool);
538     NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
539     void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
540     NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *);
541     NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
542     NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
543     NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu *, NvU32);
544     NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu *, NvU32);
545     NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu *, NvU32);
546     NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE, NvBool);
547     NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu *, NvP64, NvP64);
548     void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32, NvU32);
549     GMMU_FAULT_PACKET *(*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32);
550     NvU32 (*__kgmmuCopyFaultPacketToClientShadowBuffer__)(OBJGPU *, struct KernelGmmu *, struct GMMU_FAULT_BUFFER *, FAULT_BUFFER_TYPE, NvU32, NvU32, NvU32, struct THREAD_STATE_NODE *, NvU32 *);
551     NvBool (*__kgmmuIsReplayableShadowFaultBufferFull__)(OBJGPU *, struct KernelGmmu *, GMMU_CLIENT_SHADOW_FAULT_BUFFER *, NvU32, NvU32);
552     NvU32 (*__kgmmuReadClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE);
553     void (*__kgmmuWriteClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE, NvU32);
554     NvU32 (*__kgmmuGetMinCeEngineId__)(struct KernelGmmu *);
555     NvU32 (*__kgmmuGetMaxCeEngineId__)(OBJGPU *, struct KernelGmmu *);
556     NV_STATUS (*__kgmmuFaultBufferMap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
557     NV_STATUS (*__kgmmuFaultBufferUnmap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
558     NV_STATUS (*__kgmmuFaultBufferInit__)(OBJGPU *, struct KernelGmmu *);
559     NV_STATUS (*__kgmmuFaultBufferDestroy__)(OBJGPU *, struct KernelGmmu *);
560     NV_STATUS (*__kgmmuFaultBufferLoad__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
561     NV_STATUS (*__kgmmuFaultBufferUnload__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
562     NV_STATUS (*__kgmmuEnableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
563     NV_STATUS (*__kgmmuDisableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
564     NvU32 (*__kgmmuSetAndGetDefaultFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE, NvU32);
565     void (*__kgmmuReadMmuFaultInstHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
566     void (*__kgmmuReadMmuFaultAddrHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
567     NvU32 (*__kgmmuReadMmuFaultInfo__)(OBJGPU *, struct KernelGmmu *);
568     void (*__kgmmuWriteMmuFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32);
569     void (*__kgmmuWriteMmuFaultBufferHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32, NvU32);
570     NV_STATUS (*__kgmmuEnableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
571     NV_STATUS (*__kgmmuDisableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
572     NV_STATUS (*__kgmmuEnableMmuFaultOverflowIntr__)(OBJGPU *, struct KernelGmmu *, NvU32);
573     void (*__kgmmuSignExtendFaultAddress__)(OBJGPU *, struct KernelGmmu *, NvU64 *);
574     NV_STATUS (*__kgmmuGetFaultType__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_TYPE *);
575     NvBool (*__kgmmuIsP2PUnboundInstFault__)(struct KernelGmmu *, NvU32, NvU32);
576     NV_STATUS (*__kgmmuServiceVfPriFaults__)(OBJGPU *, struct KernelGmmu *, NvU32);
577     NvBool (*__kgmmuTestVidmemAccessBitBufferError__)(OBJGPU *, struct KernelGmmu *, NvU32);
578     void (*__kgmmuDisableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
579     NV_STATUS (*__kgmmuEnableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
580     void (*__kgmmuClearAccessCounterWriteNak__)(OBJGPU *, struct KernelGmmu *);
581     NV_STATUS (*__kgmmuServiceMthdBuffFaultInBar2Fault__)(OBJGPU *, struct KernelGmmu *);
582     NV_STATUS (*__kgmmuFaultCancelTargeted__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *);
583     NV_STATUS (*__kgmmuFaultCancelIssueInvalidate__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *, TLB_INVALIDATE_PARAMS *, NvBool);
584     NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
585     NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
586     NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
587     NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
588     NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
589     NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
590     void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *);
591     NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *);
592     NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *);
593     NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *);
594     NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
595     NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
596     NvBool PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE;
597     NvBool PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE;
598     NvBool bReportFlaTranslationXid;
599     NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
600     NvU64 defaultBigPageSize;
601     NvU32 uvmSharedIntrRmOwnsMask;
602     GMMU_FMT_FAMILY *pFmtFamilies[3];
603     NvU32 PDEAperture;
604     NvU32 PDEAttr;
605     NvU32 PDEBAR1Aperture;
606     NvU32 PDEBAR1Attr;
607     NvU32 PTEAperture;
608     NvU32 PTEAttr;
609     NvU32 PTEBAR1Aperture;
610     NvU32 PTEBAR1Attr;
611     NvU64 overrideBigPageSize;
612     NvBool bEnablePerVaspaceBigPage;
613     NvBool bIgnoreHubTlbInvalidate;
614     NvU64 maxVASize;
615     struct NV_FIELD_ENUM_ENTRY pdeApertures[5];
616     struct NV_FIELD_ENUM_ENTRY pteApertures[5];
617     MEMORY_DESCRIPTOR *pWarSmallPageTable;
618     MEMORY_DESCRIPTOR *pWarPageDirectory0;
619     struct GMMU_FAULT_BUFFER mmuFaultBuffer[64];
620     NvU64 sysmemBaseAddress;
621     NvU32 minCeMmuFaultId;
622     NvU32 maxCeMmuFaultId;
623     NvBool bHugePageSupported;
624     NvBool bPageSize512mbSupported;
625     NvBool bBug2720120WarEnabled;
626     NvBool bVaspaceInteropSupported;
627 };
628 
629 #ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__
630 #define __NVOC_CLASS_KernelGmmu_TYPEDEF__
631 typedef struct KernelGmmu KernelGmmu;
632 #endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */
633 
634 #ifndef __nvoc_class_id_KernelGmmu
635 #define __nvoc_class_id_KernelGmmu 0x29362f
636 #endif /* __nvoc_class_id_KernelGmmu */
637 
638 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu;
639 
640 #define __staticCast_KernelGmmu(pThis) \
641     ((pThis)->__nvoc_pbase_KernelGmmu)
642 
643 #ifdef __nvoc_kern_gmmu_h_disabled
644 #define __dynamicCast_KernelGmmu(pThis) ((KernelGmmu*)NULL)
645 #else //__nvoc_kern_gmmu_h_disabled
646 #define __dynamicCast_KernelGmmu(pThis) \
647     ((KernelGmmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGmmu)))
648 #endif //__nvoc_kern_gmmu_h_disabled
649 
650 #define PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE_BASE_CAST
651 #define PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE_BASE_NAME PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE
652 #define PDB_PROP_KGMMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
653 #define PDB_PROP_KGMMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
654 #define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_CAST
655 #define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_NAME PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED
656 #define PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE_BASE_CAST
657 #define PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE_BASE_NAME PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE
658 #define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_CAST
659 #define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_NAME PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED
660 
661 NV_STATUS __nvoc_objCreateDynamic_KernelGmmu(KernelGmmu**, Dynamic*, NvU32, va_list);
662 
663 NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
664 #define __objCreate_KernelGmmu(ppNewObj, pParent, createFlags) \
665     __nvoc_objCreate_KernelGmmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
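
/*
 * Illustrative sketch only; pParent and the error handling are placeholders,
 * not part of this header. It shows creating a KernelGmmu through the NVOC
 * helper declared above and recovering the typed pointer from a generic one.
 *
 *     KernelGmmu *pKernelGmmu = NULL;
 *     NV_STATUS   status = __objCreate_KernelGmmu(&pKernelGmmu, pParent, 0);
 *     if (status == NV_OK)
 *     {
 *         // For an object known to derive from KernelGmmu,
 *         // __staticCast_KernelGmmu()/__dynamicCast_KernelGmmu() return the
 *         // KernelGmmu pointer (the dynamic cast presumably yields NULL on a
 *         // type mismatch).
 *     }
 */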
666 
667 #define kgmmuConstructEngine(pGpu, pKernelGmmu, arg0) kgmmuConstructEngine_DISPATCH(pGpu, pKernelGmmu, arg0)
668 #define kgmmuStateInitLocked(pGpu, pKernelGmmu) kgmmuStateInitLocked_DISPATCH(pGpu, pKernelGmmu)
669 #define kgmmuStateLoad(pGpu, pKernelGmmu, arg0) kgmmuStateLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
670 #define kgmmuStateUnload(pGpu, pKernelGmmu, arg0) kgmmuStateUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
671 #define kgmmuStatePostLoad(pGpu, pKernelGmmu, arg0) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
672 #define kgmmuStatePostLoad_HAL(pGpu, pKernelGmmu, arg0) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
673 #define kgmmuStatePreUnload(pGpu, pKernelGmmu, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
674 #define kgmmuStatePreUnload_HAL(pGpu, pKernelGmmu, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
675 #define kgmmuStateDestroy(pGpu, pKernelGmmu) kgmmuStateDestroy_DISPATCH(pGpu, pKernelGmmu)
676 #define kgmmuRegisterIntrService(pGpu, pKernelGmmu, arg0) kgmmuRegisterIntrService_DISPATCH(pGpu, pKernelGmmu, arg0)
677 #define kgmmuClearInterrupt(pGpu, pKernelGmmu, pParams) kgmmuClearInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
678 #define kgmmuServiceInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
679 #define kgmmuServiceNotificationInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
680 #define kgmmuServiceNotificationInterrupt_HAL(pGpu, pKernelGmmu, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
681 #define kgmmuInstBlkVaLimitGet(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
682 #define kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
683 #define kgmmuSetTlbInvalidateMembarWarParameters(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
684 #define kgmmuSetTlbInvalidateMembarWarParameters_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
685 #define kgmmuSetTlbInvalidationScope(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
686 #define kgmmuSetTlbInvalidationScope_HAL(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
687 #define kgmmuFmtInitPteComptagLine(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
688 #define kgmmuFmtInitPteComptagLine_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
689 #define kgmmuFmtInitPeerPteFld(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
690 #define kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
691 #define kgmmuFmtInitPte(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
692 #define kgmmuFmtInitPte_HAL(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
693 #define kgmmuFmtInitPde(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
694 #define kgmmuFmtInitPde_HAL(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
695 #define kgmmuFmtIsVersionSupported(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
696 #define kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
697 #define kgmmuFmtInitLevels(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
698 #define kgmmuFmtInitLevels_HAL(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
699 #define kgmmuFmtInitPdeMulti(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
700 #define kgmmuFmtInitPdeMulti_HAL(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
701 #define kgmmuFmtFamiliesInit(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
702 #define kgmmuFmtFamiliesInit_HAL(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
703 #define kgmmuTranslatePtePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
704 #define kgmmuTranslatePtePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
705 #define kgmmuTranslatePtePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
706 #define kgmmuTranslatePtePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
707 #define kgmmuTranslatePdePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
708 #define kgmmuTranslatePdePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
709 #define kgmmuTranslatePdePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
710 #define kgmmuTranslatePdePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
711 #define kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
712 #define kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
713 #define kgmmuIssueReplayableFaultBufferFlush(pGpu, pKernelGmmu, bCopyAndFlush) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu, bCopyAndFlush)
714 #define kgmmuIssueReplayableFaultBufferFlush_HAL(pGpu, pKernelGmmu, bCopyAndFlush) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu, bCopyAndFlush)
715 #define kgmmuToggleFaultOnPrefetch(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
716 #define kgmmuToggleFaultOnPrefetch_HAL(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
717 #define kgmmuFaultBufferAllocSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
718 #define kgmmuFaultBufferAllocSharedMemory_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
719 #define kgmmuFaultBufferFreeSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
720 #define kgmmuFaultBufferFreeSharedMemory_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
721 #define kgmmuSetupWarForBug2720120(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam)
722 #define kgmmuSetupWarForBug2720120_HAL(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam)
723 #define kgmmuGetGraphicsEngineId(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
724 #define kgmmuGetGraphicsEngineId_HAL(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
725 #define kgmmuReadShadowBufPutIndex(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
726 #define kgmmuReadShadowBufPutIndex_HAL(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
727 #define kgmmuIsFaultEngineBar1(pKernelGmmu, arg0) kgmmuIsFaultEngineBar1_DISPATCH(pKernelGmmu, arg0)
728 #define kgmmuIsFaultEngineBar1_HAL(pKernelGmmu, arg0) kgmmuIsFaultEngineBar1_DISPATCH(pKernelGmmu, arg0)
729 #define kgmmuIsFaultEngineBar2(pKernelGmmu, arg0) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg0)
730 #define kgmmuIsFaultEngineBar2_HAL(pKernelGmmu, arg0) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg0)
731 #define kgmmuIsFaultEnginePhysical(pKernelGmmu, arg0) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg0)
732 #define kgmmuIsFaultEnginePhysical_HAL(pKernelGmmu, arg0) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg0)
733 #define kgmmuCopyMmuFaults(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit)
734 #define kgmmuCopyMmuFaults_HAL(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit)
735 #define kgmmuParseFaultPacket(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
736 #define kgmmuParseFaultPacket_HAL(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
737 #define kgmmuFaultBufferClearPackets(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
738 #define kgmmuFaultBufferClearPackets_HAL(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
739 #define kgmmuFaultBufferGetFault(pGpu, pKernelGmmu, pFaultBuffer, idx) kgmmuFaultBufferGetFault_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, idx)
740 #define kgmmuFaultBufferGetFault_HAL(pGpu, pKernelGmmu, pFaultBuffer, idx) kgmmuFaultBufferGetFault_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, idx)
741 #define kgmmuCopyFaultPacketToClientShadowBuffer(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied) kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied)
742 #define kgmmuCopyFaultPacketToClientShadowBuffer_HAL(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied) kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied)
743 #define kgmmuIsReplayableShadowFaultBufferFull(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries) kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries)
744 #define kgmmuIsReplayableShadowFaultBufferFull_HAL(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries) kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries)
745 #define kgmmuReadClientShadowBufPutIndex(pGpu, pKernelGmmu, gfid, type) kgmmuReadClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type)
746 #define kgmmuReadClientShadowBufPutIndex_HAL(pGpu, pKernelGmmu, gfid, type) kgmmuReadClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type)
747 #define kgmmuWriteClientShadowBufPutIndex(pGpu, pKernelGmmu, gfid, type, putIndex) kgmmuWriteClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type, putIndex)
748 #define kgmmuWriteClientShadowBufPutIndex_HAL(pGpu, pKernelGmmu, gfid, type, putIndex) kgmmuWriteClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type, putIndex)
749 #define kgmmuGetMinCeEngineId(pKernelGmmu) kgmmuGetMinCeEngineId_DISPATCH(pKernelGmmu)
750 #define kgmmuGetMinCeEngineId_HAL(pKernelGmmu) kgmmuGetMinCeEngineId_DISPATCH(pKernelGmmu)
751 #define kgmmuGetMaxCeEngineId(pGpu, pKernelGmmu) kgmmuGetMaxCeEngineId_DISPATCH(pGpu, pKernelGmmu)
752 #define kgmmuGetMaxCeEngineId_HAL(pGpu, pKernelGmmu) kgmmuGetMaxCeEngineId_DISPATCH(pGpu, pKernelGmmu)
753 #define kgmmuFaultBufferMap(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferMap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
754 #define kgmmuFaultBufferMap_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferMap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
755 #define kgmmuFaultBufferUnmap(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnmap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
756 #define kgmmuFaultBufferUnmap_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnmap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
757 #define kgmmuFaultBufferInit(pGpu, pKernelGmmu) kgmmuFaultBufferInit_DISPATCH(pGpu, pKernelGmmu)
758 #define kgmmuFaultBufferInit_HAL(pGpu, pKernelGmmu) kgmmuFaultBufferInit_DISPATCH(pGpu, pKernelGmmu)
759 #define kgmmuFaultBufferDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferDestroy_DISPATCH(pGpu, pKernelGmmu)
760 #define kgmmuFaultBufferDestroy_HAL(pGpu, pKernelGmmu) kgmmuFaultBufferDestroy_DISPATCH(pGpu, pKernelGmmu)
761 #define kgmmuFaultBufferLoad(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferLoad_DISPATCH(pGpu, pKernelGmmu, index, gfid)
762 #define kgmmuFaultBufferLoad_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferLoad_DISPATCH(pGpu, pKernelGmmu, index, gfid)
763 #define kgmmuFaultBufferUnload(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnload_DISPATCH(pGpu, pKernelGmmu, index, gfid)
764 #define kgmmuFaultBufferUnload_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnload_DISPATCH(pGpu, pKernelGmmu, index, gfid)
765 #define kgmmuEnableFaultBuffer(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuEnableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
766 #define kgmmuEnableFaultBuffer_HAL(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuEnableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
767 #define kgmmuDisableFaultBuffer(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuDisableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
768 #define kgmmuDisableFaultBuffer_HAL(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuDisableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
769 #define kgmmuSetAndGetDefaultFaultBufferSize(pGpu, pKernelGmmu, index, gfid) kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, index, gfid)
770 #define kgmmuSetAndGetDefaultFaultBufferSize_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, index, gfid)
771 #define kgmmuReadMmuFaultInstHiLo(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultInstHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
772 #define kgmmuReadMmuFaultInstHiLo_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultInstHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
773 #define kgmmuReadMmuFaultAddrHiLo(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
774 #define kgmmuReadMmuFaultAddrHiLo_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
775 #define kgmmuReadMmuFaultInfo(pGpu, pKernelGmmu) kgmmuReadMmuFaultInfo_DISPATCH(pGpu, pKernelGmmu)
776 #define kgmmuReadMmuFaultInfo_HAL(pGpu, pKernelGmmu) kgmmuReadMmuFaultInfo_DISPATCH(pGpu, pKernelGmmu)
777 #define kgmmuWriteMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, arg1, gfid) kgmmuWriteMmuFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, gfid)
778 #define kgmmuWriteMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg0, arg1, gfid) kgmmuWriteMmuFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, gfid)
779 #define kgmmuWriteMmuFaultBufferHiLo(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid) kgmmuWriteMmuFaultBufferHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid)
780 #define kgmmuWriteMmuFaultBufferHiLo_HAL(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid) kgmmuWriteMmuFaultBufferHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid)
781 #define kgmmuEnableMmuFaultInterrupts(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
782 #define kgmmuEnableMmuFaultInterrupts_HAL(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
783 #define kgmmuDisableMmuFaultInterrupts(pGpu, pKernelGmmu, index) kgmmuDisableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
784 #define kgmmuDisableMmuFaultInterrupts_HAL(pGpu, pKernelGmmu, index) kgmmuDisableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
785 #define kgmmuEnableMmuFaultOverflowIntr(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultOverflowIntr_DISPATCH(pGpu, pKernelGmmu, index)
786 #define kgmmuEnableMmuFaultOverflowIntr_HAL(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultOverflowIntr_DISPATCH(pGpu, pKernelGmmu, index)
787 #define kgmmuSignExtendFaultAddress(pGpu, pKernelGmmu, pMmuFaultAddress) kgmmuSignExtendFaultAddress_DISPATCH(pGpu, pKernelGmmu, pMmuFaultAddress)
788 #define kgmmuSignExtendFaultAddress_HAL(pGpu, pKernelGmmu, pMmuFaultAddress) kgmmuSignExtendFaultAddress_DISPATCH(pGpu, pKernelGmmu, pMmuFaultAddress)
789 #define kgmmuGetFaultType(pGpu, pKernelGmmu, fault, pMmuFaultType) kgmmuGetFaultType_DISPATCH(pGpu, pKernelGmmu, fault, pMmuFaultType)
790 #define kgmmuGetFaultType_HAL(pGpu, pKernelGmmu, fault, pMmuFaultType) kgmmuGetFaultType_DISPATCH(pGpu, pKernelGmmu, fault, pMmuFaultType)
791 #define kgmmuIsP2PUnboundInstFault(pKernelGmmu, arg0, arg1) kgmmuIsP2PUnboundInstFault_DISPATCH(pKernelGmmu, arg0, arg1)
792 #define kgmmuIsP2PUnboundInstFault_HAL(pKernelGmmu, arg0, arg1) kgmmuIsP2PUnboundInstFault_DISPATCH(pKernelGmmu, arg0, arg1)
793 #define kgmmuServiceVfPriFaults(pGpu, pKernelGmmu, faultType) kgmmuServiceVfPriFaults_DISPATCH(pGpu, pKernelGmmu, faultType)
794 #define kgmmuServiceVfPriFaults_HAL(pGpu, pKernelGmmu, faultType) kgmmuServiceVfPriFaults_DISPATCH(pGpu, pKernelGmmu, faultType)
795 #define kgmmuTestVidmemAccessBitBufferError(pGpu, pKernelGmmu, arg0) kgmmuTestVidmemAccessBitBufferError_DISPATCH(pGpu, pKernelGmmu, arg0)
796 #define kgmmuTestVidmemAccessBitBufferError_HAL(pGpu, pKernelGmmu, arg0) kgmmuTestVidmemAccessBitBufferError_DISPATCH(pGpu, pKernelGmmu, arg0)
797 #define kgmmuDisableVidmemAccessBitBuf(pGpu, pKernelGmmu) kgmmuDisableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
798 #define kgmmuDisableVidmemAccessBitBuf_HAL(pGpu, pKernelGmmu) kgmmuDisableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
799 #define kgmmuEnableVidmemAccessBitBuf(pGpu, pKernelGmmu) kgmmuEnableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
800 #define kgmmuEnableVidmemAccessBitBuf_HAL(pGpu, pKernelGmmu) kgmmuEnableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
801 #define kgmmuClearAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuClearAccessCounterWriteNak_DISPATCH(pGpu, pKernelGmmu)
802 #define kgmmuClearAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuClearAccessCounterWriteNak_DISPATCH(pGpu, pKernelGmmu)
803 #define kgmmuServiceMthdBuffFaultInBar2Fault(pGpu, pKernelGmmu) kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(pGpu, pKernelGmmu)
804 #define kgmmuServiceMthdBuffFaultInBar2Fault_HAL(pGpu, pKernelGmmu) kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(pGpu, pKernelGmmu)
805 #define kgmmuFaultCancelTargeted(pGpu, pKernelGmmu, arg0) kgmmuFaultCancelTargeted_DISPATCH(pGpu, pKernelGmmu, arg0)
806 #define kgmmuFaultCancelTargeted_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultCancelTargeted_DISPATCH(pGpu, pKernelGmmu, arg0)
807 #define kgmmuFaultCancelIssueInvalidate(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal) kgmmuFaultCancelIssueInvalidate_DISPATCH(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal)
808 #define kgmmuFaultCancelIssueInvalidate_HAL(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal) kgmmuFaultCancelIssueInvalidate_DISPATCH(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal)
809 #define kgmmuServiceMmuFault(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData) kgmmuServiceMmuFault_DISPATCH(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData)
810 #define kgmmuServiceMmuFault_HAL(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData) kgmmuServiceMmuFault_DISPATCH(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData)
811 #define kgmmuServiceUnboundInstBlockFault(pGpu, pKernelGmmu, arg0, arg1) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
812 #define kgmmuServiceUnboundInstBlockFault_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
813 #define kgmmuGetEccCounts(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
814 #define kgmmuGetEccCounts_HAL(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
815 #define kgmmuStatePreLoad(pGpu, pEngstate, arg0) kgmmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
816 #define kgmmuStatePostUnload(pGpu, pEngstate, arg0) kgmmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
817 #define kgmmuStateInitUnlocked(pGpu, pEngstate) kgmmuStateInitUnlocked_DISPATCH(pGpu, pEngstate)
818 #define kgmmuInitMissing(pGpu, pEngstate) kgmmuInitMissing_DISPATCH(pGpu, pEngstate)
819 #define kgmmuStatePreInitLocked(pGpu, pEngstate) kgmmuStatePreInitLocked_DISPATCH(pGpu, pEngstate)
820 #define kgmmuStatePreInitUnlocked(pGpu, pEngstate) kgmmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
821 #define kgmmuIsPresent(pGpu, pEngstate) kgmmuIsPresent_DISPATCH(pGpu, pEngstate)
822 static inline NvU32 kgmmuService_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
823     return 0;
824 }
825 
826 
827 #ifdef __nvoc_kern_gmmu_h_disabled
828 static inline NvU32 kgmmuService(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
829     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
830     return 0;
831 }
832 #else //__nvoc_kern_gmmu_h_disabled
833 #define kgmmuService(pGpu, pKernelGmmu) kgmmuService_4a4dee(pGpu, pKernelGmmu)
834 #endif //__nvoc_kern_gmmu_h_disabled
835 
836 #define kgmmuService_HAL(pGpu, pKernelGmmu) kgmmuService(pGpu, pKernelGmmu)
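/*
 * Usage sketch (illustrative, assumes a valid pGpu/pKernelGmmu pair): the _HAL
 * wrapper expands to kgmmuService(), which maps to the generated stub
 * kgmmuService_4a4dee() (always returns 0) when KernelGmmu is enabled, or to
 * the asserting placeholder above when __nvoc_kern_gmmu_h_disabled is defined.
 *
 *     NvU32 intrPending = kgmmuService_HAL(pGpu, pKernelGmmu);
 */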
837 
838 NvU64 kgmmuGetMaxBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);
839 
840 
841 #ifdef __nvoc_kern_gmmu_h_disabled
842 static inline NvU64 kgmmuGetMaxBigPageSize(struct KernelGmmu *pKernelGmmu) {
843     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
844     return 0;
845 }
846 #else //__nvoc_kern_gmmu_h_disabled
847 #define kgmmuGetMaxBigPageSize(pKernelGmmu) kgmmuGetMaxBigPageSize_GM107(pKernelGmmu)
848 #endif //__nvoc_kern_gmmu_h_disabled
849 
850 #define kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) kgmmuGetMaxBigPageSize(pKernelGmmu)
851 
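/* Note: 37105 below is 0x90F1, the FERMI_VASPACE_A class value. */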
852 static inline NvU32 kgmmuGetVaspaceClass_f515df(struct KernelGmmu *pKernelGmmu) {
853     return (37105);
854 }
855 
856 
857 #ifdef __nvoc_kern_gmmu_h_disabled
858 static inline NvU32 kgmmuGetVaspaceClass(struct KernelGmmu *pKernelGmmu) {
859     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
860     return 0;
861 }
862 #else //__nvoc_kern_gmmu_h_disabled
863 #define kgmmuGetVaspaceClass(pKernelGmmu) kgmmuGetVaspaceClass_f515df(pKernelGmmu)
864 #endif //__nvoc_kern_gmmu_h_disabled
865 
866 #define kgmmuGetVaspaceClass_HAL(pKernelGmmu) kgmmuGetVaspaceClass(pKernelGmmu)
867 
868 NV_STATUS kgmmuInstBlkAtsGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData);
869 
870 
871 #ifdef __nvoc_kern_gmmu_h_disabled
872 static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData) {
873     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
874     return NV_ERR_NOT_SUPPORTED;
875 }
876 #else //__nvoc_kern_gmmu_h_disabled
877 #define kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet_GV100(pKernelGmmu, pVAS, subctxid, pOffset, pData)
878 #endif //__nvoc_kern_gmmu_h_disabled
879 
880 #define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData)
881 
882 static inline NV_STATUS kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
883     return NV_ERR_NOT_SUPPORTED;
884 }
885 
886 
887 #ifdef __nvoc_kern_gmmu_h_disabled
888 static inline NV_STATUS kgmmuInstBlkMagicValueGet(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
889     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
890     return NV_ERR_NOT_SUPPORTED;
891 }
892 #else //__nvoc_kern_gmmu_h_disabled
893 #define kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_46f6a7(pKernelGmmu, pOffset, pData)
894 #endif //__nvoc_kern_gmmu_h_disabled
895 
896 #define kgmmuInstBlkMagicValueGet_HAL(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData)
897 
898 NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);
899 
900 
901 #ifdef __nvoc_kern_gmmu_h_disabled
902 static inline NV_STATUS kgmmuInstBlkPageDirBaseGet(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi) {
903     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
904     return NV_ERR_NOT_SUPPORTED;
905 }
906 #else //__nvoc_kern_gmmu_h_disabled
907 #define kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet_GV100(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
908 #endif //__nvoc_kern_gmmu_h_disabled
909 
910 #define kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
911 
912 NvU32 kgmmuGetPDBAllocSize_GP100(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1);
913 
914 
915 #ifdef __nvoc_kern_gmmu_h_disabled
916 static inline NvU32 kgmmuGetPDBAllocSize(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1) {
917     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
918     return 0;
919 }
920 #else //__nvoc_kern_gmmu_h_disabled
921 #define kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize_GP100(pKernelGmmu, arg0, arg1)
922 #endif //__nvoc_kern_gmmu_h_disabled
923 
924 #define kgmmuGetPDBAllocSize_HAL(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1)
925 
926 NvU64 kgmmuGetBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);
927 
928 
929 #ifdef __nvoc_kern_gmmu_h_disabled
930 static inline NvU64 kgmmuGetBigPageSize(struct KernelGmmu *pKernelGmmu) {
931     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
932     return 0;
933 }
934 #else //__nvoc_kern_gmmu_h_disabled
935 #define kgmmuGetBigPageSize(pKernelGmmu) kgmmuGetBigPageSize_GM107(pKernelGmmu)
936 #endif //__nvoc_kern_gmmu_h_disabled
937 
938 #define kgmmuGetBigPageSize_HAL(pKernelGmmu) kgmmuGetBigPageSize(pKernelGmmu)
939 
940 void kgmmuFmtInitCaps_GM20X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt);
941 
942 
943 #ifdef __nvoc_kern_gmmu_h_disabled
944 static inline void kgmmuFmtInitCaps(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt) {
945     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
946 }
947 #else //__nvoc_kern_gmmu_h_disabled
948 #define kgmmuFmtInitCaps(pKernelGmmu, pFmt) kgmmuFmtInitCaps_GM20X(pKernelGmmu, pFmt)
949 #endif //__nvoc_kern_gmmu_h_disabled
950 
951 #define kgmmuFmtInitCaps_HAL(pKernelGmmu, pFmt) kgmmuFmtInitCaps(pKernelGmmu, pFmt)
952 
953 void kgmmuFmtInitPteApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);
954 
955 
956 #ifdef __nvoc_kern_gmmu_h_disabled
957 static inline void kgmmuFmtInitPteApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
958     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
959 }
960 #else //__nvoc_kern_gmmu_h_disabled
961 #define kgmmuFmtInitPteApertures(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures_GM10X(pKernelGmmu, pEntries)
962 #endif //__nvoc_kern_gmmu_h_disabled
963 
964 #define kgmmuFmtInitPteApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures(pKernelGmmu, pEntries)
965 
966 void kgmmuFmtInitPdeApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);
967 
968 
969 #ifdef __nvoc_kern_gmmu_h_disabled
970 static inline void kgmmuFmtInitPdeApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
971     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
972 }
973 #else //__nvoc_kern_gmmu_h_disabled
974 #define kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures_GM10X(pKernelGmmu, pEntries)
975 #endif //__nvoc_kern_gmmu_h_disabled
976 
977 #define kgmmuFmtInitPdeApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries)
978 
979 void kgmmuInvalidateTlb_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope);
980 
981 
982 #ifdef __nvoc_kern_gmmu_h_disabled
983 static inline void kgmmuInvalidateTlb(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope) {
984     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
985 }
986 #else //__nvoc_kern_gmmu_h_disabled
987 #define kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb_GM107(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
988 #endif //__nvoc_kern_gmmu_h_disabled
989 
990 #define kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
991 
992 NV_STATUS kgmmuCheckPendingInvalidates_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid);
993 
994 
995 #ifdef __nvoc_kern_gmmu_h_disabled
996 static inline NV_STATUS kgmmuCheckPendingInvalidates(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid) {
997     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
998     return NV_ERR_NOT_SUPPORTED;
999 }
1000 #else //__nvoc_kern_gmmu_h_disabled
1001 #define kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates_TU102(pGpu, pKernelGmmu, pTimeOut, gfid)
1002 #endif //__nvoc_kern_gmmu_h_disabled
1003 
1004 #define kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid)
1005 
1006 NV_STATUS kgmmuCommitTlbInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
1007 
1008 
1009 #ifdef __nvoc_kern_gmmu_h_disabled
1010 static inline NV_STATUS kgmmuCommitTlbInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1011     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1012     return NV_ERR_NOT_SUPPORTED;
1013 }
1014 #else //__nvoc_kern_gmmu_h_disabled
1015 #define kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate_TU102(pGpu, pKernelGmmu, pParams)
1016 #endif //__nvoc_kern_gmmu_h_disabled
1017 
1018 #define kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams)
1019 
1020 void kgmmuSetPdbToInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
1021 
1022 
1023 #ifdef __nvoc_kern_gmmu_h_disabled
1024 static inline void kgmmuSetPdbToInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1025     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1026 }
1027 #else //__nvoc_kern_gmmu_h_disabled
1028 #define kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate_TU102(pGpu, pKernelGmmu, pParams)
1029 #endif //__nvoc_kern_gmmu_h_disabled
1030 
1031 #define kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams)
1032 
1033 NV_STATUS kgmmuEnableComputePeerAddressing_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags);
1034 
1035 
1036 #ifdef __nvoc_kern_gmmu_h_disabled
1037 static inline NV_STATUS kgmmuEnableComputePeerAddressing(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags) {
1038     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1039     return NV_ERR_NOT_SUPPORTED;
1040 }
1041 #else //__nvoc_kern_gmmu_h_disabled
1042 #define kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing_IMPL(pGpu, pKernelGmmu, flags)
1043 #endif //__nvoc_kern_gmmu_h_disabled
1044 
1045 #define kgmmuEnableComputePeerAddressing_HAL(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags)
1046 
1047 void kgmmuDetermineMaxVASize_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1048 
1049 
1050 #ifdef __nvoc_kern_gmmu_h_disabled
1051 static inline void kgmmuDetermineMaxVASize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1052     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1053 }
1054 #else //__nvoc_kern_gmmu_h_disabled
1055 #define kgmmuDetermineMaxVASize(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize_GM107(pGpu, pKernelGmmu)
1056 #endif //__nvoc_kern_gmmu_h_disabled
1057 
1058 #define kgmmuDetermineMaxVASize_HAL(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize(pGpu, pKernelGmmu)
1059 
1060 const char *kgmmuGetFaultTypeString_GP100(struct KernelGmmu *pKernelGmmu, NvU32 faultType);
1061 
1062 
1063 #ifdef __nvoc_kern_gmmu_h_disabled
1064 static inline const char *kgmmuGetFaultTypeString(struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
1065     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1066     return NULL;
1067 }
1068 #else //__nvoc_kern_gmmu_h_disabled
1069 #define kgmmuGetFaultTypeString(pKernelGmmu, faultType) kgmmuGetFaultTypeString_GP100(pKernelGmmu, faultType)
1070 #endif //__nvoc_kern_gmmu_h_disabled
1071 
1072 #define kgmmuGetFaultTypeString_HAL(pKernelGmmu, faultType) kgmmuGetFaultTypeString(pKernelGmmu, faultType)
1073 
1074 NV_STATUS kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);
1075 
1076 
1077 #ifdef __nvoc_kern_gmmu_h_disabled
1078 static inline NV_STATUS kgmmuChangeReplayableFaultOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
1079     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1080     return NV_ERR_NOT_SUPPORTED;
1081 }
1082 #else //__nvoc_kern_gmmu_h_disabled
1083 #define kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership_GV100(pGpu, pKernelGmmu, arg0)
1084 #endif //__nvoc_kern_gmmu_h_disabled
1085 
1086 #define kgmmuChangeReplayableFaultOwnership_HAL(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0)
1087 
1088 NV_STATUS kgmmuServiceReplayableFault_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1089 
1090 
1091 #ifdef __nvoc_kern_gmmu_h_disabled
1092 static inline NV_STATUS kgmmuServiceReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1093     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1094     return NV_ERR_NOT_SUPPORTED;
1095 }
1096 #else //__nvoc_kern_gmmu_h_disabled
1097 #define kgmmuServiceReplayableFault(pGpu, pKernelGmmu) kgmmuServiceReplayableFault_TU102(pGpu, pKernelGmmu)
1098 #endif //__nvoc_kern_gmmu_h_disabled
1099 
1100 #define kgmmuServiceReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceReplayableFault(pGpu, pKernelGmmu)
1101 
1102 NV_STATUS kgmmuReportFaultBufferOverflow_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1103 
1104 
1105 #ifdef __nvoc_kern_gmmu_h_disabled
1106 static inline NV_STATUS kgmmuReportFaultBufferOverflow(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1107     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1108     return NV_ERR_NOT_SUPPORTED;
1109 }
1110 #else //__nvoc_kern_gmmu_h_disabled
1111 #define kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow_GV100(pGpu, pKernelGmmu)
1112 #endif //__nvoc_kern_gmmu_h_disabled
1113 
1114 #define kgmmuReportFaultBufferOverflow_HAL(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu)
1115 
1116 NV_STATUS kgmmuReadFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0);
1117 
1118 
1119 #ifdef __nvoc_kern_gmmu_h_disabled
1120 static inline NV_STATUS kgmmuReadFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0) {
1121     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1122     return NV_ERR_NOT_SUPPORTED;
1123 }
1124 #else //__nvoc_kern_gmmu_h_disabled
1125 #define kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, pGetOffset, arg0)
1126 #endif //__nvoc_kern_gmmu_h_disabled
1127 
1128 #define kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0)
1129 
1130 NV_STATUS kgmmuWriteFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 getValue, struct THREAD_STATE_NODE *arg0);
1131 
1132 
1133 #ifdef __nvoc_kern_gmmu_h_disabled
1134 static inline NV_STATUS kgmmuWriteFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 getValue, struct THREAD_STATE_NODE *arg0) {
1135     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1136     return NV_ERR_NOT_SUPPORTED;
1137 }
1138 #else //__nvoc_kern_gmmu_h_disabled
1139 #define kgmmuWriteFaultBufferGetPtr(pGpu, pKernelGmmu, index, getValue, arg0) kgmmuWriteFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, getValue, arg0)
1140 #endif //__nvoc_kern_gmmu_h_disabled
1141 
1142 #define kgmmuWriteFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, getValue, arg0) kgmmuWriteFaultBufferGetPtr(pGpu, pKernelGmmu, index, getValue, arg0)
1143 
1144 NV_STATUS kgmmuReadFaultBufferPutPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg0);
1145 
1146 
1147 #ifdef __nvoc_kern_gmmu_h_disabled
1148 static inline NV_STATUS kgmmuReadFaultBufferPutPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg0) {
1149     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1150     return NV_ERR_NOT_SUPPORTED;
1151 }
1152 #else //__nvoc_kern_gmmu_h_disabled
1153 #define kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg0) kgmmuReadFaultBufferPutPtr_TU102(pGpu, pKernelGmmu, index, pPutOffset, arg0)
1154 #endif //__nvoc_kern_gmmu_h_disabled
1155 
1156 #define kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, index, pPutOffset, arg0) kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg0)
1157 
1158 NvU32 kgmmuReadMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid);
1159 
1160 
1161 #ifdef __nvoc_kern_gmmu_h_disabled
1162 static inline NvU32 kgmmuReadMmuFaultBufferSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid) {
1163     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1164     return 0;
1165 }
1166 #else //__nvoc_kern_gmmu_h_disabled
1167 #define kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize_TU102(pGpu, pKernelGmmu, arg0, gfid)
1168 #endif //__nvoc_kern_gmmu_h_disabled
1169 
1170 #define kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid)
1171 
1172 NvU32 kgmmuReadMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid);
1173 
1174 
1175 #ifdef __nvoc_kern_gmmu_h_disabled
1176 static inline NvU32 kgmmuReadMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid) {
1177     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1178     return 0;
1179 }
1180 #else //__nvoc_kern_gmmu_h_disabled
1181 #define kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus_TU102(pGpu, pKernelGmmu, gfid)
1182 #endif //__nvoc_kern_gmmu_h_disabled
1183 
1184 #define kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid)
1185 
1186 void kgmmuWriteMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1187 
1188 
1189 #ifdef __nvoc_kern_gmmu_h_disabled
1190 static inline void kgmmuWriteMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1191     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1192 }
1193 #else //__nvoc_kern_gmmu_h_disabled
1194 #define kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus_TU102(pGpu, pKernelGmmu, arg0)
1195 #endif //__nvoc_kern_gmmu_h_disabled
1196 
1197 #define kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0)
1198 
1199 NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);
1200 
1201 
1202 #ifdef __nvoc_kern_gmmu_h_disabled
1203 static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
1204     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1205     return NV_FALSE;
1206 }
1207 #else //__nvoc_kern_gmmu_h_disabled
1208 #define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu, arg0)
1209 #endif //__nvoc_kern_gmmu_h_disabled
1210 
1211 #define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0)
1212 
1213 NV_STATUS kgmmuClientShadowFaultBufferAlloc_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1214 
1215 
1216 #ifdef __nvoc_kern_gmmu_h_disabled
1217 static inline NV_STATUS kgmmuClientShadowFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1218     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1219     return NV_ERR_NOT_SUPPORTED;
1220 }
1221 #else //__nvoc_kern_gmmu_h_disabled
1222 #define kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAlloc_GV100(pGpu, pKernelGmmu, arg0)
1223 #endif //__nvoc_kern_gmmu_h_disabled
1224 
1225 #define kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg0)
1226 
1227 NV_STATUS kgmmuClientShadowFaultBufferFree_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1228 
1229 
1230 #ifdef __nvoc_kern_gmmu_h_disabled
1231 static inline NV_STATUS kgmmuClientShadowFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1232     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1233     return NV_ERR_NOT_SUPPORTED;
1234 }
1235 #else //__nvoc_kern_gmmu_h_disabled
1236 #define kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferFree_GV100(pGpu, pKernelGmmu, arg0)
1237 #endif //__nvoc_kern_gmmu_h_disabled
1238 
1239 #define kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg0)
1240 
1241 void kgmmuEncodeSysmemAddrs_GM107(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count);
1242 
1243 
1244 #ifdef __nvoc_kern_gmmu_h_disabled
1245 static inline void kgmmuEncodeSysmemAddrs(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count) {
1246     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1247 }
1248 #else //__nvoc_kern_gmmu_h_disabled
1249 #define kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs_GM107(pKernelGmmu, pAddresses, count)
1250 #endif //__nvoc_kern_gmmu_h_disabled
1251 
1252 #define kgmmuEncodeSysmemAddrs_HAL(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count)
1253 
1254 NvU8 kgmmuGetHwPteApertureFromMemdesc_GM107(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc);
1255 
1256 
1257 #ifdef __nvoc_kern_gmmu_h_disabled
1258 static inline NvU8 kgmmuGetHwPteApertureFromMemdesc(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc) {
1259     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1260     return 0;
1261 }
1262 #else //__nvoc_kern_gmmu_h_disabled
1263 #define kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc_GM107(pKernelGmmu, pDesc)
1264 #endif //__nvoc_kern_gmmu_h_disabled
1265 
1266 #define kgmmuGetHwPteApertureFromMemdesc_HAL(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc)
1267 
1268 NvBool kgmmuTestAccessCounterWriteNak_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1269 
1270 
1271 #ifdef __nvoc_kern_gmmu_h_disabled
1272 static inline NvBool kgmmuTestAccessCounterWriteNak(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1273     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1274     return NV_FALSE;
1275 }
1276 #else //__nvoc_kern_gmmu_h_disabled
1277 #define kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak_TU102(pGpu, pKernelGmmu)
1278 #endif //__nvoc_kern_gmmu_h_disabled
1279 
1280 #define kgmmuTestAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu)
1281 
1282 NV_STATUS kgmmuEnableNvlinkComputePeerAddressing_GV100(struct KernelGmmu *pKernelGmmu);
1283 
1284 
1285 #ifdef __nvoc_kern_gmmu_h_disabled
1286 static inline NV_STATUS kgmmuEnableNvlinkComputePeerAddressing(struct KernelGmmu *pKernelGmmu) {
1287     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1288     return NV_ERR_NOT_SUPPORTED;
1289 }
1290 #else //__nvoc_kern_gmmu_h_disabled
1291 #define kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing_GV100(pKernelGmmu)
1292 #endif //__nvoc_kern_gmmu_h_disabled
1293 
1294 #define kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu)
1295 
1296 void kgmmuClearNonReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);
1297 
1298 
1299 #ifdef __nvoc_kern_gmmu_h_disabled
1300 static inline void kgmmuClearNonReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
1301     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1302 }
1303 #else //__nvoc_kern_gmmu_h_disabled
1304 #define kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg0) kgmmuClearNonReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg0)
1305 #endif //__nvoc_kern_gmmu_h_disabled
1306 
1307 #define kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg0) kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg0)
1308 
1309 void kgmmuClearReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);
1310 
1311 
1312 #ifdef __nvoc_kern_gmmu_h_disabled
1313 static inline void kgmmuClearReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
1314     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1315 }
1316 #else //__nvoc_kern_gmmu_h_disabled
1317 #define kgmmuClearReplayableFaultIntr(pGpu, pKernelGmmu, arg0) kgmmuClearReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg0)
1318 #endif //__nvoc_kern_gmmu_h_disabled
1319 
1320 #define kgmmuClearReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg0) kgmmuClearReplayableFaultIntr(pGpu, pKernelGmmu, arg0)
1321 
1322 void kgmmuPrintFaultInfo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, FIFO_MMU_EXCEPTION_DATA *arg1);
1323 
1324 
1325 #ifdef __nvoc_kern_gmmu_h_disabled
1326 static inline void kgmmuPrintFaultInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
1327     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1328 }
1329 #else //__nvoc_kern_gmmu_h_disabled
1330 #define kgmmuPrintFaultInfo(pGpu, pKernelGmmu, arg0, arg1) kgmmuPrintFaultInfo_TU102(pGpu, pKernelGmmu, arg0, arg1)
1331 #endif //__nvoc_kern_gmmu_h_disabled
1332 
1333 #define kgmmuPrintFaultInfo_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuPrintFaultInfo(pGpu, pKernelGmmu, arg0, arg1)
1334 
1335 static inline NV_STATUS kgmmuInitCeMmuFaultIdRange_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1336     return NV_OK;
1337 }
1338 
1339 
1340 #ifdef __nvoc_kern_gmmu_h_disabled
1341 static inline NV_STATUS kgmmuInitCeMmuFaultIdRange(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1342     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1343     return NV_ERR_NOT_SUPPORTED;
1344 }
1345 #else //__nvoc_kern_gmmu_h_disabled
1346 #define kgmmuInitCeMmuFaultIdRange(pGpu, pKernelGmmu) kgmmuInitCeMmuFaultIdRange_56cd7a(pGpu, pKernelGmmu)
1347 #endif //__nvoc_kern_gmmu_h_disabled
1348 
1349 #define kgmmuInitCeMmuFaultIdRange_HAL(pGpu, pKernelGmmu) kgmmuInitCeMmuFaultIdRange(pGpu, pKernelGmmu)
1350 
1351 NV_STATUS kgmmuServiceNonReplayableFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1352 
1353 
1354 #ifdef __nvoc_kern_gmmu_h_disabled
1355 static inline NV_STATUS kgmmuServiceNonReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1356     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1357     return NV_ERR_NOT_SUPPORTED;
1358 }
1359 #else //__nvoc_kern_gmmu_h_disabled
1360 #define kgmmuServiceNonReplayableFault(pGpu, pKernelGmmu) kgmmuServiceNonReplayableFault_GV100(pGpu, pKernelGmmu)
1361 #endif //__nvoc_kern_gmmu_h_disabled
1362 
1363 #define kgmmuServiceNonReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceNonReplayableFault(pGpu, pKernelGmmu)
1364 
1365 NV_STATUS kgmmuHandleNonReplayableFaultPacket_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_PACKET *arg0);
1366 
1367 
1368 #ifdef __nvoc_kern_gmmu_h_disabled
1369 static inline NV_STATUS kgmmuHandleNonReplayableFaultPacket(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_PACKET *arg0) {
1370     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1371     return NV_ERR_NOT_SUPPORTED;
1372 }
1373 #else //__nvoc_kern_gmmu_h_disabled
1374 #define kgmmuHandleNonReplayableFaultPacket(pGpu, pKernelGmmu, arg0) kgmmuHandleNonReplayableFaultPacket_GV100(pGpu, pKernelGmmu, arg0)
1375 #endif //__nvoc_kern_gmmu_h_disabled
1376 
1377 #define kgmmuHandleNonReplayableFaultPacket_HAL(pGpu, pKernelGmmu, arg0) kgmmuHandleNonReplayableFaultPacket(pGpu, pKernelGmmu, arg0)
1378 
1379 NV_STATUS kgmmuNotifyNonReplayableFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);
1380 
1381 
1382 #ifdef __nvoc_kern_gmmu_h_disabled
1383 static inline NV_STATUS kgmmuNotifyNonReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
1384     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1385     return NV_ERR_NOT_SUPPORTED;
1386 }
1387 #else //__nvoc_kern_gmmu_h_disabled
1388 #define kgmmuNotifyNonReplayableFault(pGpu, pKernelGmmu, arg0) kgmmuNotifyNonReplayableFault_GV100(pGpu, pKernelGmmu, arg0)
1389 #endif //__nvoc_kern_gmmu_h_disabled
1390 
1391 #define kgmmuNotifyNonReplayableFault_HAL(pGpu, pKernelGmmu, arg0) kgmmuNotifyNonReplayableFault(pGpu, pKernelGmmu, arg0)
1392 
1393 NvU32 kgmmuGetFaultInfoFromFaultPckt_GV100(struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry);
1394 
1395 
1396 #ifdef __nvoc_kern_gmmu_h_disabled
1397 static inline NvU32 kgmmuGetFaultInfoFromFaultPckt(struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry) {
1398     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1399     return 0;
1400 }
1401 #else //__nvoc_kern_gmmu_h_disabled
1402 #define kgmmuGetFaultInfoFromFaultPckt(pKernelGmmu, pParsedFaultEntry) kgmmuGetFaultInfoFromFaultPckt_GV100(pKernelGmmu, pParsedFaultEntry)
1403 #endif //__nvoc_kern_gmmu_h_disabled
1404 
1405 #define kgmmuGetFaultInfoFromFaultPckt_HAL(pKernelGmmu, pParsedFaultEntry) kgmmuGetFaultInfoFromFaultPckt(pKernelGmmu, pParsedFaultEntry)
1406 
1407 static inline NV_STATUS kgmmuServiceChannelMmuFault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel) {
1408     NV_ASSERT_PRECOMP(0);
1409     return NV_ERR_NOT_SUPPORTED;
1410 }
1411 
1412 NV_STATUS kgmmuServiceChannelMmuFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel);
1413 
1414 
1415 #ifdef __nvoc_kern_gmmu_h_disabled
1416 static inline NV_STATUS kgmmuServiceChannelMmuFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel) {
1417     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1418     return NV_ERR_NOT_SUPPORTED;
1419 }
1420 #else //__nvoc_kern_gmmu_h_disabled
1421 #define kgmmuServiceChannelMmuFault(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel) kgmmuServiceChannelMmuFault_92bfc3(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel)
1422 #endif //__nvoc_kern_gmmu_h_disabled
1423 
1424 #define kgmmuServiceChannelMmuFault_HAL(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel) kgmmuServiceChannelMmuFault(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel)
1425 
1426 NV_STATUS kgmmuServicePriFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1427 
1428 
1429 #ifdef __nvoc_kern_gmmu_h_disabled
1430 static inline NV_STATUS kgmmuServicePriFaults(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1431     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1432     return NV_ERR_NOT_SUPPORTED;
1433 }
1434 #else //__nvoc_kern_gmmu_h_disabled
1435 #define kgmmuServicePriFaults(pGpu, pKernelGmmu) kgmmuServicePriFaults_GV100(pGpu, pKernelGmmu)
1436 #endif //__nvoc_kern_gmmu_h_disabled
1437 
1438 #define kgmmuServicePriFaults_HAL(pGpu, pKernelGmmu) kgmmuServicePriFaults(pGpu, pKernelGmmu)
1439 
1440 NV_STATUS kgmmuCheckAndDecideBigPageSize_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1441 
1442 
1443 #ifdef __nvoc_kern_gmmu_h_disabled
1444 static inline NV_STATUS kgmmuCheckAndDecideBigPageSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1445     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
1446     return NV_ERR_NOT_SUPPORTED;
1447 }
1448 #else //__nvoc_kern_gmmu_h_disabled
1449 #define kgmmuCheckAndDecideBigPageSize(pGpu, pKernelGmmu) kgmmuCheckAndDecideBigPageSize_GP100(pGpu, pKernelGmmu)
1450 #endif //__nvoc_kern_gmmu_h_disabled
1451 
1452 #define kgmmuCheckAndDecideBigPageSize_HAL(pGpu, pKernelGmmu) kgmmuCheckAndDecideBigPageSize(pGpu, pKernelGmmu)
1453 
1454 NV_STATUS kgmmuConstructEngine_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0);
1455 
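/*
 * The *_DISPATCH wrappers below route through per-object function pointers
 * (e.g. __kgmmuConstructEngine__) that NVOC populates with the per-chip
 * implementation during object construction. A minimal caller-side sketch,
 * assuming engDesc is the engine descriptor being constructed:
 *
 *     NV_STATUS status = kgmmuConstructEngine_DISPATCH(pGpu, pKernelGmmu, engDesc);
 */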
1456 static inline NV_STATUS kgmmuConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0) {
1457     return pKernelGmmu->__kgmmuConstructEngine__(pGpu, pKernelGmmu, arg0);
1458 }
1459 
1460 NV_STATUS kgmmuStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1461 
1462 static inline NV_STATUS kgmmuStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1463     return pKernelGmmu->__kgmmuStateInitLocked__(pGpu, pKernelGmmu);
1464 }
1465 
1466 NV_STATUS kgmmuStateLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1467 
1468 static inline NV_STATUS kgmmuStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1469     return pKernelGmmu->__kgmmuStateLoad__(pGpu, pKernelGmmu, arg0);
1470 }
1471 
1472 NV_STATUS kgmmuStateUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1473 
1474 static inline NV_STATUS kgmmuStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1475     return pKernelGmmu->__kgmmuStateUnload__(pGpu, pKernelGmmu, arg0);
1476 }
1477 
1478 NV_STATUS kgmmuStatePostLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1479 
1480 static inline NV_STATUS kgmmuStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1481     return pKernelGmmu->__kgmmuStatePostLoad__(pGpu, pKernelGmmu, arg0);
1482 }
1483 
1484 NV_STATUS kgmmuStatePreUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1485 
1486 static inline NV_STATUS kgmmuStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1487     return pKernelGmmu->__kgmmuStatePreUnload__(pGpu, pKernelGmmu, arg0);
1488 }
1489 
1490 void kgmmuStateDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1491 
1492 static inline void kgmmuStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1493     pKernelGmmu->__kgmmuStateDestroy__(pGpu, pKernelGmmu);
1494 }
1495 
1496 void kgmmuRegisterIntrService_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[171]);
1497 
1498 static inline void kgmmuRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[171]) {
1499     pKernelGmmu->__kgmmuRegisterIntrService__(pGpu, pKernelGmmu, arg0);
1500 }
1501 
1502 NvBool kgmmuClearInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceClearInterruptArguments *pParams);
1503 
1504 static inline NvBool kgmmuClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceClearInterruptArguments *pParams) {
1505     return pKernelGmmu->__kgmmuClearInterrupt__(pGpu, pKernelGmmu, pParams);
1506 }
1507 
1508 NvU32 kgmmuServiceInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams);
1509 
1510 static inline NvU32 kgmmuServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams) {
1511     return pKernelGmmu->__kgmmuServiceInterrupt__(pGpu, pKernelGmmu, pParams);
1512 }
1513 
1514 static inline NV_STATUS kgmmuServiceNotificationInterrupt_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceNotificationInterruptArguments *pParams) {
1515     return NV_OK;
1516 }
1517 
1518 static inline NV_STATUS kgmmuServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceNotificationInterruptArguments *pParams) {
1519     return pKernelGmmu->__kgmmuServiceNotificationInterrupt__(pGpu, pKernelGmmu, pParams);
1520 }
1521 
1522 NV_STATUS kgmmuInstBlkVaLimitGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData);
1523 
1524 static inline NV_STATUS kgmmuInstBlkVaLimitGet_f03539(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
1525     *pOffset = 0;
1526     return NV_OK;
1527 }
1528 
1529 static inline NV_STATUS kgmmuInstBlkVaLimitGet_DISPATCH(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
1530     return pKernelGmmu->__kgmmuInstBlkVaLimitGet__(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData);
1531 }
1532 
1533 NvU32 kgmmuSetTlbInvalidateMembarWarParameters_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);
1534 
1535 static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1536     return 0;
1537 }
1538 
1539 static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
1540     return pKernelGmmu->__kgmmuSetTlbInvalidateMembarWarParameters__(pGpu, pKernelGmmu, pParams);
1541 }
1542 
1543 NV_STATUS kgmmuSetTlbInvalidationScope_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams);
1544 
1545 static inline NV_STATUS kgmmuSetTlbInvalidationScope_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
1546     return NV_ERR_NOT_SUPPORTED;
1547 }
1548 
1549 static inline NV_STATUS kgmmuSetTlbInvalidationScope_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
1550     return pKernelGmmu->__kgmmuSetTlbInvalidationScope__(pGpu, pKernelGmmu, flags, pParams);
1551 }
1552 
1553 void kgmmuFmtInitPteComptagLine_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);
1554 
1555 static inline void kgmmuFmtInitPteComptagLine_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1556     return;
1557 }
1558 
1559 static inline void kgmmuFmtInitPteComptagLine_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1560     pKernelGmmu->__kgmmuFmtInitPteComptagLine__(pKernelGmmu, pPte, version);
1561 }
1562 
1563 void kgmmuFmtInitPeerPteFld_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);
1564 
1565 static inline void kgmmuFmtInitPeerPteFld_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1566     return;
1567 }
1568 
1569 static inline void kgmmuFmtInitPeerPteFld_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
1570     pKernelGmmu->__kgmmuFmtInitPeerPteFld__(pKernelGmmu, pPte, version);
1571 }
1572 
1573 void kgmmuFmtInitPte_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);
1574 
1575 void kgmmuFmtInitPte_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);
1576 
1577 static inline void kgmmuFmtInitPte_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture) {
1578     pKernelGmmu->__kgmmuFmtInitPte__(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture);
1579 }
1580 
1581 void kgmmuFmtInitPde_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1582 
1583 void kgmmuFmtInitPde_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1584 
1585 static inline void kgmmuFmtInitPde_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
1586     pKernelGmmu->__kgmmuFmtInitPde__(pKernelGmmu, pPde, version, pPdeApertures);
1587 }
1588 
1589 NvBool kgmmuFmtIsVersionSupported_GP10X(struct KernelGmmu *pKernelGmmu, NvU32 version);
1590 
1591 NvBool kgmmuFmtIsVersionSupported_GH10X(struct KernelGmmu *pKernelGmmu, NvU32 version);
1592 
1593 static inline NvBool kgmmuFmtIsVersionSupported_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 version) {
1594     return pKernelGmmu->__kgmmuFmtIsVersionSupported__(pKernelGmmu, version);
1595 }
1596 
1597 void kgmmuFmtInitLevels_GP10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
1598 
1599 void kgmmuFmtInitLevels_GA10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
1600 
1601 void kgmmuFmtInitLevels_GH10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);
1602 
1603 static inline void kgmmuFmtInitLevels_DISPATCH(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift) {
1604     pKernelGmmu->__kgmmuFmtInitLevels__(pKernelGmmu, pLevels, numLevels, version, bigPageShift);
1605 }
1606 
1607 void kgmmuFmtInitPdeMulti_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1608 
1609 void kgmmuFmtInitPdeMulti_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);
1610 
1611 static inline void kgmmuFmtInitPdeMulti_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
1612     pKernelGmmu->__kgmmuFmtInitPdeMulti__(pKernelGmmu, pPdeMulti, version, pPdeApertures);
1613 }
1614 
1615 NV_STATUS kgmmuFmtFamiliesInit_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1616 
1617 NV_STATUS kgmmuFmtFamiliesInit_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1618 
1619 static inline NV_STATUS kgmmuFmtFamiliesInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1620     return pKernelGmmu->__kgmmuFmtFamiliesInit__(pGpu, pKernelGmmu);
1621 }
1622 
1623 NV_STATUS kgmmuTranslatePtePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1);
1624 
1625 static inline NV_STATUS kgmmuTranslatePtePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1626     return NV_OK;
1627 }
1628 
1629 static inline NV_STATUS kgmmuTranslatePtePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1630     return pKernelGmmu->__kgmmuTranslatePtePcfFromSw__(pKernelGmmu, arg0, arg1);
1631 }
1632 
1633 NV_STATUS kgmmuTranslatePtePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2);
1634 
1635 static inline NV_STATUS kgmmuTranslatePtePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) {
1636     return NV_OK;
1637 }
1638 
1639 static inline NV_STATUS kgmmuTranslatePtePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) {
1640     return pKernelGmmu->__kgmmuTranslatePtePcfFromHw__(pKernelGmmu, arg0, arg1, arg2);
1641 }
1642 
1643 NV_STATUS kgmmuTranslatePdePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1);
1644 
1645 static inline NV_STATUS kgmmuTranslatePdePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1646     return NV_OK;
1647 }
1648 
1649 static inline NV_STATUS kgmmuTranslatePdePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
1650     return pKernelGmmu->__kgmmuTranslatePdePcfFromSw__(pKernelGmmu, arg0, arg1);
1651 }
1652 
1653 NV_STATUS kgmmuTranslatePdePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2);
1654 
1655 static inline NV_STATUS kgmmuTranslatePdePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) {
1656     return NV_OK;
1657 }
1658 
1659 static inline NV_STATUS kgmmuTranslatePdePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) {
1660     return pKernelGmmu->__kgmmuTranslatePdePcfFromHw__(pKernelGmmu, arg0, arg1, arg2);
1661 }
1662 
1663 NV_STATUS kgmmuGetFaultRegisterMappings_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);
1664 
1665 NV_STATUS kgmmuGetFaultRegisterMappings_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);
1666 
1667 static inline NV_STATUS kgmmuGetFaultRegisterMappings_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl) {
1668     return pKernelGmmu->__kgmmuGetFaultRegisterMappings__(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl);
1669 }
1670 
1671 NV_STATUS kgmmuIssueReplayableFaultBufferFlush_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush);
1672 
kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvBool bCopyAndFlush)1673 static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush) {
1674     return NV_ERR_NOT_SUPPORTED;
1675 }
1676 
kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvBool bCopyAndFlush)1677 static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush) {
1678     return pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__(pGpu, pKernelGmmu, bCopyAndFlush);
1679 }
1680 
1681 NV_STATUS kgmmuToggleFaultOnPrefetch_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable);
1682 
kgmmuToggleFaultOnPrefetch_46f6a7(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvBool bEnable)1683 static inline NV_STATUS kgmmuToggleFaultOnPrefetch_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable) {
1684     return NV_ERR_NOT_SUPPORTED;
1685 }
1686 
kgmmuToggleFaultOnPrefetch_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvBool bEnable)1687 static inline NV_STATUS kgmmuToggleFaultOnPrefetch_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable) {
1688     return pKernelGmmu->__kgmmuToggleFaultOnPrefetch__(pGpu, pKernelGmmu, bEnable);
1689 }
1690 
1691 NV_STATUS kgmmuFaultBufferAllocSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1692 
kgmmuFaultBufferAllocSharedMemory_56cd7a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE arg0)1693 static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1694     return NV_OK;
1695 }
1696 
kgmmuFaultBufferAllocSharedMemory_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE arg0)1697 static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1698     return pKernelGmmu->__kgmmuFaultBufferAllocSharedMemory__(pGpu, pKernelGmmu, arg0);
1699 }
1700 
1701 void kgmmuFaultBufferFreeSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);
1702 
kgmmuFaultBufferFreeSharedMemory_b3696a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE arg0)1703 static inline void kgmmuFaultBufferFreeSharedMemory_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1704     return;
1705 }
1706 
kgmmuFaultBufferFreeSharedMemory_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE arg0)1707 static inline void kgmmuFaultBufferFreeSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
1708     pKernelGmmu->__kgmmuFaultBufferFreeSharedMemory__(pGpu, pKernelGmmu, arg0);
1709 }
1710 
1711 NV_STATUS kgmmuSetupWarForBug2720120_GA100(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam);
1712 
kgmmuSetupWarForBug2720120_56cd7a(struct KernelGmmu * pKernelGmmu,GMMU_FMT_FAMILY * pFam)1713 static inline NV_STATUS kgmmuSetupWarForBug2720120_56cd7a(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) {
1714     return NV_OK;
1715 }
1716 
kgmmuSetupWarForBug2720120_DISPATCH(struct KernelGmmu * pKernelGmmu,GMMU_FMT_FAMILY * pFam)1717 static inline NV_STATUS kgmmuSetupWarForBug2720120_DISPATCH(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) {
1718     return pKernelGmmu->__kgmmuSetupWarForBug2720120__(pKernelGmmu, pFam);
1719 }
1720 
1721 NvU32 kgmmuGetGraphicsEngineId_GV100(struct KernelGmmu *pKernelGmmu);
1722 
1723 NvU32 kgmmuGetGraphicsEngineId_GH100(struct KernelGmmu *pKernelGmmu);
1724 
kgmmuGetGraphicsEngineId_DISPATCH(struct KernelGmmu * pKernelGmmu)1725 static inline NvU32 kgmmuGetGraphicsEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
1726     return pKernelGmmu->__kgmmuGetGraphicsEngineId__(pKernelGmmu);
1727 }
1728 
1729 NvU32 kgmmuReadShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type);
1730 
kgmmuReadShadowBufPutIndex_4a4dee(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE type)1731 static inline NvU32 kgmmuReadShadowBufPutIndex_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
1732     return 0;
1733 }
1734 
kgmmuReadShadowBufPutIndex_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE type)1735 static inline NvU32 kgmmuReadShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
1736     return pKernelGmmu->__kgmmuReadShadowBufPutIndex__(pGpu, pKernelGmmu, type);
1737 }
1738 
1739 NvBool kgmmuIsFaultEngineBar1_TU102(struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1740 
1741 NvBool kgmmuIsFaultEngineBar1_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1742 
kgmmuIsFaultEngineBar1_DISPATCH(struct KernelGmmu * pKernelGmmu,NvU32 arg0)1743 static inline NvBool kgmmuIsFaultEngineBar1_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1744     return pKernelGmmu->__kgmmuIsFaultEngineBar1__(pKernelGmmu, arg0);
1745 }
1746 
1747 NvBool kgmmuIsFaultEngineBar2_TU102(struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1748 
1749 NvBool kgmmuIsFaultEngineBar2_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1750 
kgmmuIsFaultEngineBar2_DISPATCH(struct KernelGmmu * pKernelGmmu,NvU32 arg0)1751 static inline NvBool kgmmuIsFaultEngineBar2_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1752     return pKernelGmmu->__kgmmuIsFaultEngineBar2__(pKernelGmmu, arg0);
1753 }
1754 
1755 NvBool kgmmuIsFaultEnginePhysical_GV100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1756 
1757 NvBool kgmmuIsFaultEnginePhysical_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);
1758 
kgmmuIsFaultEnginePhysical_DISPATCH(struct KernelGmmu * pKernelGmmu,NvU32 arg0)1759 static inline NvBool kgmmuIsFaultEnginePhysical_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
1760     return pKernelGmmu->__kgmmuIsFaultEnginePhysical__(pKernelGmmu, arg0);
1761 }
1762 
kgmmuCopyMmuFaults_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct THREAD_STATE_NODE * pThreadState,NvU32 * entriesCopied,FAULT_BUFFER_TYPE type,NvBool bPollForValidBit)1763 static inline NV_STATUS kgmmuCopyMmuFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit) {
1764     NV_ASSERT_PRECOMP(0);
1765     return NV_ERR_NOT_SUPPORTED;
1766 }
1767 
1768 NV_STATUS kgmmuCopyMmuFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit);
1769 
kgmmuCopyMmuFaults_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct THREAD_STATE_NODE * pThreadState,NvU32 * entriesCopied,FAULT_BUFFER_TYPE type,NvBool bPollForValidBit)1770 static inline NV_STATUS kgmmuCopyMmuFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit) {
1771     return pKernelGmmu->__kgmmuCopyMmuFaults__(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit);
1772 }
1773 
kgmmuParseFaultPacket_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvP64 pFaultPacket,NvP64 pParsedFaultEntry)1774 static inline NV_STATUS kgmmuParseFaultPacket_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
1775     NV_ASSERT_PRECOMP(0);
1776     return NV_ERR_NOT_SUPPORTED;
1777 }
1778 
1779 NV_STATUS kgmmuParseFaultPacket_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry);
1780 
kgmmuParseFaultPacket_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvP64 pFaultPacket,NvP64 pParsedFaultEntry)1781 static inline NV_STATUS kgmmuParseFaultPacket_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
1782     return pKernelGmmu->__kgmmuParseFaultPacket__(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry);
1783 }
1784 
kgmmuFaultBufferClearPackets_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct HW_FAULT_BUFFER * pFaultBuffer,NvU32 beginIdx,NvU32 numFaultPackets)1785 static inline void kgmmuFaultBufferClearPackets_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets) {
1786     NV_ASSERT_PRECOMP(0);
1787 }
1788 
1789 void kgmmuFaultBufferClearPackets_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets);
1790 
kgmmuFaultBufferClearPackets_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct HW_FAULT_BUFFER * pFaultBuffer,NvU32 beginIdx,NvU32 numFaultPackets)1791 static inline void kgmmuFaultBufferClearPackets_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets) {
1792     pKernelGmmu->__kgmmuFaultBufferClearPackets__(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets);
1793 }
1794 
kgmmuFaultBufferGetFault_dc3e6c(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct HW_FAULT_BUFFER * pFaultBuffer,NvU32 idx)1795 static inline GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_dc3e6c(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx) {
1796     NV_ASSERT_PRECOMP(0);
1797     return ((void *)0);
1798 }
1799 
1800 GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx);
1801 
kgmmuFaultBufferGetFault_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct HW_FAULT_BUFFER * pFaultBuffer,NvU32 idx)1802 static inline GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx) {
1803     return pKernelGmmu->__kgmmuFaultBufferGetFault__(pGpu, pKernelGmmu, pFaultBuffer, idx);
1804 }
1805 
kgmmuCopyFaultPacketToClientShadowBuffer_13cd8d(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct GMMU_FAULT_BUFFER * pFaultBuffer,FAULT_BUFFER_TYPE type,NvU32 getIndex,NvU32 shadowBufPutIndex,NvU32 maxBufferEntries,struct THREAD_STATE_NODE * pThreadState,NvU32 * pFaultsCopied)1806 static inline NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied) {
1807     NV_ASSERT_PRECOMP(0);
1808     return 0;
1809 }
1810 
1811 NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied);
1812 
1813 NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied);
1814 
kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,struct GMMU_FAULT_BUFFER * pFaultBuffer,FAULT_BUFFER_TYPE type,NvU32 getIndex,NvU32 shadowBufPutIndex,NvU32 maxBufferEntries,struct THREAD_STATE_NODE * pThreadState,NvU32 * pFaultsCopied)1815 static inline NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied) {
1816     return pKernelGmmu->__kgmmuCopyFaultPacketToClientShadowBuffer__(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied);
1817 }
1818 
kgmmuIsReplayableShadowFaultBufferFull_ceaee8(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_CLIENT_SHADOW_FAULT_BUFFER * pClientFaultBuf,NvU32 shadowBufPutIndex,NvU32 maxBufferEntries)1819 static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_ceaee8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
1820     NV_ASSERT_PRECOMP(0);
1821     return ((NvBool)(0 != 0));
1822 }
1823 
1824 NvBool kgmmuIsReplayableShadowFaultBufferFull_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries);
1825 
kgmmuIsReplayableShadowFaultBufferFull_491d52(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_CLIENT_SHADOW_FAULT_BUFFER * pClientFaultBuf,NvU32 shadowBufPutIndex,NvU32 maxBufferEntries)1826 static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_491d52(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
1827     return ((NvBool)(0 != 0));
1828 }
1829 
kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_CLIENT_SHADOW_FAULT_BUFFER * pClientFaultBuf,NvU32 shadowBufPutIndex,NvU32 maxBufferEntries)1830 static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
1831     return pKernelGmmu->__kgmmuIsReplayableShadowFaultBufferFull__(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries);
1832 }
1833 
kgmmuReadClientShadowBufPutIndex_13cd8d(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 gfid,FAULT_BUFFER_TYPE type)1834 static inline NvU32 kgmmuReadClientShadowBufPutIndex_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
1835     NV_ASSERT_PRECOMP(0);
1836     return 0;
1837 }
1838 
1839 NvU32 kgmmuReadClientShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type);
1840 
kgmmuReadClientShadowBufPutIndex_4a4dee(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 gfid,FAULT_BUFFER_TYPE type)1841 static inline NvU32 kgmmuReadClientShadowBufPutIndex_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
1842     return 0;
1843 }
1844 
kgmmuReadClientShadowBufPutIndex_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 gfid,FAULT_BUFFER_TYPE type)1845 static inline NvU32 kgmmuReadClientShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
1846     return pKernelGmmu->__kgmmuReadClientShadowBufPutIndex__(pGpu, pKernelGmmu, gfid, type);
1847 }
1848 
kgmmuWriteClientShadowBufPutIndex_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 gfid,FAULT_BUFFER_TYPE type,NvU32 putIndex)1849 static inline void kgmmuWriteClientShadowBufPutIndex_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
1850     NV_ASSERT_PRECOMP(0);
1851 }
1852 
1853 void kgmmuWriteClientShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex);
1854 
kgmmuWriteClientShadowBufPutIndex_b3696a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 gfid,FAULT_BUFFER_TYPE type,NvU32 putIndex)1855 static inline void kgmmuWriteClientShadowBufPutIndex_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
1856     return;
1857 }
1858 
kgmmuWriteClientShadowBufPutIndex_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 gfid,FAULT_BUFFER_TYPE type,NvU32 putIndex)1859 static inline void kgmmuWriteClientShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
1860     pKernelGmmu->__kgmmuWriteClientShadowBufPutIndex__(pGpu, pKernelGmmu, gfid, type, putIndex);
1861 }
1862 
1863 NvU32 kgmmuGetMinCeEngineId_GV100(struct KernelGmmu *pKernelGmmu);
1864 
1865 NvU32 kgmmuGetMinCeEngineId_GH100(struct KernelGmmu *pKernelGmmu);
1866 
kgmmuGetMinCeEngineId_DISPATCH(struct KernelGmmu * pKernelGmmu)1867 static inline NvU32 kgmmuGetMinCeEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
1868     return pKernelGmmu->__kgmmuGetMinCeEngineId__(pKernelGmmu);
1869 }
1870 
1871 NvU32 kgmmuGetMaxCeEngineId_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1872 
1873 NvU32 kgmmuGetMaxCeEngineId_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1874 
1875 NvU32 kgmmuGetMaxCeEngineId_AD102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1876 
1877 NvU32 kgmmuGetMaxCeEngineId_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1878 
kgmmuGetMaxCeEngineId_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)1879 static inline NvU32 kgmmuGetMaxCeEngineId_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1880     return pKernelGmmu->__kgmmuGetMaxCeEngineId__(pGpu, pKernelGmmu);
1881 }
1882 
kgmmuFaultBufferMap_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1883 static inline NV_STATUS kgmmuFaultBufferMap_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1884     NV_ASSERT_PRECOMP(0);
1885     return NV_ERR_NOT_SUPPORTED;
1886 }
1887 
1888 NV_STATUS kgmmuFaultBufferMap_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
1889 
kgmmuFaultBufferMap_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1890 static inline NV_STATUS kgmmuFaultBufferMap_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1891     return pKernelGmmu->__kgmmuFaultBufferMap__(pGpu, pKernelGmmu, index, gfid);
1892 }
1893 
kgmmuFaultBufferUnmap_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1894 static inline NV_STATUS kgmmuFaultBufferUnmap_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1895     NV_ASSERT_PRECOMP(0);
1896     return NV_ERR_NOT_SUPPORTED;
1897 }
1898 
1899 NV_STATUS kgmmuFaultBufferUnmap_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
1900 
kgmmuFaultBufferUnmap_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1901 static inline NV_STATUS kgmmuFaultBufferUnmap_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1902     return pKernelGmmu->__kgmmuFaultBufferUnmap__(pGpu, pKernelGmmu, index, gfid);
1903 }
1904 
kgmmuFaultBufferInit_56cd7a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)1905 static inline NV_STATUS kgmmuFaultBufferInit_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1906     return NV_OK;
1907 }
1908 
1909 NV_STATUS kgmmuFaultBufferInit_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1910 
kgmmuFaultBufferInit_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)1911 static inline NV_STATUS kgmmuFaultBufferInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1912     return pKernelGmmu->__kgmmuFaultBufferInit__(pGpu, pKernelGmmu);
1913 }
1914 
kgmmuFaultBufferDestroy_56cd7a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)1915 static inline NV_STATUS kgmmuFaultBufferDestroy_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1916     return NV_OK;
1917 }
1918 
1919 NV_STATUS kgmmuFaultBufferDestroy_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
1920 
kgmmuFaultBufferDestroy_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)1921 static inline NV_STATUS kgmmuFaultBufferDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1922     return pKernelGmmu->__kgmmuFaultBufferDestroy__(pGpu, pKernelGmmu);
1923 }
1924 
kgmmuFaultBufferLoad_ac1694(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1925 static inline NV_STATUS kgmmuFaultBufferLoad_ac1694(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1926     return NV_OK;
1927 }
1928 
1929 NV_STATUS kgmmuFaultBufferLoad_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
1930 
kgmmuFaultBufferLoad_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1931 static inline NV_STATUS kgmmuFaultBufferLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1932     return pKernelGmmu->__kgmmuFaultBufferLoad__(pGpu, pKernelGmmu, index, gfid);
1933 }
1934 
kgmmuFaultBufferUnload_ac1694(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1935 static inline NV_STATUS kgmmuFaultBufferUnload_ac1694(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1936     return NV_OK;
1937 }
1938 
1939 NV_STATUS kgmmuFaultBufferUnload_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);
1940 
kgmmuFaultBufferUnload_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvU32 gfid)1941 static inline NV_STATUS kgmmuFaultBufferUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
1942     return pKernelGmmu->__kgmmuFaultBufferUnload__(pGpu, pKernelGmmu, index, gfid);
1943 }
1944 
kgmmuEnableFaultBuffer_395e98(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvBool bIsErrorRecovery,NvU32 gfid)1945 static inline NV_STATUS kgmmuEnableFaultBuffer_395e98(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
1946     return NV_ERR_NOT_SUPPORTED;
1947 }
1948 
1949 NV_STATUS kgmmuEnableFaultBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid);
1950 
kgmmuEnableFaultBuffer_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvBool bIsErrorRecovery,NvU32 gfid)1951 static inline NV_STATUS kgmmuEnableFaultBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
1952     return pKernelGmmu->__kgmmuEnableFaultBuffer__(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid);
1953 }
1954 
kgmmuDisableFaultBuffer_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvBool bIsErrorRecovery,NvU32 gfid)1955 static inline NV_STATUS kgmmuDisableFaultBuffer_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
1956     NV_ASSERT_PRECOMP(0);
1957     return NV_ERR_NOT_SUPPORTED;
1958 }
1959 
1960 NV_STATUS kgmmuDisableFaultBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid);
1961 
kgmmuDisableFaultBuffer_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index,NvBool bIsErrorRecovery,NvU32 gfid)1962 static inline NV_STATUS kgmmuDisableFaultBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
1963     return pKernelGmmu->__kgmmuDisableFaultBuffer__(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid);
1964 }
1965 
kgmmuSetAndGetDefaultFaultBufferSize_13cd8d(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE index,NvU32 gfid)1966 static inline NvU32 kgmmuSetAndGetDefaultFaultBufferSize_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE index, NvU32 gfid) {
1967     NV_ASSERT_PRECOMP(0);
1968     return 0;
1969 }
1970 
1971 NvU32 kgmmuSetAndGetDefaultFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE index, NvU32 gfid);
1972 
kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,FAULT_BUFFER_TYPE index,NvU32 gfid)1973 static inline NvU32 kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE index, NvU32 gfid) {
1974     return pKernelGmmu->__kgmmuSetAndGetDefaultFaultBufferSize__(pGpu, pKernelGmmu, index, gfid);
1975 }
1976 
kgmmuReadMmuFaultInstHiLo_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 * arg0,NvU32 * arg1)1977 static inline void kgmmuReadMmuFaultInstHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
1978     NV_ASSERT_PRECOMP(0);
1979 }
1980 
1981 void kgmmuReadMmuFaultInstHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1);
1982 
kgmmuReadMmuFaultInstHiLo_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 * arg0,NvU32 * arg1)1983 static inline void kgmmuReadMmuFaultInstHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
1984     pKernelGmmu->__kgmmuReadMmuFaultInstHiLo__(pGpu, pKernelGmmu, arg0, arg1);
1985 }
1986 
kgmmuReadMmuFaultAddrHiLo_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 * arg0,NvU32 * arg1)1987 static inline void kgmmuReadMmuFaultAddrHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
1988     NV_ASSERT_PRECOMP(0);
1989 }
1990 
1991 void kgmmuReadMmuFaultAddrHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1);
1992 
kgmmuReadMmuFaultAddrHiLo_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 * arg0,NvU32 * arg1)1993 static inline void kgmmuReadMmuFaultAddrHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
1994     pKernelGmmu->__kgmmuReadMmuFaultAddrHiLo__(pGpu, pKernelGmmu, arg0, arg1);
1995 }
1996 
kgmmuReadMmuFaultInfo_a547a8(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)1997 static inline NvU32 kgmmuReadMmuFaultInfo_a547a8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
1998     NV_ASSERT_PRECOMP(0);
1999     return -1;
2000 }
2001 
2002 NvU32 kgmmuReadMmuFaultInfo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2003 
kgmmuReadMmuFaultInfo_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2004 static inline NvU32 kgmmuReadMmuFaultInfo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2005     return pKernelGmmu->__kgmmuReadMmuFaultInfo__(pGpu, pKernelGmmu);
2006 }
2007 
kgmmuWriteMmuFaultBufferSize_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1,NvU32 gfid)2008 static inline void kgmmuWriteMmuFaultBufferSize_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 gfid) {
2009     NV_ASSERT_PRECOMP(0);
2010 }
2011 
2012 void kgmmuWriteMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 gfid);
2013 
kgmmuWriteMmuFaultBufferSize_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1,NvU32 gfid)2014 static inline void kgmmuWriteMmuFaultBufferSize_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 gfid) {
2015     pKernelGmmu->__kgmmuWriteMmuFaultBufferSize__(pGpu, pKernelGmmu, arg0, arg1, gfid);
2016 }
2017 
kgmmuWriteMmuFaultBufferHiLo_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1,NvU32 arg2,NvU32 gfid)2018 static inline void kgmmuWriteMmuFaultBufferHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU32 gfid) {
2019     NV_ASSERT_PRECOMP(0);
2020 }
2021 
2022 void kgmmuWriteMmuFaultBufferHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU32 gfid);
2023 
kgmmuWriteMmuFaultBufferHiLo_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1,NvU32 arg2,NvU32 gfid)2024 static inline void kgmmuWriteMmuFaultBufferHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU32 gfid) {
2025     pKernelGmmu->__kgmmuWriteMmuFaultBufferHiLo__(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid);
2026 }
2027 
kgmmuEnableMmuFaultInterrupts_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2028 static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2029     NV_ASSERT_PRECOMP(0);
2030     return NV_ERR_NOT_SUPPORTED;
2031 }
2032 
kgmmuEnableMmuFaultInterrupts_46f6a7(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2033 static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2034     return NV_ERR_NOT_SUPPORTED;
2035 }
2036 
kgmmuEnableMmuFaultInterrupts_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2037 static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2038     return pKernelGmmu->__kgmmuEnableMmuFaultInterrupts__(pGpu, pKernelGmmu, index);
2039 }
2040 
kgmmuDisableMmuFaultInterrupts_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2041 static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2042     NV_ASSERT_PRECOMP(0);
2043     return NV_ERR_NOT_SUPPORTED;
2044 }
2045 
kgmmuDisableMmuFaultInterrupts_46f6a7(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2046 static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2047     return NV_ERR_NOT_SUPPORTED;
2048 }
2049 
kgmmuDisableMmuFaultInterrupts_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2050 static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2051     return pKernelGmmu->__kgmmuDisableMmuFaultInterrupts__(pGpu, pKernelGmmu, index);
2052 }
2053 
kgmmuEnableMmuFaultOverflowIntr_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2054 static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2055     NV_ASSERT_PRECOMP(0);
2056     return NV_ERR_NOT_SUPPORTED;
2057 }
2058 
kgmmuEnableMmuFaultOverflowIntr_46f6a7(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2059 static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2060     return NV_ERR_NOT_SUPPORTED;
2061 }
2062 
kgmmuEnableMmuFaultOverflowIntr_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 index)2063 static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
2064     return pKernelGmmu->__kgmmuEnableMmuFaultOverflowIntr__(pGpu, pKernelGmmu, index);
2065 }
2066 
kgmmuSignExtendFaultAddress_f2d351(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU64 * pMmuFaultAddress)2067 static inline void kgmmuSignExtendFaultAddress_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress) {
2068     NV_ASSERT_PRECOMP(0);
2069 }
2070 
2071 void kgmmuSignExtendFaultAddress_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress);
2072 
2073 void kgmmuSignExtendFaultAddress_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress);
2074 
kgmmuSignExtendFaultAddress_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU64 * pMmuFaultAddress)2075 static inline void kgmmuSignExtendFaultAddress_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress) {
2076     pKernelGmmu->__kgmmuSignExtendFaultAddress__(pGpu, pKernelGmmu, pMmuFaultAddress);
2077 }
2078 
kgmmuGetFaultType_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 fault,FAULT_TYPE * pMmuFaultType)2079 static inline NV_STATUS kgmmuGetFaultType_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType) {
2080     NV_ASSERT_PRECOMP(0);
2081     return NV_ERR_NOT_SUPPORTED;
2082 }
2083 
2084 NV_STATUS kgmmuGetFaultType_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType);
2085 
kgmmuGetFaultType_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 fault,FAULT_TYPE * pMmuFaultType)2086 static inline NV_STATUS kgmmuGetFaultType_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType) {
2087     return pKernelGmmu->__kgmmuGetFaultType__(pGpu, pKernelGmmu, fault, pMmuFaultType);
2088 }
2089 
kgmmuIsP2PUnboundInstFault_92bfc3(struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1)2090 static inline NvBool kgmmuIsP2PUnboundInstFault_92bfc3(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
2091     NV_ASSERT_PRECOMP(0);
2092     return NV_ERR_NOT_SUPPORTED;
2093 }
2094 
2095 NvBool kgmmuIsP2PUnboundInstFault_GA100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1);
2096 
kgmmuIsP2PUnboundInstFault_491d52(struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1)2097 static inline NvBool kgmmuIsP2PUnboundInstFault_491d52(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
2098     return ((NvBool)(0 != 0));
2099 }
2100 
kgmmuIsP2PUnboundInstFault_DISPATCH(struct KernelGmmu * pKernelGmmu,NvU32 arg0,NvU32 arg1)2101 static inline NvBool kgmmuIsP2PUnboundInstFault_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
2102     return pKernelGmmu->__kgmmuIsP2PUnboundInstFault__(pKernelGmmu, arg0, arg1);
2103 }
2104 
2105 NV_STATUS kgmmuServiceVfPriFaults_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType);
2106 
kgmmuServiceVfPriFaults_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 faultType)2107 static inline NV_STATUS kgmmuServiceVfPriFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
2108     NV_ASSERT_PRECOMP(0);
2109     return NV_ERR_NOT_SUPPORTED;
2110 }
2111 
kgmmuServiceVfPriFaults_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 faultType)2112 static inline NV_STATUS kgmmuServiceVfPriFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
2113     return pKernelGmmu->__kgmmuServiceVfPriFaults__(pGpu, pKernelGmmu, faultType);
2114 }
2115 
kgmmuTestVidmemAccessBitBufferError_491d52(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0)2116 static inline NvBool kgmmuTestVidmemAccessBitBufferError_491d52(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
2117     return ((NvBool)(0 != 0));
2118 }
2119 
kgmmuTestVidmemAccessBitBufferError_ceaee8(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0)2120 static inline NvBool kgmmuTestVidmemAccessBitBufferError_ceaee8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
2121     NV_ASSERT_PRECOMP(0);
2122     return ((NvBool)(0 != 0));
2123 }
2124 
kgmmuTestVidmemAccessBitBufferError_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvU32 arg0)2125 static inline NvBool kgmmuTestVidmemAccessBitBufferError_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
2126     return pKernelGmmu->__kgmmuTestVidmemAccessBitBufferError__(pGpu, pKernelGmmu, arg0);
2127 }
2128 
kgmmuDisableVidmemAccessBitBuf_b3696a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2129 static inline void kgmmuDisableVidmemAccessBitBuf_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2130     return;
2131 }
2132 
kgmmuDisableVidmemAccessBitBuf_e426af(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2133 static inline void kgmmuDisableVidmemAccessBitBuf_e426af(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2134     NV_ASSERT_PRECOMP(0);
2135     return;
2136 }
2137 
kgmmuDisableVidmemAccessBitBuf_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2138 static inline void kgmmuDisableVidmemAccessBitBuf_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2139     pKernelGmmu->__kgmmuDisableVidmemAccessBitBuf__(pGpu, pKernelGmmu);
2140 }
2141 
kgmmuEnableVidmemAccessBitBuf_46f6a7(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2142 static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2143     return NV_ERR_NOT_SUPPORTED;
2144 }
2145 
kgmmuEnableVidmemAccessBitBuf_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2146 static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2147     NV_ASSERT_PRECOMP(0);
2148     return NV_ERR_NOT_SUPPORTED;
2149 }
2150 
kgmmuEnableVidmemAccessBitBuf_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2151 static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2152     return pKernelGmmu->__kgmmuEnableVidmemAccessBitBuf__(pGpu, pKernelGmmu);
2153 }
2154 
kgmmuClearAccessCounterWriteNak_b3696a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2155 static inline void kgmmuClearAccessCounterWriteNak_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2156     return;
2157 }
2158 
kgmmuClearAccessCounterWriteNak_e426af(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2159 static inline void kgmmuClearAccessCounterWriteNak_e426af(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2160     NV_ASSERT_PRECOMP(0);
2161     return;
2162 }
2163 
kgmmuClearAccessCounterWriteNak_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2164 static inline void kgmmuClearAccessCounterWriteNak_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2165     pKernelGmmu->__kgmmuClearAccessCounterWriteNak__(pGpu, pKernelGmmu);
2166 }
2167 
kgmmuServiceMthdBuffFaultInBar2Fault_56cd7a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2168 static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2169     return NV_OK;
2170 }
2171 
kgmmuServiceMthdBuffFaultInBar2Fault_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2172 static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2173     NV_ASSERT_PRECOMP(0);
2174     return NV_ERR_NOT_SUPPORTED;
2175 }
2176 
kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2177 static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2178     return pKernelGmmu->__kgmmuServiceMthdBuffFaultInBar2Fault__(pGpu, pKernelGmmu);
2179 }
2180 
2181 NV_STATUS kgmmuFaultCancelTargeted_VF(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0);
2182 
kgmmuFaultCancelTargeted_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_FAULT_CANCEL_INFO * arg0)2183 static inline NV_STATUS kgmmuFaultCancelTargeted_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0) {
2184     NV_ASSERT_PRECOMP(0);
2185     return NV_ERR_NOT_SUPPORTED;
2186 }
2187 
2188 NV_STATUS kgmmuFaultCancelTargeted_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0);
2189 
kgmmuFaultCancelTargeted_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_FAULT_CANCEL_INFO * arg0)2190 static inline NV_STATUS kgmmuFaultCancelTargeted_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0) {
2191     return pKernelGmmu->__kgmmuFaultCancelTargeted__(pGpu, pKernelGmmu, arg0);
2192 }
2193 
kgmmuFaultCancelIssueInvalidate_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_FAULT_CANCEL_INFO * pCancelInfo,TLB_INVALIDATE_PARAMS * pParams,NvBool bGlobal)2194 static inline NV_STATUS kgmmuFaultCancelIssueInvalidate_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal) {
2195     NV_ASSERT_PRECOMP(0);
2196     return NV_ERR_NOT_SUPPORTED;
2197 }
2198 
2199 NV_STATUS kgmmuFaultCancelIssueInvalidate_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal);
2200 
kgmmuFaultCancelIssueInvalidate_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,GMMU_FAULT_CANCEL_INFO * pCancelInfo,TLB_INVALIDATE_PARAMS * pParams,NvBool bGlobal)2201 static inline NV_STATUS kgmmuFaultCancelIssueInvalidate_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal) {
2202     return pKernelGmmu->__kgmmuFaultCancelIssueInvalidate__(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal);
2203 }
2204 
2205 NV_STATUS kgmmuServiceMmuFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData);
2206 
2207 NV_STATUS kgmmuServiceMmuFault_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData);
2208 
kgmmuServiceMmuFault_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvP64 pParsedFaultInfo,FIFO_MMU_EXCEPTION_DATA * pMmuExceptionData)2209 static inline NV_STATUS kgmmuServiceMmuFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData) {
2210     return pKernelGmmu->__kgmmuServiceMmuFault__(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData);
2211 }
2212 
kgmmuServiceUnboundInstBlockFault_56cd7a(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvP64 arg0,FIFO_MMU_EXCEPTION_DATA * arg1)2213 static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
2214     return NV_OK;
2215 }
2216 
kgmmuServiceUnboundInstBlockFault_92bfc3(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvP64 arg0,FIFO_MMU_EXCEPTION_DATA * arg1)2217 static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
2218     NV_ASSERT_PRECOMP(0);
2219     return NV_ERR_NOT_SUPPORTED;
2220 }
2221 
kgmmuServiceUnboundInstBlockFault_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu,NvP64 arg0,FIFO_MMU_EXCEPTION_DATA * arg1)2222 static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
2223     return pKernelGmmu->__kgmmuServiceUnboundInstBlockFault__(pGpu, pKernelGmmu, arg0, arg1);
2224 }
2225 
2226 NvU32 kgmmuGetEccCounts_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2227 
kgmmuGetEccCounts_4a4dee(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2228 static inline NvU32 kgmmuGetEccCounts_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2229     return 0;
2230 }
2231 
kgmmuGetEccCounts_DISPATCH(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2232 static inline NvU32 kgmmuGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2233     return pKernelGmmu->__kgmmuGetEccCounts__(pGpu, pKernelGmmu);
2234 }
2235 
kgmmuStatePreLoad_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate,NvU32 arg0)2236 static inline NV_STATUS kgmmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
2237     return pEngstate->__kgmmuStatePreLoad__(pGpu, pEngstate, arg0);
2238 }
2239 
kgmmuStatePostUnload_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate,NvU32 arg0)2240 static inline NV_STATUS kgmmuStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
2241     return pEngstate->__kgmmuStatePostUnload__(pGpu, pEngstate, arg0);
2242 }
2243 
kgmmuStateInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate)2244 static inline NV_STATUS kgmmuStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
2245     return pEngstate->__kgmmuStateInitUnlocked__(pGpu, pEngstate);
2246 }
2247 
kgmmuInitMissing_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate)2248 static inline void kgmmuInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
2249     pEngstate->__kgmmuInitMissing__(pGpu, pEngstate);
2250 }
2251 
kgmmuStatePreInitLocked_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate)2252 static inline NV_STATUS kgmmuStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
2253     return pEngstate->__kgmmuStatePreInitLocked__(pGpu, pEngstate);
2254 }
2255 
kgmmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate)2256 static inline NV_STATUS kgmmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
2257     return pEngstate->__kgmmuStatePreInitUnlocked__(pGpu, pEngstate);
2258 }
2259 
kgmmuIsPresent_DISPATCH(POBJGPU pGpu,struct KernelGmmu * pEngstate)2260 static inline NvBool kgmmuIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
2261     return pEngstate->__kgmmuIsPresent__(pGpu, pEngstate);
2262 }
2263 
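//
// Note: the inline accessors below read fields of the NVOC-private part of
// the object; the cast from KernelGmmu to KernelGmmu_PRIVATE assumes the
// layout generated for this class.
//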
static inline NvU32 kgmmuGetPDEAperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEAperture;
}

static inline NvU32 kgmmuGetPTEAperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEAperture;
}

static inline NvU32 kgmmuGetPDEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEBAR1Aperture;
}

static inline NvU32 kgmmuGetPTEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEBAR1Aperture;
}

static inline NvU32 kgmmuGetPDEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEBAR1Attr;
}

static inline NvU32 kgmmuGetPTEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEBAR1Attr;
}

static inline NvU32 kgmmuGetPDEAttr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEAttr;
}

static inline NvU32 kgmmuGetPTEAttr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEAttr;
}

static inline NvU64 kgmmuGetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->overrideBigPageSize;
}

static inline void kgmmuSetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    pKernelGmmu_PRIVATE->overrideBigPageSize = bigPageSize;
}

static inline NvBool kgmmuIsPerVaspaceBigPageEn(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bEnablePerVaspaceBigPage;
}

static inline NvBool kgmmuIsIgnoreHubTlbInvalidate(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bIgnoreHubTlbInvalidate;
}

static inline NvBool kgmmuIsHugePageSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bHugePageSupported;
}

static inline NvBool kgmmuIsPageSize512mbSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bPageSize512mbSupported;
}

static inline NvBool kgmmuIsBug2720120WarEnabled(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bBug2720120WarEnabled;
}

static inline NvBool kgmmuIsVaspaceInteropSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bVaspaceInteropSupported;
}

static inline NvU64 kgmmuGetMaxVASize(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->maxVASize;
}

static inline NvU64 kgmmuGetSysBaseAddress(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->sysmemBaseAddress;
}

void kgmmuDestruct_IMPL(struct KernelGmmu *pKernelGmmu);

#define __nvoc_kgmmuDestruct(pKernelGmmu) kgmmuDestruct_IMPL(pKernelGmmu)
NV_STATUS kgmmuFmtInit_IMPL(struct KernelGmmu *pKernelGmmu);

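//
// Note: each helper below has two forms selected by
// __nvoc_kern_gmmu_h_disabled. When the KernelGmmu engine is compiled out,
// the inline stub asserts via NV_ASSERT_FAILED_PRECOMP and returns a benign
// default; otherwise the plain name is a macro forwarding to the
// corresponding _IMPL function.
//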
2359 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuFmtInit(struct KernelGmmu * pKernelGmmu)2360 static inline NV_STATUS kgmmuFmtInit(struct KernelGmmu *pKernelGmmu) {
2361     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
2362     return NV_ERR_NOT_SUPPORTED;
2363 }
2364 #else //__nvoc_kern_gmmu_h_disabled
2365 #define kgmmuFmtInit(pKernelGmmu) kgmmuFmtInit_IMPL(pKernelGmmu)
2366 #endif //__nvoc_kern_gmmu_h_disabled
2367 
2368 GMMU_APERTURE kgmmuGetMemAperture_IMPL(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc);
2369 
2370 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuGetMemAperture(struct KernelGmmu * pKernelGmmu,MEMORY_DESCRIPTOR * pMemDesc)2371 static inline GMMU_APERTURE kgmmuGetMemAperture(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc) {
2372     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
2373     GMMU_APERTURE ret;
2374     portMemSet(&ret, 0, sizeof(GMMU_APERTURE));
2375     return ret;
2376 }
2377 #else //__nvoc_kern_gmmu_h_disabled
2378 #define kgmmuGetMemAperture(pKernelGmmu, pMemDesc) kgmmuGetMemAperture_IMPL(pKernelGmmu, pMemDesc)
2379 #endif //__nvoc_kern_gmmu_h_disabled
2380 
2381 const GMMU_FMT_FAMILY *kgmmuFmtGetFamily_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version);
2382 
2383 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuFmtGetFamily(struct KernelGmmu * pKernelGmmu,NvU32 version)2384 static inline const GMMU_FMT_FAMILY *kgmmuFmtGetFamily(struct KernelGmmu *pKernelGmmu, NvU32 version) {
2385     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
2386     return NULL;
2387 }
2388 #else //__nvoc_kern_gmmu_h_disabled
2389 #define kgmmuFmtGetFamily(pKernelGmmu, version) kgmmuFmtGetFamily_IMPL(pKernelGmmu, version)
2390 #endif //__nvoc_kern_gmmu_h_disabled
2391 
2392 const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
2393 
2394 #ifdef __nvoc_kern_gmmu_h_disabled
kgmmuGetStaticInfo(OBJGPU * pGpu,struct KernelGmmu * pKernelGmmu)2395 static inline const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
2396     NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
2397     return NULL;
2398 }
2399 #else //__nvoc_kern_gmmu_h_disabled
2400 #define kgmmuGetStaticInfo(pGpu, pKernelGmmu) kgmmuGetStaticInfo_IMPL(pGpu, pKernelGmmu)
2401 #endif //__nvoc_kern_gmmu_h_disabled
2402 
2403 const struct GMMU_FMT *kgmmuFmtGet_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize);
2404 
2405 #ifdef __nvoc_kern_gmmu_h_disabled
static inline const struct GMMU_FMT *kgmmuFmtGet(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGet(pKernelGmmu, version, bigPageSize) kgmmuFmtGet_IMPL(pKernelGmmu, version, bigPageSize)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuExtractPteInfo_IMPL(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuExtractPteInfo(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuExtractPteInfo(pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuExtractPteInfo_IMPL(pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuFieldSetKindCompTags_IMPL(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFieldSetKindCompTags(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFieldSetKindCompTags(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries) kgmmuFieldSetKindCompTags_IMPL(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries)
#endif //__nvoc_kern_gmmu_h_disabled

NvBool kgmmuFmtIsBigPageSizeSupported_IMPL(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuFmtIsBigPageSizeSupported(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, bigPageSize) kgmmuFmtIsBigPageSizeSupported_IMPL(pKernelGmmu, bigPageSize)
#endif //__nvoc_kern_gmmu_h_disabled

const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGetLatestSupportedFormat(pGpu, pKernelGmmu) kgmmuFmtGetLatestSupportedFormat_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NvU32 kgmmuGetFaultBufferReservedFbSpaceSize_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetFaultBufferReservedFbSpaceSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultBufferReservedFbSpaceSize(pGpu, pKernelGmmu) kgmmuGetFaultBufferReservedFbSpaceSize_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableSetup_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1, NvU32 arg2, RmPhysAddr *arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableSetup(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1, NvU32 arg2, RmPhysAddr *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableSetup(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuFaultBufferReplayableSetup_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetMinBigPageSize_IMPL(struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetMinBigPageSize(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetMinBigPageSize(pKernelGmmu) kgmmuGetMinBigPageSize_IMPL(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuInstBlkInit_IMPL(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkInit(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkInit(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams) kgmmuInstBlkInit_IMPL(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableAllocate(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferReplayableAllocate_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferReplayableDestroy_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferAlloc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferAlloc(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferAlloc_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferCreateMemDesc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferCreateMemDesc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferCreateMemDesc(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuFaultBufferCreateMemDesc_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferGetAddressSpace_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferGetAddressSpace(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferGetAddressSpace(pGpu, pKernelGmmu, arg0, arg1, arg2) kgmmuFaultBufferGetAddressSpace_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferFree_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFree_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferAllocate(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAllocate_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferDestroy(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferDestroy_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferRegister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferRegister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferRegister_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferPagesDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferPagesDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, arg0, arg1) kgmmuClientShadowFaultBufferPagesDestroy_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferQueueDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferQueueDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, arg0, arg1) kgmmuClientShadowFaultBufferQueueDestroy_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetSizeOfPageTables_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetSizeOfPageTables(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageTables_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetSizeOfPageDirs_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetSizeOfPageDirs(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageDirs_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

GMMU_APERTURE kgmmuGetExternalAllocAperture_IMPL(NvU32 addressSpace);

#define kgmmuGetExternalAllocAperture(addressSpace) kgmmuGetExternalAllocAperture_IMPL(addressSpace)
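
//
// Note: kgmmuGetExternalAllocAperture() takes only an address space and no
// KernelGmmu instance, which is presumably why it maps straight to its _IMPL
// with no __nvoc_kern_gmmu_h_disabled stub, unlike the interfaces above.
//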

void kgmmuEncodePhysAddrs_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuEncodePhysAddrs(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodePhysAddrs(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count) kgmmuEncodePhysAddrs_IMPL(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuEncodePhysAddr_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuEncodePhysAddr(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, fabricBaseAddress) kgmmuEncodePhysAddr_IMPL(pKernelGmmu, aperture, physAddr, fabricBaseAddress)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuAccessCntrChangeIntrOwnership_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuAccessCntrChangeIntrOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuAccessCntrChangeIntrOwnership(pGpu, pKernelGmmu, arg0) kgmmuAccessCntrChangeIntrOwnership_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

void *kgmmuGetShadowFaultBufferCslContext_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void *kgmmuGetShadowFaultBufferCslContext(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetShadowFaultBufferCslContext(pGpu, pKernelGmmu, type) kgmmuGetShadowFaultBufferCslContext_IMPL(pGpu, pKernelGmmu, type)
#endif //__nvoc_kern_gmmu_h_disabled

NvS32 *kgmmuGetFatalFaultIntrPendingState_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvS32 *kgmmuGetFatalFaultIntrPendingState(struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFatalFaultIntrPendingState(pKernelGmmu, gfid) kgmmuGetFatalFaultIntrPendingState_IMPL(pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetHwFaultBufferPtr(pKernelGmmu, gfid, faultBufferIndex) kgmmuGetHwFaultBufferPtr_IMPL(pKernelGmmu, gfid, faultBufferIndex)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetFaultBufferGenCnt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU8 gfid);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetFaultBufferGenCnt(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultBufferGenCnt(pGpu, pKernelGmmu, gfid) kgmmuGetFaultBufferGenCnt_IMPL(pGpu, pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

#undef PRIVATE_FIELD


// defines for TLB Invalidation scope
#define NV_GMMU_INVAL_SCOPE_ALL_TLBS       0x00000000
#define NV_GMMU_INVAL_SCOPE_LINK_TLBS      0x00000001
#define NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS  0x00000002
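
//
// Illustrative sketch (not part of the RM API): the scope values above are
// plain enumerants (0, 1, 2), not bit flags, so a caller passes exactly one
// of them per invalidate. A hypothetical scope selector might look like:
//
//     static NvU32 pickTlbInvalScope(NvBool bLinkTlbsOnly)
//     {
//         return bLinkTlbsOnly ? NV_GMMU_INVAL_SCOPE_LINK_TLBS
//                              : NV_GMMU_INVAL_SCOPE_ALL_TLBS;
//     }
//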

// bit fields for uvmSharedIntrRmOwnsMask
#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY          NVBIT(0)
#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_ERROR           NVBIT(1)
#define RM_UVM_SHARED_INTR_MASK_MMU_ECC_UNCORRECTED_ERROR_NOTIFY   NVBIT(2)
#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY        NVBIT(3)
#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_OVERFLOW      NVBIT(4)
#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_NOTIFY     NVBIT(5)
#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_OVERFLOW   NVBIT(6)
#define RM_UVM_SHARED_INTR_MASK_MMU_OTHER_FAULT_NOTIFY             NVBIT(7)
#define RM_UVM_SHARED_INTR_MASK_ALL                                (NVBIT(8) - 1)
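
//
// Illustrative sketch (not part of the RM API): these are bit masks, so
// RM_UVM_SHARED_INTR_MASK_ALL expands to (NVBIT(8) - 1) == 0xFF, covering all
// eight interrupts shared between RM and UVM. A hypothetical ownership check
// could look like:
//
//     static NvBool rmOwnsReplayableFaultNotify(NvU32 uvmSharedIntrRmOwnsMask)
//     {
//         return (uvmSharedIntrRmOwnsMask &
//                 RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY) != 0;
//     }
//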

/*!
 * Constants used for UVM mirroring loops.
 */
#define GMMU_USER_PAGE_DIR_INDEX       0
#define GMMU_KERNEL_PAGE_DIR_INDEX     1
#define GMMU_MAX_PAGE_DIR_INDEX_COUNT  (GMMU_KERNEL_PAGE_DIR_INDEX + 1)
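
//
// Illustrative sketch (not part of the RM API): a UVM mirroring loop walks
// both page-directory slots, user then kernel. pPageDirs and processPageDir()
// below are hypothetical.
//
//     NvU32 pdIndex;
//     for (pdIndex = GMMU_USER_PAGE_DIR_INDEX;
//          pdIndex < GMMU_MAX_PAGE_DIR_INDEX_COUNT;
//          pdIndex++)
//     {
//         processPageDir(pPageDirs[pdIndex]);
//     }
//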

/*!
 * Page table walker callbacks used for map/unmap operations.
 */
extern const MMU_WALK_CALLBACKS  g_gmmuWalkCallbacks;
extern const MMU_WALK_CALLBACKS  g_bar2WalkCallbacks;
extern const MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks;

void       gmmuMemDescCacheFree(GVAS_GPU_STATE *pGpuState);

#endif // KERN_GMMU_H

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_KERN_GMMU_NVOC_H_