#ifndef _G_KERN_GMMU_NVOC_H_
#define _G_KERN_GMMU_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
 *
 * Kernel GMMU module header
 * Defines and structures used on CPU RM for the GMMU object.
 *
 ******************************************************************************/

#include "g_kern_gmmu_nvoc.h"

#ifndef KERN_GMMU_H
#define KERN_GMMU_H

#include "core/core.h"
#include "core/strict.h"
#include "nvtypes.h"
#include "nvoc/prelude.h"
#include "nvoc/object.h"
#include "gpu/mmu/mmu_trace.h"
#include "mmu/gmmu_fmt.h"
#include "class/cl90f1.h"    // FERMI_VASPACE_A

#include "gpu/gpu_timeout.h"
#include "containers/queue.h"
#include "gpu/eng_state.h"
#include "gpu/intr/intr_service.h"
#include "gpu/fifo/kernel_fifo.h"
#include "gpu/mem_mgr/virt_mem_allocator_common.h"    // RM_PAGE_SIZE_64K
#include "mmu/mmu_walk.h"

#include "gpu/gpu_halspec.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"    // NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS

#include "class/clc369.h"    // MMU_FAULT_BUFFER

typedef struct COMPR_INFO COMPR_INFO;

typedef struct GVAS_GPU_STATE GVAS_GPU_STATE;

typedef struct _fifo_mmu_exception_data FIFO_MMU_EXCEPTION_DATA;

/*!
 * Family of GMMU formats sharing the same version and PDE/PTE defines,
 * but with differing big page sizes.
 * The term "family" is used here in the mathematical (set theory) sense.
 *
 * nv4kPte: GV100+ supports NV4K encoding; see @ref gmmuStateInitHal_GV100 for more.
 */
typedef struct
{
    GMMU_FMT_PDE_MULTI pdeMulti;
    GMMU_FMT_PDE pde;
    GMMU_FMT_PTE pte;
    GMMU_ENTRY_VALUE sparsePte;
    GMMU_ENTRY_VALUE sparsePde;
    GMMU_ENTRY_VALUE sparsePdeMulti;
    GMMU_ENTRY_VALUE nv4kPte;
    GMMU_ENTRY_VALUE bug2720120WarPde0;
    GMMU_ENTRY_VALUE bug2720120WarPde1;
    GMMU_FMT *pFmts[GMMU_FMT_MAX_BIG_PAGE_SIZES];
} GMMU_FMT_FAMILY;
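/*
 * Illustrative sketch (not part of the generated API; helper name and the
 * 64K-vs-128K index mapping are assumptions): a format family keeps one
 * GMMU_FMT per supported big page size, so a lookup amounts to mapping the
 * big page size to an index into pFmts[].
 *
 *     static const GMMU_FMT *exampleFmtFromFamily(const GMMU_FMT_FAMILY *pFam,
 *                                                 NvU64 bigPageSize)
 *     {
 *         // Hypothetical mapping: index 0 <-> 64K, index 1 <-> 128K.
 *         NvU32 idx = (bigPageSize == RM_PAGE_SIZE_64K) ? 0 : 1;
 *         return (idx < GMMU_FMT_MAX_BIG_PAGE_SIZES) ? pFam->pFmts[idx] : NULL;
 *     }
 */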
/*!
 * This structure contains information needed for issuing a TLB invalidate.
 */
typedef struct
{
    RmPhysAddr pdbAddress;
    NvU32 pdbAperture;
    NvU32 gfid;
    NvU32 regVal;
    RMTIMEOUT timeout;
} TLB_INVALIDATE_PARAMS;

typedef enum
{
    NON_REPLAYABLE_FAULT_BUFFER = 0,
    REPLAYABLE_FAULT_BUFFER,
    // This should always be the last entry.
    NUM_FAULT_BUFFERS
} FAULT_BUFFER_TYPE;

/*!
 * This structure holds information about a page
 * of memory backing the fault buffer.
 */
typedef struct
{
    /*! Virtual address of this page */
    NvP64 pAddress;

    /*! Cookie returned by memdescMap() */
    NvP64 pPriv;
} GMMU_FAULT_BUFFER_PAGE;

/*!
 * This structure holds information about the MMU HW fault buffer, which is
 * mapped on BAR2 and used by the MMU to report MMU faults to SW.
 */
struct HW_FAULT_BUFFER
{
    NvU64 bar2FaultBufferAddr;
    MEMORY_DESCRIPTOR *pFaultBufferMemDesc;
    /*!
     * Cookie that is stored for the CPU mapping
     */
    NvP64 hCpuFaultBuffer;
    NvP64 kernelVaddr;

    GMMU_FAULT_BUFFER_PAGE *pBufferPages;

    NvU32 cachedGetIndex;

    /*!
     * Cached fault buffer size
     */
    NvU32 faultBufferSize;
};

/*!
 * This structure holds information shared between CPU-RM
 * and GSP-RM.
 */
typedef struct
{
    /*!
     * The GET index of the replayable shadow buffer. This
     * is updated by the UVM driver and read by GSP-RM.
     */
    NvU32 swGetIndex;
} FAULT_BUFFER_SHARED_MEMORY;

/*!
 * This structure holds information about the client shadow fault buffer.
 */
typedef struct
{
    /*!
     * Pointer to the circular queue structure shared by RM with a
     * privileged client, used as the shadow fault buffer for holding
     * non-replayable faults.
     * This structure is shared between CPU-RM and GSP-RM in a GSP
     * enabled driver.
     */
    NvP64 pQueue;

    /*! Memory descriptors associated with the queue. */
    MEMORY_DESCRIPTOR *pQueueMemDesc;

    NvP64 pQueueAddress;

    /*!
     * Execution context for the queue. Holds environment-specific
     * data that enables queue usage.
     */
    QueueContext queueContext;

    /*! Cookie returned by memdescMap() */
    NvP64 pQueuePriv;

    /*! Memory descriptor associated with the buffer. */
    MEMORY_DESCRIPTOR *pBufferMemDesc;

    NvP64 pBufferAddress;

    /*! Cookie returned by memdescMap() */
    NvP64 pBufferPriv;

    /*! GSP-only split mapping of the buffer. */
    GMMU_FAULT_BUFFER_PAGE *pBufferPages;

    NvU32 numBufferPages;

    /*!
     * Start index of the page containing the fault buffer metadata.
     * 0 if no metadata is present.
     */
    NvU32 metadataStartIndex;

    /*!
     * Used only by the replayable fault buffer. Memory descriptor used to
     * describe shared memory between CPU-RM and GSP-RM.
     */
    MEMORY_DESCRIPTOR *pFaultBufferSharedMemDesc;

    NvP64 pFaultBufferSharedMemoryAddress;

    NvP64 pFaultBufferSharedMemoryPriv;

    NvP64 pFaultBufferMetadataAddress;

} GMMU_CLIENT_SHADOW_FAULT_BUFFER;
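/*
 * Illustrative sketch (assumptions noted): when the buffer is mapped as
 * discontiguous pages via pBufferPages[] (the GSP-side split mapping), the
 * CPU address of fault packet i can be derived from the packet size and the
 * page size. The helper below is hypothetical; it assumes NVC369_BUF_SIZE-byte
 * packets packed into RM_PAGE_SIZE pages.
 *
 *     static NvU8 *examplePacketAddr(GMMU_CLIENT_SHADOW_FAULT_BUFFER *pBuf,
 *                                    NvU32 i)
 *     {
 *         NvU64 byteOff = (NvU64)i * NVC369_BUF_SIZE;
 *         NvU32 page    = (NvU32)(byteOff / RM_PAGE_SIZE);
 *         NvU32 offset  = (NvU32)(byteOff % RM_PAGE_SIZE);
 *         return (NvU8 *)NvP64_VALUE(pBuf->pBufferPages[page].pAddress) + offset;
 *     }
 */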
/*!
 * Top-level structure containing all data structures used in MMU fault handling.
 */
struct GMMU_FAULT_BUFFER
{
    struct HW_FAULT_BUFFER hwFaultBuffers[NUM_FAULT_BUFFERS];

    /*!
     * Unique client and object handles stored here.
     * On VOLTA this is for MMU_FAULT_BUFFER; on PASCAL, for MAXWELL_FAULT_BUFFER_A.
     */
    NvHandle hFaultBufferClient;
    NvHandle hFaultBufferObject;

    /*!
     * Pointer to the circular queue structure used as a shadow fault buffer
     * for holding fatal fault packets serviced by RM.
     */
    NvP64 pRmShadowFaultBuffer;

    /*!
     * Client shadow fault buffer data and pointers, protected by GPU locks.
     * A client may allocate up to two shadow buffers, one each for replayable
     * and non-replayable faults.
     */
    GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer[NUM_FAULT_BUFFERS];
    GMMU_CLIENT_SHADOW_FAULT_BUFFER clientShadowFaultBuffer[NUM_FAULT_BUFFERS];

    /*!
     * Spinlock to protect the shadow buffer pointers
     */
    PORT_SPINLOCK *pShadowFaultBufLock;

    /*!
     * Flag indicating that a fatal fault interrupt is pending
     */
    NvS32 fatalFaultIntrPending;

    /*! Generational counter for the fault buffer. Incremented when the fault buffer wraps around. */
    volatile NvU64 faultBufferGenerationCounter;
};

typedef struct GMMU_FAULT_PACKET
{
    // 32-byte MMU fault packet
    NvU8 faultPacket[NVC369_BUF_SIZE];
} GMMU_FAULT_PACKET;

// Initialize the circular queue type for the MMU shadow fault buffer.
MAKE_QUEUE_CIRCULAR(GMMU_SHADOW_FAULT_BUF, GMMU_FAULT_PACKET);

#define GMMU_FAULT_PACKET_METADATA_SIZE         32
#define GMMU_FAULT_PACKET_METADATA_AUTHTAG_IDX   0
#define GMMU_FAULT_PACKET_METADATA_AUTHTAG_SIZE 16
#define GMMU_FAULT_PACKET_METADATA_VALID_IDX    16
#define GMMU_FAULT_PACKET_METADATA_VALID_SIZE    1
#define GMMU_FAULT_PACKET_METADATA_VALID_YES    NV_TRUE
#define GMMU_FAULT_PACKET_METADATA_VALID_NO     NV_FALSE

typedef struct GMMU_FAULT_PACKET_METADATA
{
    NvU8 metadata[GMMU_FAULT_PACKET_METADATA_SIZE];
} GMMU_FAULT_PACKET_METADATA;

/*!
 * Structure that holds the different parameters passed by an engine to
 * kgmmuInstBlkInit for initializing its instance block.
 */
typedef struct
{
    NvBool bIsClientAdmin;
    NvBool bIsFaultReplayable;
    /*
     * Defer the bus flush during instance block init.
     * If this field is set, the kgmmuInstBlkInit() routine won't flush after
     * the CPU writes; the caller of kgmmuInstBlkInit() has to flush
     * explicitly. This is useful if the caller does back-to-back updates to
     * the instance block, e.g. subcontext array init during channel setup.
     */
    NvBool bDeferFlush;
    NvU64 uvmKernelPrivRegion;

    // Instance block is being updated for a zombie subcontext.
    NvBool bIsZombieSubctx;
    NvU8 *pInstBlk;    // VA of the instance block.
} INST_BLK_INIT_PARAMS, *PINST_BLK_INIT_PARAMS;
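/*
 * Illustrative sketch (hypothetical caller; kgmmuInstBlkInit's exact
 * signature is not declared in this header): with bDeferFlush set, several
 * back-to-back instance block updates can share a single explicit flush.
 *
 *     INST_BLK_INIT_PARAMS params = {0};
 *     params.bDeferFlush = NV_TRUE;            // skip the per-call bus flush
 *     for (i = 0; i < numSubctx; i++)
 *         exampleInstBlkUpdate(pGpu, pKernelGmmu, i, &params);
 *     exampleBusFlush(pGpu);                   // one explicit flush at the end
 */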
typedef enum
{
    fault_invalidPde          = 0x00000000,
    fault_invalidPdeSize      = 0x00000001,
    fault_invalidPte          = 0x00000002,
    fault_limitViolation      = 0x00000003,
    fault_unboundInstBlock    = 0x00000004,
    fault_privViolation       = 0x00000005,
    fault_write               = 0x00000006,
    fault_read                = 0x00000007,
    fault_pitchMaskViolation  = 0x00000008,
    fault_workCreation        = 0x00000009,
    fault_unsupportedAperture = 0x0000000a,
    fault_compressionFailure  = 0x0000000b,
    fault_cc_violation        = 0x0000000b,
    fault_unsupportedKind     = 0x0000000c,
    fault_regionViolation     = 0x0000000d,
    fault_poison              = 0x0000000e,
    fault_atomic              = 0x0000000f
} FAULT_TYPE;
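/*
 * Illustrative sketch (hypothetical debugging helper, not part of this
 * header): FAULT_TYPE values come straight from the HW fault encoding, so
 * decoding one to a printable name is a plain switch. Note that
 * fault_compressionFailure and fault_cc_violation share the value 0x0000000b
 * in this header, so only one of them can appear as a case label.
 *
 *     static const char *exampleFaultTypeName(FAULT_TYPE t)
 *     {
 *         switch (t)
 *         {
 *             case fault_invalidPde:       return "invalidPde";
 *             case fault_invalidPte:       return "invalidPte";
 *             case fault_unboundInstBlock: return "unboundInstBlock";
 *             case fault_write:            return "write";
 *             case fault_read:             return "read";
 *             default:                     return "other";
 *         }
 *     }
 */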
typedef struct
{
    INST_BLOCK_DESC mmuFaultInstBlock;
    NvU64 mmuFaultAddress;
    NvU64 mmuFaultTimestamp;
    FAULT_TYPE mmuFaultType;
    NvU32 mmuFaultAccessType;
    NvU32 mmuFaultEngineId;
    NvU32 mmuFaultClientId;
    NvU32 mmuFaultClientType;
    NvU32 mmuFaultGpcId;
    NvU8 bFaultEntryValid      : 1;
    NvU8 bFaultInProtectedMode : 1;
    NvU8 bFaultTypeReplayable  : 1;
    NvU8 bReplayableFaultEn    : 1;
} MMU_FAULT_BUFFER_ENTRY;

/*!
 * This structure contains information needed for a targeted fault cancel.
 * It is passed in by UVM using SW methods (cl0076.h).
 */
typedef struct
{
    NvU32 clientId;
    NvU32 gpcId;
    INST_BLOCK_DESC instBlock;
} GMMU_FAULT_CANCEL_INFO;

#define VMMU_MAX_GFID 64


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif

struct KernelGmmu {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct IntrService __nvoc_base_IntrService;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct IntrService *__nvoc_pbase_IntrService;
    struct KernelGmmu *__nvoc_pbase_KernelGmmu;
    NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR);
    NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStateLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStateUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
    void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *);
    NvBool (*__kgmmuClearInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *);
    NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *);
    NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
    NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);
    NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *);
    NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *);
    void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
    void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
    void (*__kgmmuFmtInitPte__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);
    void (*__kgmmuFmtInitPde__)(struct KernelGmmu *, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
    NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu *, NvU32);
    void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);
    void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu *, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
    NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
    NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu *, NvU32, NvBool, NvU32 *);
    NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
    NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
    NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
    NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu *, NvBool);
    NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
    void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
    NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *);
    NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
    NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
    NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu *, NvU32);
    NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu *, NvU32);
    NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE);
    NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu *, NvP64, NvP64);
    void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32, NvU32);
    GMMU_FAULT_PACKET *(*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32);
    NvU32 (*__kgmmuCopyFaultPacketToClientShadowBuffer__)(OBJGPU *, struct KernelGmmu *, struct GMMU_FAULT_BUFFER *, FAULT_BUFFER_TYPE, NvU32, NvU32, NvU32, struct THREAD_STATE_NODE *, NvU32 *);
    NvBool (*__kgmmuIsReplayableShadowFaultBufferFull__)(OBJGPU *, struct KernelGmmu *, GMMU_CLIENT_SHADOW_FAULT_BUFFER *, NvU32, NvU32);
    NvU32 (*__kgmmuReadClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE);
    void (*__kgmmuWriteClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE, NvU32);
    NvU32 (*__kgmmuGetMinCeEngineId__)(struct KernelGmmu *);
    NvU32 (*__kgmmuGetMaxCeEngineId__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultBufferMap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuFaultBufferUnmap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuFaultBufferInit__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultBufferDestroy__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultBufferLoad__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuFaultBufferUnload__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuEnableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
    NV_STATUS (*__kgmmuDisableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
    NvU32 (*__kgmmuSetAndGetDefaultFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE, NvU32);
    void (*__kgmmuReadMmuFaultInstHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
    void (*__kgmmuReadMmuFaultAddrHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
    NvU32 (*__kgmmuReadMmuFaultInfo__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuWriteMmuFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32);
    void (*__kgmmuWriteMmuFaultBufferHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32, NvU32);
    NV_STATUS (*__kgmmuEnableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuDisableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuEnableMmuFaultOverflowIntr__)(OBJGPU *, struct KernelGmmu *, NvU32);
    void (*__kgmmuSignExtendFaultAddress__)(OBJGPU *, struct KernelGmmu *, NvU64 *);
    NV_STATUS (*__kgmmuGetFaultType__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_TYPE *);
    NvBool (*__kgmmuIsP2PUnboundInstFault__)(struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuServiceVfPriFaults__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NvBool (*__kgmmuTestVidmemAccessBitBufferError__)(OBJGPU *, struct KernelGmmu *, NvU32);
    void (*__kgmmuDisableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuEnableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuClearAccessCounterWriteNak__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuServiceMthdBuffFaultInBar2Fault__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultCancelTargeted__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *);
    NV_STATUS (*__kgmmuFaultCancelIssueInvalidate__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *, TLB_INVALIDATE_PARAMS *, NvBool);
    NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
    NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
    NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuClearEccCounts__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
    void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *);
    NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *);
    NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
    NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
    NvBool PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE;
    NvBool PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE;
    NvBool bReportFlaTranslationXid;
    NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
    NvU64 defaultBigPageSize;
    NvU32 uvmSharedIntrRmOwnsMask;
    GMMU_FMT_FAMILY *PRIVATE_FIELD(pFmtFamilies)[3];
    NvU32 PRIVATE_FIELD(PDEAperture);
    NvU32 PRIVATE_FIELD(PDEAttr);
    NvU32 PRIVATE_FIELD(PDEBAR1Aperture);
    NvU32 PRIVATE_FIELD(PDEBAR1Attr);
    NvU32 PRIVATE_FIELD(PTEAperture);
    NvU32 PRIVATE_FIELD(PTEAttr);
    NvU32 PRIVATE_FIELD(PTEBAR1Aperture);
    NvU32 PRIVATE_FIELD(PTEBAR1Attr);
    NvU64 PRIVATE_FIELD(overrideBigPageSize);
    NvBool PRIVATE_FIELD(bEnablePerVaspaceBigPage);
    NvBool PRIVATE_FIELD(bIgnoreHubTlbInvalidate);
    NvU64 PRIVATE_FIELD(maxVASize);
    struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pdeApertures)[5];
    struct NV_FIELD_ENUM_ENTRY PRIVATE_FIELD(pteApertures)[5];
    MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarSmallPageTable);
    MEMORY_DESCRIPTOR *PRIVATE_FIELD(pWarPageDirectory0);
    struct GMMU_FAULT_BUFFER PRIVATE_FIELD(mmuFaultBuffer)[64];
    NvU64 PRIVATE_FIELD(sysmemBaseAddress);
    NvU32 PRIVATE_FIELD(minCeMmuFaultId);
    NvU32 PRIVATE_FIELD(maxCeMmuFaultId);
    NvBool PRIVATE_FIELD(bHugePageSupported);
    NvBool PRIVATE_FIELD(bPageSize512mbSupported);
    NvBool PRIVATE_FIELD(bBug2720120WarEnabled);
    NvBool PRIVATE_FIELD(bVaspaceInteropSupported);
};
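/*
 * Illustrative note (sketch only; the assignment is hypothetical): the
 * PRIVATE_FIELD members above may only be referenced by the matching C
 * source file, which defines NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED before
 * including this header; any other file referencing them draws a diagnostic.
 *
 *     // In the implementing source file only:
 *     #define NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
 *     #include "g_kern_gmmu_nvoc.h"
 *
 *     pKernelGmmu->maxVASize = maxVA;    // plain-name access is legal here
 */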
struct KernelGmmu_PRIVATE {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct IntrService __nvoc_base_IntrService;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct IntrService *__nvoc_pbase_IntrService;
    struct KernelGmmu *__nvoc_pbase_KernelGmmu;
    NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR);
    NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStateLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStateUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStatePreUnload__)(OBJGPU *, struct KernelGmmu *, NvU32);
    void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *);
    NvBool (*__kgmmuClearInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *);
    NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *);
    NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
    NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *);
    NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *);
    NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *);
    void (*__kgmmuFmtInitPteComptagLine__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
    void (*__kgmmuFmtInitPeerPteFld__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32);
    void (*__kgmmuFmtInitPte__)(struct KernelGmmu *, struct GMMU_FMT_PTE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *, const NvBool);
    void (*__kgmmuFmtInitPde__)(struct KernelGmmu *, struct GMMU_FMT_PDE *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
    NvBool (*__kgmmuFmtIsVersionSupported__)(struct KernelGmmu *, NvU32);
    void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32);
    void (*__kgmmuFmtInitPdeMulti__)(struct KernelGmmu *, struct GMMU_FMT_PDE_MULTI *, const NvU32, const struct NV_FIELD_ENUM_ENTRY *);
    NV_STATUS (*__kgmmuFmtFamiliesInit__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuTranslatePtePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
    NV_STATUS (*__kgmmuTranslatePtePcfFromHw__)(struct KernelGmmu *, NvU32, NvBool, NvU32 *);
    NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
    NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
    NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
    NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu *, NvBool);
    NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
    void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
    NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *);
    NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
    NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
    NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu *, NvU32);
    NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu *, NvU32);
    NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE);
    NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu *, NvP64, NvP64);
    void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32, NvU32);
    GMMU_FAULT_PACKET *(*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32);
    NvU32 (*__kgmmuCopyFaultPacketToClientShadowBuffer__)(OBJGPU *, struct KernelGmmu *, struct GMMU_FAULT_BUFFER *, FAULT_BUFFER_TYPE, NvU32, NvU32, NvU32, struct THREAD_STATE_NODE *, NvU32 *);
    NvBool (*__kgmmuIsReplayableShadowFaultBufferFull__)(OBJGPU *, struct KernelGmmu *, GMMU_CLIENT_SHADOW_FAULT_BUFFER *, NvU32, NvU32);
    NvU32 (*__kgmmuReadClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE);
    void (*__kgmmuWriteClientShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_BUFFER_TYPE, NvU32);
    NvU32 (*__kgmmuGetMinCeEngineId__)(struct KernelGmmu *);
    NvU32 (*__kgmmuGetMaxCeEngineId__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultBufferMap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuFaultBufferUnmap__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuFaultBufferInit__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultBufferDestroy__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultBufferLoad__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuFaultBufferUnload__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuEnableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
    NV_STATUS (*__kgmmuDisableFaultBuffer__)(OBJGPU *, struct KernelGmmu *, NvU32, NvBool, NvU32);
    NvU32 (*__kgmmuSetAndGetDefaultFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE, NvU32);
    void (*__kgmmuReadMmuFaultInstHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
    void (*__kgmmuReadMmuFaultAddrHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32 *, NvU32 *);
    NvU32 (*__kgmmuReadMmuFaultInfo__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuWriteMmuFaultBufferSize__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32);
    void (*__kgmmuWriteMmuFaultBufferHiLo__)(OBJGPU *, struct KernelGmmu *, NvU32, NvU32, NvU32, NvU32);
    NV_STATUS (*__kgmmuEnableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuDisableMmuFaultInterrupts__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuEnableMmuFaultOverflowIntr__)(OBJGPU *, struct KernelGmmu *, NvU32);
    void (*__kgmmuSignExtendFaultAddress__)(OBJGPU *, struct KernelGmmu *, NvU64 *);
    NV_STATUS (*__kgmmuGetFaultType__)(OBJGPU *, struct KernelGmmu *, NvU32, FAULT_TYPE *);
    NvBool (*__kgmmuIsP2PUnboundInstFault__)(struct KernelGmmu *, NvU32, NvU32);
    NV_STATUS (*__kgmmuServiceVfPriFaults__)(OBJGPU *, struct KernelGmmu *, NvU32);
    NvBool (*__kgmmuTestVidmemAccessBitBufferError__)(OBJGPU *, struct KernelGmmu *, NvU32);
    void (*__kgmmuDisableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuEnableVidmemAccessBitBuf__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuClearAccessCounterWriteNak__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuServiceMthdBuffFaultInBar2Fault__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuFaultCancelTargeted__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *);
    NV_STATUS (*__kgmmuFaultCancelIssueInvalidate__)(OBJGPU *, struct KernelGmmu *, GMMU_FAULT_CANCEL_INFO *, TLB_INVALIDATE_PARAMS *, NvBool);
    NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
    NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
    NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
    void (*__kgmmuClearEccCounts__)(OBJGPU *, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
    NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
    void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *);
    NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *);
    NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *);
    NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED;
    NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED;
    NvBool PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE;
    NvBool PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE;
    NvBool bReportFlaTranslationXid;
    NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo;
    NvU64 defaultBigPageSize;
    NvU32 uvmSharedIntrRmOwnsMask;
    GMMU_FMT_FAMILY *pFmtFamilies[3];
    NvU32 PDEAperture;
    NvU32 PDEAttr;
    NvU32 PDEBAR1Aperture;
    NvU32 PDEBAR1Attr;
    NvU32 PTEAperture;
    NvU32 PTEAttr;
    NvU32 PTEBAR1Aperture;
    NvU32 PTEBAR1Attr;
    NvU64 overrideBigPageSize;
    NvBool bEnablePerVaspaceBigPage;
    NvBool bIgnoreHubTlbInvalidate;
    NvU64 maxVASize;
    struct NV_FIELD_ENUM_ENTRY pdeApertures[5];
    struct NV_FIELD_ENUM_ENTRY pteApertures[5];
    MEMORY_DESCRIPTOR *pWarSmallPageTable;
    MEMORY_DESCRIPTOR *pWarPageDirectory0;
    struct GMMU_FAULT_BUFFER mmuFaultBuffer[64];
    NvU64 sysmemBaseAddress;
    NvU32 minCeMmuFaultId;
    NvU32 maxCeMmuFaultId;
    NvBool bHugePageSupported;
    NvBool bPageSize512mbSupported;
    NvBool bBug2720120WarEnabled;
    NvBool bVaspaceInteropSupported;
};
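/*
 * Illustrative sketch (assumption about NVOC usage, not a declared API):
 * KernelGmmu_PRIVATE mirrors KernelGmmu field-for-field with the private
 * names unwrapped, so privileged implementation code can view the same
 * object through a cast:
 *
 *     struct KernelGmmu_PRIVATE *pPriv =
 *         (struct KernelGmmu_PRIVATE *)pKernelGmmu;   // hypothetical use
 *     NvU64 maxVA = pPriv->maxVASize;
 */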
#ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__
#define __NVOC_CLASS_KernelGmmu_TYPEDEF__
typedef struct KernelGmmu KernelGmmu;
#endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelGmmu
#define __nvoc_class_id_KernelGmmu 0x29362f
#endif /* __nvoc_class_id_KernelGmmu */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu;

#define __staticCast_KernelGmmu(pThis) \
    ((pThis)->__nvoc_pbase_KernelGmmu)

#ifdef __nvoc_kern_gmmu_h_disabled
#define __dynamicCast_KernelGmmu(pThis) ((KernelGmmu*)NULL)
#else //__nvoc_kern_gmmu_h_disabled
#define __dynamicCast_KernelGmmu(pThis) \
    ((KernelGmmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGmmu)))
#endif //__nvoc_kern_gmmu_h_disabled

#define PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE_BASE_CAST
#define PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE_BASE_NAME PDB_PROP_KGMMU_REDUCE_NR_FAULT_BUFFER_SIZE
#define PDB_PROP_KGMMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KGMMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
#define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_CAST
#define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_NAME PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED
#define PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE_BASE_CAST
#define PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE_BASE_NAME PDB_PROP_KGMMU_REPLAYABLE_FAULT_BUFFER_IN_USE
#define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_CAST
#define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_NAME PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED

NV_STATUS __nvoc_objCreateDynamic_KernelGmmu(KernelGmmu**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
#define __objCreate_KernelGmmu(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelGmmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
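/*
 * Illustrative sketch (hypothetical call site): creating a KernelGmmu
 * instance and down-casting a generic pointer back to it using the macros
 * defined above.
 *
 *     KernelGmmu *pKernelGmmu = NULL;
 *     NV_STATUS status = __objCreate_KernelGmmu(&pKernelGmmu, pParent, 0);
 *     if (status == NV_OK)
 *     {
 *         KernelGmmu *pSame = __dynamicCast_KernelGmmu(pKernelGmmu);
 *         // pSame == pKernelGmmu when the RTTI lookup succeeds.
 *     }
 */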
#define kgmmuConstructEngine(pGpu, pKernelGmmu, arg0) kgmmuConstructEngine_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStateInitLocked(pGpu, pKernelGmmu) kgmmuStateInitLocked_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuStateLoad(pGpu, pKernelGmmu, arg0) kgmmuStateLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStateUnload(pGpu, pKernelGmmu, arg0) kgmmuStateUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStatePostLoad(pGpu, pKernelGmmu, arg0) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStatePostLoad_HAL(pGpu, pKernelGmmu, arg0) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStatePreUnload(pGpu, pKernelGmmu, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStatePreUnload_HAL(pGpu, pKernelGmmu, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuStateDestroy(pGpu, pKernelGmmu) kgmmuStateDestroy_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuRegisterIntrService(pGpu, pKernelGmmu, arg0) kgmmuRegisterIntrService_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuClearInterrupt(pGpu, pKernelGmmu, pParams) kgmmuClearInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
#define kgmmuServiceInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
#define kgmmuServiceNotificationInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
#define kgmmuServiceNotificationInterrupt_HAL(pGpu, pKernelGmmu, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams)
#define kgmmuInstBlkVaLimitGet(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
#define kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
#define kgmmuSetTlbInvalidateMembarWarParameters(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
#define kgmmuSetTlbInvalidateMembarWarParameters_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams)
#define kgmmuSetTlbInvalidationScope(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
#define kgmmuSetTlbInvalidationScope_HAL(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams)
#define kgmmuFmtInitPteComptagLine(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
#define kgmmuFmtInitPteComptagLine_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_DISPATCH(pKernelGmmu, pPte, version)
#define kgmmuFmtInitPeerPteFld(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
#define kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_DISPATCH(pKernelGmmu, pPte, version)
#define kgmmuFmtInitPte(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
#define kgmmuFmtInitPte_HAL(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_DISPATCH(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture)
#define kgmmuFmtInitPde(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
#define kgmmuFmtInitPde_HAL(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_DISPATCH(pKernelGmmu, pPde, version, pPdeApertures)
#define kgmmuFmtIsVersionSupported(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
#define kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, version) kgmmuFmtIsVersionSupported_DISPATCH(pKernelGmmu, version)
#define kgmmuFmtInitLevels(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
#define kgmmuFmtInitLevels_HAL(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift)
#define kgmmuFmtInitPdeMulti(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
#define kgmmuFmtInitPdeMulti_HAL(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_DISPATCH(pKernelGmmu, pPdeMulti, version, pPdeApertures)
#define kgmmuFmtFamiliesInit(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFmtFamiliesInit_HAL(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuTranslatePtePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
#define kgmmuTranslatePtePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
#define kgmmuTranslatePtePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
#define kgmmuTranslatePtePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
#define kgmmuTranslatePdePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
#define kgmmuTranslatePdePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_DISPATCH(pKernelGmmu, arg0, arg1)
#define kgmmuTranslatePdePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
#define kgmmuTranslatePdePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
#define kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
#define kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
#define kgmmuIssueReplayableFaultBufferFlush(pGpu, pKernelGmmu) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuIssueReplayableFaultBufferFlush_HAL(pGpu, pKernelGmmu) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuToggleFaultOnPrefetch(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
#define kgmmuToggleFaultOnPrefetch_HAL(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
#define kgmmuFaultBufferAllocSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuFaultBufferAllocSharedMemory_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuFaultBufferFreeSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuFaultBufferFreeSharedMemory_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFreeSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuSetupWarForBug2720120(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam)
#define kgmmuSetupWarForBug2720120_HAL(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam)
#define kgmmuGetGraphicsEngineId(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
#define kgmmuGetGraphicsEngineId_HAL(pKernelGmmu) kgmmuGetGraphicsEngineId_DISPATCH(pKernelGmmu)
#define kgmmuReadShadowBufPutIndex(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
#define kgmmuReadShadowBufPutIndex_HAL(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
#define kgmmuIsFaultEngineBar1(pKernelGmmu, arg0) kgmmuIsFaultEngineBar1_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEngineBar1_HAL(pKernelGmmu, arg0) kgmmuIsFaultEngineBar1_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEngineBar2(pKernelGmmu, arg0) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEngineBar2_HAL(pKernelGmmu, arg0) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEnginePhysical(pKernelGmmu, arg0) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEnginePhysical_HAL(pKernelGmmu, arg0) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg0)
#define kgmmuCopyMmuFaults(pGpu, pKernelGmmu, pThreadState, entriesCopied, type) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type)
#define kgmmuCopyMmuFaults_HAL(pGpu, pKernelGmmu, pThreadState, entriesCopied, type) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type)
#define kgmmuParseFaultPacket(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
#define kgmmuParseFaultPacket_HAL(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
#define kgmmuFaultBufferClearPackets(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
#define kgmmuFaultBufferClearPackets_HAL(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
#define kgmmuFaultBufferGetFault(pGpu, pKernelGmmu, pFaultBuffer, idx) kgmmuFaultBufferGetFault_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, idx)
#define kgmmuFaultBufferGetFault_HAL(pGpu, pKernelGmmu, pFaultBuffer, idx) kgmmuFaultBufferGetFault_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, idx)
#define kgmmuCopyFaultPacketToClientShadowBuffer(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied) kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied)
#define kgmmuCopyFaultPacketToClientShadowBuffer_HAL(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied) kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied)
#define kgmmuIsReplayableShadowFaultBufferFull(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries) kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries)
#define kgmmuIsReplayableShadowFaultBufferFull_HAL(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries) kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries)
#define kgmmuReadClientShadowBufPutIndex(pGpu, pKernelGmmu, gfid, type) kgmmuReadClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type)
#define kgmmuReadClientShadowBufPutIndex_HAL(pGpu, pKernelGmmu, gfid, type) kgmmuReadClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type)
#define kgmmuWriteClientShadowBufPutIndex(pGpu, pKernelGmmu, gfid, type, putIndex) kgmmuWriteClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type, putIndex)
#define kgmmuWriteClientShadowBufPutIndex_HAL(pGpu, pKernelGmmu, gfid, type, putIndex) kgmmuWriteClientShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, gfid, type, putIndex)
#define kgmmuGetMinCeEngineId(pKernelGmmu) kgmmuGetMinCeEngineId_DISPATCH(pKernelGmmu)
#define kgmmuGetMinCeEngineId_HAL(pKernelGmmu) kgmmuGetMinCeEngineId_DISPATCH(pKernelGmmu)
#define kgmmuGetMaxCeEngineId(pGpu, pKernelGmmu) kgmmuGetMaxCeEngineId_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetMaxCeEngineId_HAL(pGpu, pKernelGmmu) kgmmuGetMaxCeEngineId_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferMap(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferMap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferMap_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferMap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnmap(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnmap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnmap_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnmap_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferInit(pGpu, pKernelGmmu) kgmmuFaultBufferInit_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferInit_HAL(pGpu, pKernelGmmu) kgmmuFaultBufferInit_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferDestroy_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferDestroy_HAL(pGpu, pKernelGmmu) kgmmuFaultBufferDestroy_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultBufferLoad(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferLoad_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferLoad_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferLoad_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnload(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnload_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuFaultBufferUnload_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuFaultBufferUnload_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuEnableFaultBuffer(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuEnableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuEnableFaultBuffer_HAL(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuEnableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuDisableFaultBuffer(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuDisableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuDisableFaultBuffer_HAL(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid) kgmmuDisableFaultBuffer_DISPATCH(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid)
#define kgmmuSetAndGetDefaultFaultBufferSize(pGpu, pKernelGmmu, index, gfid) kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuSetAndGetDefaultFaultBufferSize_HAL(pGpu, pKernelGmmu, index, gfid) kgmmuSetAndGetDefaultFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, index, gfid)
#define kgmmuReadMmuFaultInstHiLo(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultInstHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuReadMmuFaultInstHiLo_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultInstHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuReadMmuFaultAddrHiLo(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuReadMmuFaultAddrHiLo_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuReadMmuFaultInfo(pGpu, pKernelGmmu) kgmmuReadMmuFaultInfo_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuReadMmuFaultInfo_HAL(pGpu, pKernelGmmu) kgmmuReadMmuFaultInfo_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuWriteMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, arg1, gfid) kgmmuWriteMmuFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, gfid)
#define kgmmuWriteMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg0, arg1, gfid) kgmmuWriteMmuFaultBufferSize_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, gfid)
#define kgmmuWriteMmuFaultBufferHiLo(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid) kgmmuWriteMmuFaultBufferHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid)
#define kgmmuWriteMmuFaultBufferHiLo_HAL(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid) kgmmuWriteMmuFaultBufferHiLo_DISPATCH(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid)
#define kgmmuEnableMmuFaultInterrupts(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuEnableMmuFaultInterrupts_HAL(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuDisableMmuFaultInterrupts(pGpu, pKernelGmmu, index) kgmmuDisableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuDisableMmuFaultInterrupts_HAL(pGpu, pKernelGmmu, index) kgmmuDisableMmuFaultInterrupts_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuEnableMmuFaultOverflowIntr(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultOverflowIntr_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuEnableMmuFaultOverflowIntr_HAL(pGpu, pKernelGmmu, index) kgmmuEnableMmuFaultOverflowIntr_DISPATCH(pGpu, pKernelGmmu, index)
#define kgmmuSignExtendFaultAddress(pGpu, pKernelGmmu, pMmuFaultAddress) kgmmuSignExtendFaultAddress_DISPATCH(pGpu, pKernelGmmu, pMmuFaultAddress)
#define kgmmuSignExtendFaultAddress_HAL(pGpu, pKernelGmmu, pMmuFaultAddress) kgmmuSignExtendFaultAddress_DISPATCH(pGpu, pKernelGmmu, pMmuFaultAddress)
#define kgmmuGetFaultType(pGpu, pKernelGmmu, fault, pMmuFaultType) kgmmuGetFaultType_DISPATCH(pGpu, pKernelGmmu, fault, pMmuFaultType)
#define kgmmuGetFaultType_HAL(pGpu, pKernelGmmu, fault, pMmuFaultType) kgmmuGetFaultType_DISPATCH(pGpu, pKernelGmmu, fault, pMmuFaultType)
#define kgmmuIsP2PUnboundInstFault(pKernelGmmu, arg0, arg1) kgmmuIsP2PUnboundInstFault_DISPATCH(pKernelGmmu, arg0, arg1)
#define kgmmuIsP2PUnboundInstFault_HAL(pKernelGmmu, arg0, arg1) kgmmuIsP2PUnboundInstFault_DISPATCH(pKernelGmmu, arg0, arg1)
#define kgmmuServiceVfPriFaults(pGpu, pKernelGmmu, faultType) kgmmuServiceVfPriFaults_DISPATCH(pGpu, pKernelGmmu, faultType)
#define kgmmuServiceVfPriFaults_HAL(pGpu, pKernelGmmu, faultType) kgmmuServiceVfPriFaults_DISPATCH(pGpu, pKernelGmmu, faultType)
#define kgmmuTestVidmemAccessBitBufferError(pGpu, pKernelGmmu, arg0) kgmmuTestVidmemAccessBitBufferError_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuTestVidmemAccessBitBufferError_HAL(pGpu, pKernelGmmu, arg0) kgmmuTestVidmemAccessBitBufferError_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuDisableVidmemAccessBitBuf(pGpu, pKernelGmmu) kgmmuDisableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuDisableVidmemAccessBitBuf_HAL(pGpu, pKernelGmmu) kgmmuDisableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuEnableVidmemAccessBitBuf(pGpu, pKernelGmmu) kgmmuEnableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuEnableVidmemAccessBitBuf_HAL(pGpu, pKernelGmmu) kgmmuEnableVidmemAccessBitBuf_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuClearAccessCounterWriteNak_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuClearAccessCounterWriteNak_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuServiceMthdBuffFaultInBar2Fault(pGpu, pKernelGmmu) kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuServiceMthdBuffFaultInBar2Fault_HAL(pGpu, pKernelGmmu) kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuFaultCancelTargeted(pGpu, pKernelGmmu, arg0) kgmmuFaultCancelTargeted_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuFaultCancelTargeted_HAL(pGpu, pKernelGmmu, arg0) kgmmuFaultCancelTargeted_DISPATCH(pGpu, pKernelGmmu, arg0)
#define kgmmuFaultCancelIssueInvalidate(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal) kgmmuFaultCancelIssueInvalidate_DISPATCH(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal)
#define kgmmuFaultCancelIssueInvalidate_HAL(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal) kgmmuFaultCancelIssueInvalidate_DISPATCH(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal)
#define kgmmuServiceMmuFault(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData) kgmmuServiceMmuFault_DISPATCH(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData)
#define kgmmuServiceMmuFault_HAL(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData) kgmmuServiceMmuFault_DISPATCH(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData)
#define kgmmuServiceUnboundInstBlockFault(pGpu, pKernelGmmu, arg0, arg1) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuServiceUnboundInstBlockFault_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuGetEccCounts(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetEccCounts_HAL(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearEccCounts(pGpu, pKernelGmmu) kgmmuClearEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearEccCounts_HAL(pGpu, pKernelGmmu) kgmmuClearEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuStatePreLoad(pGpu, pEngstate, arg0) kgmmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgmmuStatePostUnload(pGpu, pEngstate, arg0) kgmmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgmmuStateInitUnlocked(pGpu, pEngstate) kgmmuStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgmmuInitMissing(pGpu, pEngstate) kgmmuInitMissing_DISPATCH(pGpu, pEngstate)
#define kgmmuStatePreInitLocked(pGpu, pEngstate) kgmmuStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kgmmuStatePreInitUnlocked(pGpu, pEngstate) kgmmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgmmuIsPresent(pGpu, pEngstate) kgmmuIsPresent_DISPATCH(pGpu, pEngstate)
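/*
 * Illustrative note (hypothetical expansion, for orientation only): each
 * kgmmu* macro above routes a call through the per-object vtable entry of
 * the same name, so a call such as kgmmuStateLoad(pGpu, pKernelGmmu, flags)
 * might ultimately reduce to something like:
 *
 *     // sketch of the generated dispatch, not the literal definition:
 *     // pKernelGmmu->__kgmmuStateLoad__(pGpu, pKernelGmmu, flags);
 */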
static inline NvU32 kgmmuService_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return 0;
}


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuService(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuService(pGpu, pKernelGmmu) kgmmuService_4a4dee(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuService_HAL(pGpu, pKernelGmmu) kgmmuService(pGpu, pKernelGmmu)

NvU64 kgmmuGetMaxBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetMaxBigPageSize(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetMaxBigPageSize(pKernelGmmu) kgmmuGetMaxBigPageSize_GM107(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) kgmmuGetMaxBigPageSize(pKernelGmmu)

static inline NvU32 kgmmuGetVaspaceClass_f515df(struct KernelGmmu *pKernelGmmu) {
    return (37105);
}


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetVaspaceClass(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetVaspaceClass(pKernelGmmu) kgmmuGetVaspaceClass_f515df(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetVaspaceClass_HAL(pKernelGmmu) kgmmuGetVaspaceClass(pKernelGmmu)

NV_STATUS kgmmuInstBlkAtsGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet_GV100(pKernelGmmu, pVAS, subctxid, pOffset, pData)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData)

static inline NV_STATUS kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkMagicValueGet(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_46f6a7(pKernelGmmu, pOffset, pData)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInstBlkMagicValueGet_HAL(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData)

NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkPageDirBaseGet(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet_GV100(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
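/*
 * Illustrative note (sketch only): the #ifdef __nvoc_kern_gmmu_h_disabled
 * blocks above and below give every kgmmu* entry point two shapes: a stub
 * that asserts and returns a default when the module is compiled out, and a
 * macro that binds directly to the HAL implementation (the _GM107/_GV100/
 * _GP100-suffixed declarations) when it is enabled. A hypothetical call site
 * is the same either way:
 *
 *     NvU64 bigPageSize = kgmmuGetMaxBigPageSize_HAL(pKernelGmmu);
 */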

NV_STATUS kgmmuInstBlkAtsGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet_GV100(pKernelGmmu, pVAS, subctxid, pOffset, pData)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData)

static inline NV_STATUS kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkMagicValueGet(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_46f6a7(pKernelGmmu, pOffset, pData)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInstBlkMagicValueGet_HAL(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData)

NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkPageDirBaseGet(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet_GV100(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi)

NvU32 kgmmuGetPDBAllocSize_GP100(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetPDBAllocSize(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize_GP100(pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetPDBAllocSize_HAL(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1)

NvU64 kgmmuGetBigPageSize_GM107(struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetBigPageSize(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetBigPageSize(pKernelGmmu) kgmmuGetBigPageSize_GM107(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetBigPageSize_HAL(pKernelGmmu) kgmmuGetBigPageSize(pKernelGmmu)

void kgmmuFmtInitCaps_GM20X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFmtInitCaps(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtInitCaps(pKernelGmmu, pFmt) kgmmuFmtInitCaps_GM20X(pKernelGmmu, pFmt)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuFmtInitCaps_HAL(pKernelGmmu, pFmt) kgmmuFmtInitCaps(pKernelGmmu, pFmt)

void kgmmuFmtInitPteApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFmtInitPteApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtInitPteApertures(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures_GM10X(pKernelGmmu, pEntries)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuFmtInitPteApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures(pKernelGmmu, pEntries)

void kgmmuFmtInitPdeApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFmtInitPdeApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures_GM10X(pKernelGmmu, pEntries)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuFmtInitPdeApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries)

void kgmmuInvalidateTlb_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuInvalidateTlb(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb_GM107(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope)

NV_STATUS kgmmuCheckPendingInvalidates_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCheckPendingInvalidates(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates_TU102(pGpu, pKernelGmmu, pTimeOut, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid)

NV_STATUS kgmmuCommitTlbInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCommitTlbInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate_TU102(pGpu, pKernelGmmu, pParams)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams)

void kgmmuSetPdbToInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuSetPdbToInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate_TU102(pGpu, pKernelGmmu, pParams)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams)
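
/*
 * Added commentary: a TLB invalidate issued through this interface is built
 * around TLB_INVALIDATE_PARAMS (PDB address/aperture, GFID, register value,
 * timeout). A minimal sketch, assuming the caller has already initialized
 * pParams->timeout (e.g. via gpuSetTimeout) and filled the PDB fields; the
 * exact placement of the pending-invalidate poll varies by code path:
 *
 *     TLB_INVALIDATE_PARAMS params = {0};
 *     // ... fill params.pdbAddress / pdbAperture / gfid / regVal / timeout ...
 *     kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, &params);
 *     status = kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, &params);
 *     if (status == NV_OK)   // illustrative: wait for outstanding invalidates
 *         status = kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu,
 *                                                   &params.timeout, params.gfid);
 */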

NV_STATUS kgmmuEnableComputePeerAddressing_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuEnableComputePeerAddressing(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing_IMPL(pGpu, pKernelGmmu, flags)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuEnableComputePeerAddressing_HAL(pGpu, pKernelGmmu, flags) kgmmuEnableComputePeerAddressing(pGpu, pKernelGmmu, flags)

void kgmmuDetermineMaxVASize_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuDetermineMaxVASize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuDetermineMaxVASize(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize_GM107(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuDetermineMaxVASize_HAL(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize(pGpu, pKernelGmmu)

const char *kgmmuGetFaultTypeString_GP100(struct KernelGmmu *pKernelGmmu, NvU32 faultType);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline const char *kgmmuGetFaultTypeString(struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultTypeString(pKernelGmmu, faultType) kgmmuGetFaultTypeString_GP100(pKernelGmmu, faultType)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetFaultTypeString_HAL(pKernelGmmu, faultType) kgmmuGetFaultTypeString(pKernelGmmu, faultType)

NV_STATUS kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuChangeReplayableFaultOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership_GV100(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuChangeReplayableFaultOwnership_HAL(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0)

NV_STATUS kgmmuServiceReplayableFault_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServiceReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuServiceReplayableFault(pGpu, pKernelGmmu) kgmmuServiceReplayableFault_TU102(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuServiceReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceReplayableFault(pGpu, pKernelGmmu)

NV_STATUS kgmmuReportFaultBufferOverflow_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuReportFaultBufferOverflow(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow_GV100(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuReportFaultBufferOverflow_HAL(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu)

NV_STATUS kgmmuReadFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuReadFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, pGetOffset, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0)

NV_STATUS kgmmuWriteFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 getValue, struct THREAD_STATE_NODE *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuWriteFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 getValue, struct THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuWriteFaultBufferGetPtr(pGpu, pKernelGmmu, index, getValue, arg0) kgmmuWriteFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, getValue, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuWriteFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, getValue, arg0) kgmmuWriteFaultBufferGetPtr(pGpu, pKernelGmmu, index, getValue, arg0)

NV_STATUS kgmmuReadFaultBufferPutPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuReadFaultBufferPutPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset, struct THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg0) kgmmuReadFaultBufferPutPtr_TU102(pGpu, pKernelGmmu, index, pPutOffset, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, index, pPutOffset, arg0) kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset, arg0)
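
/*
 * Added commentary: the HW fault buffer behaves as a ring. The GMMU advances
 * PUT as it writes fault packets; software drains packets from GET up to PUT
 * and then publishes the new GET so the hardware can reuse those entries.
 * A minimal drain loop, sketched under that assumption (real code may track
 * byte offsets rather than entry indices):
 *
 *     NvU32 get, put;
 *     kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, &get, pThreadState);
 *     kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, index, &put, pThreadState);
 *     while (get != put) {
 *         // ... parse one GMMU_FAULT_PACKET at 'get' ...
 *         get = (get + 1) % maxEntries;   // maxEntries: caller-known ring size
 *     }
 *     kgmmuWriteFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, get, pThreadState);
 */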

NvU32 kgmmuReadMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuReadMmuFaultBufferSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize_TU102(pGpu, pKernelGmmu, arg0, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid)

NvU32 kgmmuReadMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuReadMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus_TU102(pGpu, pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid)

void kgmmuWriteMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuWriteMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus_TU102(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0)

NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0)

NV_STATUS kgmmuClientShadowFaultBufferAlloc_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAlloc_GV100(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu, arg0)

NV_STATUS kgmmuClientShadowFaultBufferFree_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferFree_GV100(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu, arg0)
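
/*
 * Added commentary: the client shadow fault buffers are allocated and freed
 * per FAULT_BUFFER_TYPE. Sketch, assuming a valid GPU/KernelGmmu pair:
 *
 *     status = kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu,
 *                                                    NON_REPLAYABLE_FAULT_BUFFER);
 *     // ... shadow buffer in use ...
 *     kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu,
 *                                          NON_REPLAYABLE_FAULT_BUFFER);
 */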

void kgmmuEncodeSysmemAddrs_GM107(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuEncodeSysmemAddrs(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs_GM107(pKernelGmmu, pAddresses, count)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuEncodeSysmemAddrs_HAL(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count)

NvU8 kgmmuGetHwPteApertureFromMemdesc_GM107(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU8 kgmmuGetHwPteApertureFromMemdesc(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc_GM107(pKernelGmmu, pDesc)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetHwPteApertureFromMemdesc_HAL(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc)

NvBool kgmmuTestAccessCounterWriteNak_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuTestAccessCounterWriteNak(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak_TU102(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuTestAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu)

NV_STATUS kgmmuEnableNvlinkComputePeerAddressing_GV100(struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuEnableNvlinkComputePeerAddressing(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing_GV100(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu)

void kgmmuClearNonReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClearNonReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg0) kgmmuClearNonReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg0) kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, arg0)

void kgmmuClearReplayableFaultIntr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClearReplayableFaultIntr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClearReplayableFaultIntr(pGpu, pKernelGmmu, arg0) kgmmuClearReplayableFaultIntr_TU102(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuClearReplayableFaultIntr_HAL(pGpu, pKernelGmmu, arg0) kgmmuClearReplayableFaultIntr(pGpu, pKernelGmmu, arg0)

void kgmmuPrintFaultInfo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, FIFO_MMU_EXCEPTION_DATA *arg1);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuPrintFaultInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuPrintFaultInfo(pGpu, pKernelGmmu, arg0, arg1) kgmmuPrintFaultInfo_TU102(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuPrintFaultInfo_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuPrintFaultInfo(pGpu, pKernelGmmu, arg0, arg1)

static inline NV_STATUS kgmmuInitCeMmuFaultIdRange_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_OK;
}


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInitCeMmuFaultIdRange(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInitCeMmuFaultIdRange(pGpu, pKernelGmmu) kgmmuInitCeMmuFaultIdRange_56cd7a(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuInitCeMmuFaultIdRange_HAL(pGpu, pKernelGmmu) kgmmuInitCeMmuFaultIdRange(pGpu, pKernelGmmu)

NV_STATUS kgmmuServiceNonReplayableFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServiceNonReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuServiceNonReplayableFault(pGpu, pKernelGmmu) kgmmuServiceNonReplayableFault_GV100(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuServiceNonReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceNonReplayableFault(pGpu, pKernelGmmu)

NV_STATUS kgmmuHandleNonReplayableFaultPacket_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_PACKET *arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuHandleNonReplayableFaultPacket(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_PACKET *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuHandleNonReplayableFaultPacket(pGpu, pKernelGmmu, arg0) kgmmuHandleNonReplayableFaultPacket_GV100(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuHandleNonReplayableFaultPacket_HAL(pGpu, pKernelGmmu, arg0) kgmmuHandleNonReplayableFaultPacket(pGpu, pKernelGmmu, arg0)

NV_STATUS kgmmuNotifyNonReplayableFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuNotifyNonReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuNotifyNonReplayableFault(pGpu, pKernelGmmu, arg0) kgmmuNotifyNonReplayableFault_GV100(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuNotifyNonReplayableFault_HAL(pGpu, pKernelGmmu, arg0) kgmmuNotifyNonReplayableFault(pGpu, pKernelGmmu, arg0)

NvU32 kgmmuGetFaultInfoFromFaultPckt_GV100(struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetFaultInfoFromFaultPckt(struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultInfoFromFaultPckt(pKernelGmmu, pParsedFaultEntry) kgmmuGetFaultInfoFromFaultPckt_GV100(pKernelGmmu, pParsedFaultEntry)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuGetFaultInfoFromFaultPckt_HAL(pKernelGmmu, pParsedFaultEntry) kgmmuGetFaultInfoFromFaultPckt(pKernelGmmu, pParsedFaultEntry)

static inline NV_STATUS kgmmuServiceChannelMmuFault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuServiceChannelMmuFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServiceChannelMmuFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MMU_FAULT_BUFFER_ENTRY *pParsedFaultEntry, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, struct KernelChannel *pKernelChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuServiceChannelMmuFault(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel) kgmmuServiceChannelMmuFault_92bfc3(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuServiceChannelMmuFault_HAL(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel) kgmmuServiceChannelMmuFault(pGpu, pKernelGmmu, pParsedFaultEntry, pMmuExceptionData, pKernelChannel)

NV_STATUS kgmmuServicePriFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuServicePriFaults(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuServicePriFaults(pGpu, pKernelGmmu) kgmmuServicePriFaults_GV100(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuServicePriFaults_HAL(pGpu, pKernelGmmu) kgmmuServicePriFaults(pGpu, pKernelGmmu)

NV_STATUS kgmmuCheckAndDecideBigPageSize_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);


#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuCheckAndDecideBigPageSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuCheckAndDecideBigPageSize(pGpu, pKernelGmmu) kgmmuCheckAndDecideBigPageSize_GP100(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

#define kgmmuCheckAndDecideBigPageSize_HAL(pGpu, pKernelGmmu) kgmmuCheckAndDecideBigPageSize(pGpu, pKernelGmmu)

NV_STATUS kgmmuConstructEngine_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0);

static inline NV_STATUS kgmmuConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0) {
    return pKernelGmmu->__kgmmuConstructEngine__(pGpu, pKernelGmmu, arg0);
}
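
/*
 * Added commentary: from kgmmuConstructEngine onward the entries are virtual
 * methods. NVOC stores a per-object function pointer on KernelGmmu (e.g.
 * __kgmmuConstructEngine__), populated at object construction for the
 * detected chip, and the *_DISPATCH inline simply forwards through it; the
 * plain-name macros earlier in this header map each method onto its
 * *_DISPATCH wrapper, so a call like
 *
 *     kgmmuConstructEngine(pGpu, pKernelGmmu, engDesc)
 *
 * resolves at runtime to the _IMPL (or per-chip) body selected for this GPU.
 */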

NV_STATUS kgmmuStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NV_STATUS kgmmuStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuStateInitLocked__(pGpu, pKernelGmmu);
}

NV_STATUS kgmmuStateLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NV_STATUS kgmmuStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuStateLoad__(pGpu, pKernelGmmu, arg0);
}

NV_STATUS kgmmuStateUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NV_STATUS kgmmuStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuStateUnload__(pGpu, pKernelGmmu, arg0);
}

NV_STATUS kgmmuStatePostLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NV_STATUS kgmmuStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuStatePostLoad__(pGpu, pKernelGmmu, arg0);
}

NV_STATUS kgmmuStatePreUnload_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NV_STATUS kgmmuStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuStatePreUnload__(pGpu, pKernelGmmu, arg0);
}

void kgmmuStateDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline void kgmmuStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuStateDestroy__(pGpu, pKernelGmmu);
}

void kgmmuRegisterIntrService_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[171]);

static inline void kgmmuRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[171]) {
    pKernelGmmu->__kgmmuRegisterIntrService__(pGpu, pKernelGmmu, arg0);
}

NvBool kgmmuClearInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceClearInterruptArguments *pParams);

static inline NvBool kgmmuClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceClearInterruptArguments *pParams) {
    return pKernelGmmu->__kgmmuClearInterrupt__(pGpu, pKernelGmmu, pParams);
}

NvU32 kgmmuServiceInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams);

static inline NvU32 kgmmuServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams) {
    return pKernelGmmu->__kgmmuServiceInterrupt__(pGpu, pKernelGmmu, pParams);
}

static inline NV_STATUS kgmmuServiceNotificationInterrupt_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceNotificationInterruptArguments *pParams) {
    return NV_OK;
}

static inline NV_STATUS kgmmuServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceNotificationInterruptArguments *pParams) {
    return pKernelGmmu->__kgmmuServiceNotificationInterrupt__(pGpu, pKernelGmmu, pParams);
}

NV_STATUS kgmmuInstBlkVaLimitGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData);

static inline NV_STATUS kgmmuInstBlkVaLimitGet_f03539(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
    *pOffset = 0;
    return NV_OK;
}

static inline NV_STATUS kgmmuInstBlkVaLimitGet_DISPATCH(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) {
    return pKernelGmmu->__kgmmuInstBlkVaLimitGet__(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData);
}

NvU32 kgmmuSetTlbInvalidateMembarWarParameters_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);

static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
    return 0;
}

static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) {
    return pKernelGmmu->__kgmmuSetTlbInvalidateMembarWarParameters__(pGpu, pKernelGmmu, pParams);
}

NV_STATUS kgmmuSetTlbInvalidationScope_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams);

static inline NV_STATUS kgmmuSetTlbInvalidationScope_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuSetTlbInvalidationScope_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) {
    return pKernelGmmu->__kgmmuSetTlbInvalidationScope__(pGpu, pKernelGmmu, flags, pParams);
}

void kgmmuFmtInitPteComptagLine_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);

static inline void kgmmuFmtInitPteComptagLine_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
    return;
}

static inline void kgmmuFmtInitPteComptagLine_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
    pKernelGmmu->__kgmmuFmtInitPteComptagLine__(pKernelGmmu, pPte, version);
}

void kgmmuFmtInitPeerPteFld_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version);

static inline void kgmmuFmtInitPeerPteFld_b3696a(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
    return;
}

static inline void kgmmuFmtInitPeerPteFld_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) {
    pKernelGmmu->__kgmmuFmtInitPeerPteFld__(pKernelGmmu, pPte, version);
}

void kgmmuFmtInitPte_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);

void kgmmuFmtInitPte_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture);

static inline void kgmmuFmtInitPte_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture) {
    pKernelGmmu->__kgmmuFmtInitPte__(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture);
}

void kgmmuFmtInitPde_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);

void kgmmuFmtInitPde_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);

static inline void kgmmuFmtInitPde_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
    pKernelGmmu->__kgmmuFmtInitPde__(pKernelGmmu, pPde, version, pPdeApertures);
}

NvBool kgmmuFmtIsVersionSupported_GP10X(struct KernelGmmu *pKernelGmmu, NvU32 version);

NvBool kgmmuFmtIsVersionSupported_GH10X(struct KernelGmmu *pKernelGmmu, NvU32 version);

static inline NvBool kgmmuFmtIsVersionSupported_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 version) {
    return pKernelGmmu->__kgmmuFmtIsVersionSupported__(pKernelGmmu, version);
}

void kgmmuFmtInitLevels_GP10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);

void kgmmuFmtInitLevels_GA10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);

void kgmmuFmtInitLevels_GH10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift);

static inline void kgmmuFmtInitLevels_DISPATCH(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift) {
    pKernelGmmu->__kgmmuFmtInitLevels__(pKernelGmmu, pLevels, numLevels, version, bigPageShift);
}

void kgmmuFmtInitPdeMulti_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);

void kgmmuFmtInitPdeMulti_GH10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures);

static inline void kgmmuFmtInitPdeMulti_DISPATCH(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) {
    pKernelGmmu->__kgmmuFmtInitPdeMulti__(pKernelGmmu, pPdeMulti, version, pPdeApertures);
}

NV_STATUS kgmmuFmtFamiliesInit_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

NV_STATUS kgmmuFmtFamiliesInit_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NV_STATUS kgmmuFmtFamiliesInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuFmtFamiliesInit__(pGpu, pKernelGmmu);
}

NV_STATUS kgmmuTranslatePtePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1);

static inline NV_STATUS kgmmuTranslatePtePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
    return NV_OK;
}

static inline NV_STATUS kgmmuTranslatePtePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
    return pKernelGmmu->__kgmmuTranslatePtePcfFromSw__(pKernelGmmu, arg0, arg1);
}

NV_STATUS kgmmuTranslatePtePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2);

static inline NV_STATUS kgmmuTranslatePtePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) {
    return NV_OK;
}

static inline NV_STATUS kgmmuTranslatePtePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) {
    return pKernelGmmu->__kgmmuTranslatePtePcfFromHw__(pKernelGmmu, arg0, arg1, arg2);
}

NV_STATUS kgmmuTranslatePdePcfFromSw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1);

static inline NV_STATUS kgmmuTranslatePdePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
    return NV_OK;
}

static inline NV_STATUS kgmmuTranslatePdePcfFromSw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) {
    return pKernelGmmu->__kgmmuTranslatePdePcfFromSw__(pKernelGmmu, arg0, arg1);
}

NV_STATUS kgmmuTranslatePdePcfFromHw_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2);

static inline NV_STATUS kgmmuTranslatePdePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) {
    return NV_OK;
}

static inline NV_STATUS kgmmuTranslatePdePcfFromHw_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) {
    return pKernelGmmu->__kgmmuTranslatePdePcfFromHw__(pKernelGmmu, arg0, arg1, arg2);
}
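
/*
 * Added commentary: on Hopper (GH100) the per-entry PTE/PDE attributes
 * (roughly: validity, privilege, access and cache attributes) are packed
 * into a PCF field, so software-level attributes must be translated to the
 * hardware encoding and back; configurations without that format bind the
 * _56cd7a stub, which returns NV_OK without performing a translation.
 * Sketch (local variable names are illustrative):
 *
 *     NvU32 hwPtePcf;
 *     status = kgmmuTranslatePtePcfFromSw(pKernelGmmu, swPtePcf, &hwPtePcf);
 */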

NV_STATUS kgmmuGetFaultRegisterMappings_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);

NV_STATUS kgmmuGetFaultRegisterMappings_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl);

static inline NV_STATUS kgmmuGetFaultRegisterMappings_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl) {
    return pKernelGmmu->__kgmmuGetFaultRegisterMappings__(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl);
}

NV_STATUS kgmmuIssueReplayableFaultBufferFlush_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__(pGpu, pKernelGmmu);
}

NV_STATUS kgmmuToggleFaultOnPrefetch_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable);

static inline NV_STATUS kgmmuToggleFaultOnPrefetch_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuToggleFaultOnPrefetch_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable) {
    return pKernelGmmu->__kgmmuToggleFaultOnPrefetch__(pGpu, pKernelGmmu, bEnable);
}

NV_STATUS kgmmuFaultBufferAllocSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    return NV_OK;
}

static inline NV_STATUS kgmmuFaultBufferAllocSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    return pKernelGmmu->__kgmmuFaultBufferAllocSharedMemory__(pGpu, pKernelGmmu, arg0);
}

void kgmmuFaultBufferFreeSharedMemory_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

static inline void kgmmuFaultBufferFreeSharedMemory_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    return;
}

static inline void kgmmuFaultBufferFreeSharedMemory_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    pKernelGmmu->__kgmmuFaultBufferFreeSharedMemory__(pGpu, pKernelGmmu, arg0);
}

NV_STATUS kgmmuSetupWarForBug2720120_GA100(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam);

static inline NV_STATUS kgmmuSetupWarForBug2720120_56cd7a(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) {
    return NV_OK;
}

static inline NV_STATUS kgmmuSetupWarForBug2720120_DISPATCH(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) {
    return pKernelGmmu->__kgmmuSetupWarForBug2720120__(pKernelGmmu, pFam);
}

NvU32 kgmmuGetGraphicsEngineId_GV100(struct KernelGmmu *pKernelGmmu);

NvU32 kgmmuGetGraphicsEngineId_GH100(struct KernelGmmu *pKernelGmmu);

static inline NvU32 kgmmuGetGraphicsEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetGraphicsEngineId__(pKernelGmmu);
}

NvU32 kgmmuReadShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type);

static inline NvU32 kgmmuReadShadowBufPutIndex_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
    return 0;
}

static inline NvU32 kgmmuReadShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
    return pKernelGmmu->__kgmmuReadShadowBufPutIndex__(pGpu, pKernelGmmu, type);
}

NvBool kgmmuIsFaultEngineBar1_TU102(struct KernelGmmu *pKernelGmmu, NvU32 arg0);

NvBool kgmmuIsFaultEngineBar1_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NvBool kgmmuIsFaultEngineBar1_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuIsFaultEngineBar1__(pKernelGmmu, arg0);
}

NvBool kgmmuIsFaultEngineBar2_TU102(struct KernelGmmu *pKernelGmmu, NvU32 arg0);

NvBool kgmmuIsFaultEngineBar2_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NvBool kgmmuIsFaultEngineBar2_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuIsFaultEngineBar2__(pKernelGmmu, arg0);
}

NvBool kgmmuIsFaultEnginePhysical_GV100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);

NvBool kgmmuIsFaultEnginePhysical_GH100(struct KernelGmmu *pKernelGmmu, NvU32 arg0);

static inline NvBool kgmmuIsFaultEnginePhysical_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuIsFaultEnginePhysical__(pKernelGmmu, arg0);
}

static inline NV_STATUS kgmmuCopyMmuFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuCopyMmuFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type);

static inline NV_STATUS kgmmuCopyMmuFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type) {
    return pKernelGmmu->__kgmmuCopyMmuFaults__(pGpu, pKernelGmmu, pThreadState, entriesCopied, type);
}

static inline NV_STATUS kgmmuParseFaultPacket_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuParseFaultPacket_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry);

static inline NV_STATUS kgmmuParseFaultPacket_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
    return pKernelGmmu->__kgmmuParseFaultPacket__(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry);
}

static inline void kgmmuFaultBufferClearPackets_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuFaultBufferClearPackets_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets);

static inline void kgmmuFaultBufferClearPackets_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 beginIdx, NvU32 numFaultPackets) {
    pKernelGmmu->__kgmmuFaultBufferClearPackets__(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets);
}

static inline GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_dc3e6c(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx) {
    NV_ASSERT_PRECOMP(0);
    return ((void *)0);
}

GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx);

static inline GMMU_FAULT_PACKET *kgmmuFaultBufferGetFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct HW_FAULT_BUFFER *pFaultBuffer, NvU32 idx) {
    return pKernelGmmu->__kgmmuFaultBufferGetFault__(pGpu, pKernelGmmu, pFaultBuffer, idx);
}

static inline NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied) {
    NV_ASSERT_PRECOMP(0);
    return 0;
}

NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied);

NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied);

static inline NvU32 kgmmuCopyFaultPacketToClientShadowBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct GMMU_FAULT_BUFFER *pFaultBuffer, FAULT_BUFFER_TYPE type, NvU32 getIndex, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries, struct THREAD_STATE_NODE *pThreadState, NvU32 *pFaultsCopied) {
    return pKernelGmmu->__kgmmuCopyFaultPacketToClientShadowBuffer__(pGpu, pKernelGmmu, pFaultBuffer, type, getIndex, shadowBufPutIndex, maxBufferEntries, pThreadState, pFaultsCopied);
}

static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_ceaee8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
    NV_ASSERT_PRECOMP(0);
    return ((NvBool)(0 != 0));
}

NvBool kgmmuIsReplayableShadowFaultBufferFull_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries);

static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_491d52(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
    return ((NvBool)(0 != 0));
}

static inline NvBool kgmmuIsReplayableShadowFaultBufferFull_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientFaultBuf, NvU32 shadowBufPutIndex, NvU32 maxBufferEntries) {
    return pKernelGmmu->__kgmmuIsReplayableShadowFaultBufferFull__(pGpu, pKernelGmmu, pClientFaultBuf, shadowBufPutIndex, maxBufferEntries);
}

static inline NvU32 kgmmuReadClientShadowBufPutIndex_13cd8d(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
    NV_ASSERT_PRECOMP(0);
    return 0;
}

NvU32 kgmmuReadClientShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type);

static inline NvU32 kgmmuReadClientShadowBufPutIndex_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
    return 0;
}

static inline NvU32 kgmmuReadClientShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type) {
    return pKernelGmmu->__kgmmuReadClientShadowBufPutIndex__(pGpu, pKernelGmmu, gfid, type);
}

static inline void kgmmuWriteClientShadowBufPutIndex_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuWriteClientShadowBufPutIndex_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex);

static inline void kgmmuWriteClientShadowBufPutIndex_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
    return;
}

static inline void kgmmuWriteClientShadowBufPutIndex_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid, FAULT_BUFFER_TYPE type, NvU32 putIndex) {
    pKernelGmmu->__kgmmuWriteClientShadowBufPutIndex__(pGpu, pKernelGmmu, gfid, type, putIndex);
}
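
/*
 * Added commentary: on GH100-class configurations the shadow buffer's
 * producer side is maintained by RM itself: kgmmuReadClientShadowBufPutIndex
 * and kgmmuWriteClientShadowBufPutIndex track the PUT index as
 * kgmmuCopyFaultPacketToClientShadowBuffer copies packets in, and
 * kgmmuIsReplayableShadowFaultBufferFull is consulted first so an unread
 * entry is never overwritten. The _4a4dee/_b3696a/_491d52 bindings make all
 * of this a no-op where no shadow buffer exists.
 */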

NvU32 kgmmuGetMinCeEngineId_GV100(struct KernelGmmu *pKernelGmmu);

NvU32 kgmmuGetMinCeEngineId_GH100(struct KernelGmmu *pKernelGmmu);

static inline NvU32 kgmmuGetMinCeEngineId_DISPATCH(struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetMinCeEngineId__(pKernelGmmu);
}

NvU32 kgmmuGetMaxCeEngineId_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

NvU32 kgmmuGetMaxCeEngineId_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

NvU32 kgmmuGetMaxCeEngineId_AD102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

NvU32 kgmmuGetMaxCeEngineId_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NvU32 kgmmuGetMaxCeEngineId_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetMaxCeEngineId__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultBufferMap_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultBufferMap_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);

static inline NV_STATUS kgmmuFaultBufferMap_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferMap__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuFaultBufferUnmap_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultBufferUnmap_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);

static inline NV_STATUS kgmmuFaultBufferUnmap_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferUnmap__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuFaultBufferInit_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_OK;
}

NV_STATUS kgmmuFaultBufferInit_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NV_STATUS kgmmuFaultBufferInit_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuFaultBufferInit__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultBufferDestroy_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_OK;
}

NV_STATUS kgmmuFaultBufferDestroy_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NV_STATUS kgmmuFaultBufferDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuFaultBufferDestroy__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuFaultBufferLoad_ac1694(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return NV_OK;
}

NV_STATUS kgmmuFaultBufferLoad_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);

static inline NV_STATUS kgmmuFaultBufferLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferLoad__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuFaultBufferUnload_ac1694(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return NV_OK;
}

NV_STATUS kgmmuFaultBufferUnload_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid);

static inline NV_STATUS kgmmuFaultBufferUnload_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 gfid) {
    return pKernelGmmu->__kgmmuFaultBufferUnload__(pGpu, pKernelGmmu, index, gfid);
}

static inline NV_STATUS kgmmuEnableFaultBuffer_395e98(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuEnableFaultBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid);

static inline NV_STATUS kgmmuEnableFaultBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
    return pKernelGmmu->__kgmmuEnableFaultBuffer__(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid);
}

static inline NV_STATUS kgmmuDisableFaultBuffer_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuDisableFaultBuffer_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid);

static inline NV_STATUS kgmmuDisableFaultBuffer_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvBool bIsErrorRecovery, NvU32 gfid) {
    return pKernelGmmu->__kgmmuDisableFaultBuffer__(pGpu, pKernelGmmu, index, bIsErrorRecovery, gfid);
}
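
/*
 * Added commentary: the HW fault buffer methods above follow the engine
 * state-load/unload naming, suggesting a bring-up/tear-down sequence per
 * buffer index and GFID along the lines of:
 *
 *     kgmmuFaultBufferInit(...)    -> kgmmuFaultBufferMap(...)    ->
 *     kgmmuFaultBufferLoad(...)    -> kgmmuEnableFaultBuffer(...)
 *     ... fault servicing ...
 *     kgmmuDisableFaultBuffer(...) -> kgmmuFaultBufferUnload(...) ->
 *     kgmmuFaultBufferUnmap(...)   -> kgmmuFaultBufferDestroy(...)
 *
 * Treat this as an inferred sketch, not a contract; the authoritative
 * ordering lives in the GV100 implementations.
 */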
static inline void kgmmuReadMmuFaultInstHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuReadMmuFaultInstHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1);

static inline void kgmmuReadMmuFaultInstHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
    pKernelGmmu->__kgmmuReadMmuFaultInstHiLo__(pGpu, pKernelGmmu, arg0, arg1);
}

static inline void kgmmuReadMmuFaultAddrHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuReadMmuFaultAddrHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1);

static inline void kgmmuReadMmuFaultAddrHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 *arg0, NvU32 *arg1) {
    pKernelGmmu->__kgmmuReadMmuFaultAddrHiLo__(pGpu, pKernelGmmu, arg0, arg1);
}

static inline NvU32 kgmmuReadMmuFaultInfo_a547a8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return -1;
}

NvU32 kgmmuReadMmuFaultInfo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NvU32 kgmmuReadMmuFaultInfo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuReadMmuFaultInfo__(pGpu, pKernelGmmu);
}

static inline void kgmmuWriteMmuFaultBufferSize_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuWriteMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 gfid);

static inline void kgmmuWriteMmuFaultBufferSize_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 gfid) {
    pKernelGmmu->__kgmmuWriteMmuFaultBufferSize__(pGpu, pKernelGmmu, arg0, arg1, gfid);
}

static inline void kgmmuWriteMmuFaultBufferHiLo_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuWriteMmuFaultBufferHiLo_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU32 gfid);

static inline void kgmmuWriteMmuFaultBufferHiLo_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU32 gfid) {
    pKernelGmmu->__kgmmuWriteMmuFaultBufferHiLo__(pGpu, pKernelGmmu, arg0, arg1, arg2, gfid);
}

static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultInterrupts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return pKernelGmmu->__kgmmuEnableMmuFaultInterrupts__(pGpu, pKernelGmmu, index);
}

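/*
 * The *HiLo readers above hand back the two 32-bit halves of 64-bit fault
 * registers through arg0/arg1. A caller-side recombination sketch; which
 * argument receives the low word is fixed by the HAL implementation, not by
 * this generated header, so the order below is an assumption:
 *
 *     NvU32 lo, hi;
 *     kgmmuReadMmuFaultAddrHiLo_DISPATCH(pGpu, pKernelGmmu, &lo, &hi);
 *     NvU64 faultAddr = ((NvU64)hi << 32) | lo;
 */
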
static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuDisableMmuFaultInterrupts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return pKernelGmmu->__kgmmuDisableMmuFaultInterrupts__(pGpu, pKernelGmmu, index);
}

static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableMmuFaultOverflowIntr_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index) {
    return pKernelGmmu->__kgmmuEnableMmuFaultOverflowIntr__(pGpu, pKernelGmmu, index);
}

static inline void kgmmuSignExtendFaultAddress_f2d351(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress) {
    NV_ASSERT_PRECOMP(0);
}

void kgmmuSignExtendFaultAddress_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress);

void kgmmuSignExtendFaultAddress_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress);

static inline void kgmmuSignExtendFaultAddress_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU64 *pMmuFaultAddress) {
    pKernelGmmu->__kgmmuSignExtendFaultAddress__(pGpu, pKernelGmmu, pMmuFaultAddress);
}

static inline NV_STATUS kgmmuGetFaultType_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuGetFaultType_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType);

static inline NV_STATUS kgmmuGetFaultType_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 fault, FAULT_TYPE *pMmuFaultType) {
    return pKernelGmmu->__kgmmuGetFaultType__(pGpu, pKernelGmmu, fault, pMmuFaultType);
}

static inline NvBool kgmmuIsP2PUnboundInstFault_92bfc3(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_PRECOMP(0);
    return NV_FALSE;
}

NvBool kgmmuIsP2PUnboundInstFault_GA100(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1);

static inline NvBool kgmmuIsP2PUnboundInstFault_491d52(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
    return ((NvBool)(0 != 0));
}

static inline NvBool kgmmuIsP2PUnboundInstFault_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
    return pKernelGmmu->__kgmmuIsP2PUnboundInstFault__(pKernelGmmu, arg0, arg1);
}

NV_STATUS kgmmuServiceVfPriFaults_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType);

static inline NV_STATUS kgmmuServiceVfPriFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuServiceVfPriFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 faultType) {
    return pKernelGmmu->__kgmmuServiceVfPriFaults__(pGpu, pKernelGmmu, faultType);
}

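/*
 * kgmmuSignExtendFaultAddress canonicalizes a raw fault address into a full
 * 64-bit VA. A minimal sketch of the idea only, assuming a 49-bit virtual
 * address space (the GV100 and GH100 HAL variants above differ in the width
 * they extend from; this is illustrative, not the HAL implementation):
 *
 *     static inline NvU64 signExtendVa49(NvU64 va)
 *     {
 *         // Replicate bit 48 through bits 63:49.
 *         return (va & NVBIT64(48)) ? (va | ~(NVBIT64(49) - 1)) : va;
 *     }
 */
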
static inline NvBool kgmmuTestVidmemAccessBitBufferError_491d52(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return ((NvBool)(0 != 0));
}

static inline NvBool kgmmuTestVidmemAccessBitBufferError_ceaee8(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    NV_ASSERT_PRECOMP(0);
    return ((NvBool)(0 != 0));
}

static inline NvBool kgmmuTestVidmemAccessBitBufferError_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    return pKernelGmmu->__kgmmuTestVidmemAccessBitBufferError__(pGpu, pKernelGmmu, arg0);
}

static inline void kgmmuDisableVidmemAccessBitBuf_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return;
}

static inline void kgmmuDisableVidmemAccessBitBuf_e426af(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return;
}

static inline void kgmmuDisableVidmemAccessBitBuf_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuDisableVidmemAccessBitBuf__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuEnableVidmemAccessBitBuf_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuEnableVidmemAccessBitBuf__(pGpu, pKernelGmmu);
}

static inline void kgmmuClearAccessCounterWriteNak_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return;
}

static inline void kgmmuClearAccessCounterWriteNak_e426af(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return;
}

static inline void kgmmuClearAccessCounterWriteNak_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuClearAccessCounterWriteNak__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return NV_OK;
}

static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuServiceMthdBuffFaultInBar2Fault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuServiceMthdBuffFaultInBar2Fault__(pGpu, pKernelGmmu);
}

NV_STATUS kgmmuFaultCancelTargeted_VF(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0);

static inline NV_STATUS kgmmuFaultCancelTargeted_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultCancelTargeted_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0);

static inline NV_STATUS kgmmuFaultCancelTargeted_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *arg0) {
    return pKernelGmmu->__kgmmuFaultCancelTargeted__(pGpu, pKernelGmmu, arg0);
}

static inline NV_STATUS kgmmuFaultCancelIssueInvalidate_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kgmmuFaultCancelIssueInvalidate_GP100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal);

static inline NV_STATUS kgmmuFaultCancelIssueInvalidate_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, GMMU_FAULT_CANCEL_INFO *pCancelInfo, TLB_INVALIDATE_PARAMS *pParams, NvBool bGlobal) {
    return pKernelGmmu->__kgmmuFaultCancelIssueInvalidate__(pGpu, pKernelGmmu, pCancelInfo, pParams, bGlobal);
}

NV_STATUS kgmmuServiceMmuFault_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData);

NV_STATUS kgmmuServiceMmuFault_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData);

static inline NV_STATUS kgmmuServiceMmuFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pParsedFaultInfo, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData) {
    return pKernelGmmu->__kgmmuServiceMmuFault__(pGpu, pKernelGmmu, pParsedFaultInfo, pMmuExceptionData);
}

static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
    return NV_OK;
}

static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kgmmuServiceUnboundInstBlockFault_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 arg0, FIFO_MMU_EXCEPTION_DATA *arg1) {
    return pKernelGmmu->__kgmmuServiceUnboundInstBlockFault__(pGpu, pKernelGmmu, arg0, arg1);
}

NvU32 kgmmuGetEccCounts_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline NvU32 kgmmuGetEccCounts_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return 0;
}

static inline NvU32 kgmmuGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return pKernelGmmu->__kgmmuGetEccCounts__(pGpu, pKernelGmmu);
}

void kgmmuClearEccCounts_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

static inline void kgmmuClearEccCounts_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    return;
}

static inline void kgmmuClearEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    pKernelGmmu->__kgmmuClearEccCounts__(pGpu, pKernelGmmu);
}

static inline NV_STATUS kgmmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
    return pEngstate->__kgmmuStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kgmmuStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
    return pEngstate->__kgmmuStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kgmmuStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuStateInitUnlocked__(pGpu, pEngstate);
}

static inline void kgmmuInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
    pEngstate->__kgmmuInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kgmmuStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kgmmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuStatePreInitUnlocked__(pGpu, pEngstate);
}

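/*
 * The pEngstate thunks above are the OBJENGSTATE virtuals that KernelGmmu
 * inherits; they are driven by the GPU state machine during the
 * pre-init/init/load/unload sequence rather than by GMMU-specific code.
 */
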
static inline NvBool kgmmuIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) {
    return pEngstate->__kgmmuIsPresent__(pGpu, pEngstate);
}

static inline NvU32 kgmmuGetPDEAperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEAperture;
}

static inline NvU32 kgmmuGetPTEAperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEAperture;
}

static inline NvU32 kgmmuGetPDEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEBAR1Aperture;
}

static inline NvU32 kgmmuGetPTEBAR1Aperture(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEBAR1Aperture;
}

static inline NvU32 kgmmuGetPDEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEBAR1Attr;
}

static inline NvU32 kgmmuGetPTEBAR1Attr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEBAR1Attr;
}

static inline NvU32 kgmmuGetPDEAttr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PDEAttr;
}

static inline NvU32 kgmmuGetPTEAttr(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->PTEAttr;
}

static inline NvU64 kgmmuGetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->overrideBigPageSize;
}

static inline void kgmmuSetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    pKernelGmmu_PRIVATE->overrideBigPageSize = bigPageSize;
}

static inline NvBool kgmmuIsPerVaspaceBigPageEn(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bEnablePerVaspaceBigPage;
}

static inline NvBool kgmmuIsIgnoreHubTlbInvalidate(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bIgnoreHubTlbInvalidate;
}

static inline NvBool kgmmuIsHugePageSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bHugePageSupported;
}

static inline NvBool kgmmuIsPageSize512mbSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bPageSize512mbSupported;
}

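/*
 * The accessors above and below cast to KernelGmmu_PRIVATE because NVOC
 * hides these members behind PRIVATE_FIELD; the generated getters/setters
 * are the sanctioned way to reach them from outside the class. For example:
 *
 *     NvU64  maxVa = kgmmuGetMaxVASize(pKernelGmmu);
 *     NvBool bHuge = kgmmuIsHugePageSupported(pKernelGmmu);
 */
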
static inline NvBool kgmmuIsBug2720120WarEnabled(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bBug2720120WarEnabled;
}

static inline NvBool kgmmuIsVaspaceInteropSupported(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->bVaspaceInteropSupported;
}

static inline NvU64 kgmmuGetMaxVASize(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->maxVASize;
}

static inline NvU64 kgmmuGetSysBaseAddress(struct KernelGmmu *pKernelGmmu) {
    struct KernelGmmu_PRIVATE *pKernelGmmu_PRIVATE = (struct KernelGmmu_PRIVATE *)pKernelGmmu;
    return pKernelGmmu_PRIVATE->sysmemBaseAddress;
}

void kgmmuDestruct_IMPL(struct KernelGmmu *pKernelGmmu);

#define __nvoc_kgmmuDestruct(pKernelGmmu) kgmmuDestruct_IMPL(pKernelGmmu)

NV_STATUS kgmmuFmtInit_IMPL(struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFmtInit(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtInit(pKernelGmmu) kgmmuFmtInit_IMPL(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

GMMU_APERTURE kgmmuGetMemAperture_IMPL(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline GMMU_APERTURE kgmmuGetMemAperture(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    GMMU_APERTURE ret;
    portMemSet(&ret, 0, sizeof(GMMU_APERTURE));
    return ret;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetMemAperture(pKernelGmmu, pMemDesc) kgmmuGetMemAperture_IMPL(pKernelGmmu, pMemDesc)
#endif //__nvoc_kern_gmmu_h_disabled

const GMMU_FMT_FAMILY *kgmmuFmtGetFamily_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const GMMU_FMT_FAMILY *kgmmuFmtGetFamily(struct KernelGmmu *pKernelGmmu, NvU32 version) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGetFamily(pKernelGmmu, version) kgmmuFmtGetFamily_IMPL(pKernelGmmu, version)
#endif //__nvoc_kern_gmmu_h_disabled

const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetStaticInfo(pGpu, pKernelGmmu) kgmmuGetStaticInfo_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

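/*
 * Pattern note for the _IMPL declarations above and below: each non-virtual
 * method comes as a guarded pair. When the KernelGmmu module is compiled out
 * (__nvoc_kern_gmmu_h_disabled), the unqualified name is an asserting inline
 * stub; otherwise it is a macro forwarding to the _IMPL function, so in an
 * enabled build
 *
 *     kgmmuFmtInit(pKernelGmmu);
 *
 * expands to kgmmuFmtInit_IMPL(pKernelGmmu).
 */
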
const struct GMMU_FMT *kgmmuFmtGet_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const struct GMMU_FMT *kgmmuFmtGet(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGet(pKernelGmmu, version, bigPageSize) kgmmuFmtGet_IMPL(pKernelGmmu, version, bigPageSize)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuExtractPteInfo_IMPL(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuExtractPteInfo(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuExtractPteInfo(pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuExtractPteInfo_IMPL(pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuFieldSetKindCompTags_IMPL(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuFieldSetKindCompTags(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFieldSetKindCompTags(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries) kgmmuFieldSetKindCompTags_IMPL(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries)
#endif //__nvoc_kern_gmmu_h_disabled

NvBool kgmmuFmtIsBigPageSizeSupported_IMPL(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuFmtIsBigPageSizeSupported(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, bigPageSize) kgmmuFmtIsBigPageSizeSupported_IMPL(pKernelGmmu, bigPageSize)
#endif //__nvoc_kern_gmmu_h_disabled

const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFmtGetLatestSupportedFormat(pGpu, pKernelGmmu) kgmmuFmtGetLatestSupportedFormat_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

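/*
 * Typical format lookup, sketched under the assumption that format version 2
 * (GMMU_FMT_VERSION_2 from mmu/gmmu_fmt.h) and a 64KB big page are valid for
 * the GPU at hand:
 *
 *     const struct GMMU_FMT *pFmt = NULL;
 *     if (kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, RM_PAGE_SIZE_64K))
 *     {
 *         pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_2, RM_PAGE_SIZE_64K);
 *     }
 */
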
NvU32 kgmmuGetFaultBufferReservedFbSpaceSize_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU32 kgmmuGetFaultBufferReservedFbSpaceSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultBufferReservedFbSpaceSize(pGpu, pKernelGmmu) kgmmuGetFaultBufferReservedFbSpaceSize_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableSetup_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1, NvU32 arg2, RmPhysAddr *arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableSetup(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1, NvU32 arg2, RmPhysAddr *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableSetup(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuFaultBufferReplayableSetup_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetMinBigPageSize_IMPL(struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetMinBigPageSize(struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetMinBigPageSize(pKernelGmmu) kgmmuGetMinBigPageSize_IMPL(pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuInstBlkInit_IMPL(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuInstBlkInit(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkInit(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams) kgmmuInstBlkInit_IMPL(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableAllocate(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferReplayableAllocate_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferReplayableDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferReplayableDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferReplayableDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferReplayableDestroy_IMPL(pGpu, pKernelGmmu)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferAlloc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferAlloc(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferAlloc_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferCreateMemDesc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferCreateMemDesc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferCreateMemDesc(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuFaultBufferCreateMemDesc_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferGetAddressSpace_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferGetAddressSpace(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferGetAddressSpace(pGpu, pKernelGmmu, arg0, arg1, arg2) kgmmuFaultBufferGetAddressSpace_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferFree_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFree_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferAllocate(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferAllocate_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferDestroy(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferDestroy_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

NV_STATUS kgmmuClientShadowFaultBufferRegister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NV_STATUS kgmmuClientShadowFaultBufferRegister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferRegister_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferPagesDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferPagesDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, arg0, arg1) kgmmuClientShadowFaultBufferPagesDestroy_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuClientShadowFaultBufferQueueDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuClientShadowFaultBufferQueueDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0, FAULT_BUFFER_TYPE arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, arg0, arg1) kgmmuClientShadowFaultBufferQueueDestroy_IMPL(pGpu, pKernelGmmu, arg0, arg1)
#endif //__nvoc_kern_gmmu_h_disabled

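/*
 * Illustrative client shadow fault buffer bring-up for the non-replayable
 * class. The real call ordering is owned by the fault-handling code; treat
 * this only as a sketch of how the FAULT_BUFFER_TYPE parameter is used:
 *
 *     NV_STATUS status;
 *     status = kgmmuClientShadowFaultBufferAllocate(pGpu, pKernelGmmu,
 *                                                   NON_REPLAYABLE_FAULT_BUFFER);
 *     if (status == NV_OK)
 *     {
 *         status = kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu,
 *                                                       NON_REPLAYABLE_FAULT_BUFFER);
 *     }
 */
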
NvU64 kgmmuGetSizeOfPageTables_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetSizeOfPageTables(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageTables_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetSizeOfPageDirs_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetSizeOfPageDirs(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageDirs_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3)
#endif //__nvoc_kern_gmmu_h_disabled

GMMU_APERTURE kgmmuGetExternalAllocAperture_IMPL(NvU32 addressSpace);

#define kgmmuGetExternalAllocAperture(addressSpace) kgmmuGetExternalAllocAperture_IMPL(addressSpace)

void kgmmuEncodePhysAddrs_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuEncodePhysAddrs(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodePhysAddrs(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count) kgmmuEncodePhysAddrs_IMPL(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuEncodePhysAddr_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuEncodePhysAddr(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, fabricBaseAddress) kgmmuEncodePhysAddr_IMPL(pKernelGmmu, aperture, physAddr, fabricBaseAddress)
#endif //__nvoc_kern_gmmu_h_disabled

void kgmmuAccessCntrChangeIntrOwnership_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void kgmmuAccessCntrChangeIntrOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuAccessCntrChangeIntrOwnership(pGpu, pKernelGmmu, arg0) kgmmuAccessCntrChangeIntrOwnership_IMPL(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled

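/*
 * Sketch of encoding a single physical address for a PTE write. The aperture
 * value and the zero fabric base address are illustrative assumptions, not
 * fixed by this API:
 *
 *     NvU64 encoded = kgmmuEncodePhysAddr(pKernelGmmu, GMMU_APERTURE_VIDEO,
 *                                         physAddr, 0);  // no fabric base
 */
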
void *kgmmuGetShadowFaultBufferCslContext_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline void *kgmmuGetShadowFaultBufferCslContext(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, FAULT_BUFFER_TYPE type) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetShadowFaultBufferCslContext(pGpu, pKernelGmmu, type) kgmmuGetShadowFaultBufferCslContext_IMPL(pGpu, pKernelGmmu, type)
#endif //__nvoc_kern_gmmu_h_disabled

NvS32 *kgmmuGetFatalFaultIntrPendingState_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvS32 *kgmmuGetFatalFaultIntrPendingState(struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFatalFaultIntrPendingState(pKernelGmmu, gfid) kgmmuGetFatalFaultIntrPendingState_IMPL(pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr_IMPL(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline struct HW_FAULT_BUFFER *kgmmuGetHwFaultBufferPtr(struct KernelGmmu *pKernelGmmu, NvU8 gfid, NvU8 faultBufferIndex) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return NULL;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetHwFaultBufferPtr(pKernelGmmu, gfid, faultBufferIndex) kgmmuGetHwFaultBufferPtr_IMPL(pKernelGmmu, gfid, faultBufferIndex)
#endif //__nvoc_kern_gmmu_h_disabled

NvU64 kgmmuGetFaultBufferGenCnt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU8 gfid);

#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvU64 kgmmuGetFaultBufferGenCnt(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU8 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
    return 0;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuGetFaultBufferGenCnt(pGpu, pKernelGmmu, gfid) kgmmuGetFaultBufferGenCnt_IMPL(pGpu, pKernelGmmu, gfid)
#endif //__nvoc_kern_gmmu_h_disabled

#undef PRIVATE_FIELD


// defines for TLB invalidation scope
#define NV_GMMU_INVAL_SCOPE_ALL_TLBS      0x00000000
#define NV_GMMU_INVAL_SCOPE_LINK_TLBS     0x00000001
#define NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS 0x00000002

// bit fields for uvmSharedIntrRmOwnsMask
#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY        NVBIT(0)
#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_ERROR         NVBIT(1)
#define RM_UVM_SHARED_INTR_MASK_MMU_ECC_UNCORRECTED_ERROR_NOTIFY NVBIT(2)
#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY      NVBIT(3)
#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_OVERFLOW    NVBIT(4)
#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_NOTIFY   NVBIT(5)
#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_OVERFLOW NVBIT(6)
#define RM_UVM_SHARED_INTR_MASK_MMU_OTHER_FAULT_NOTIFY           NVBIT(7)
// union of all the individual interrupt bits above, i.e. bits 7:0
#define RM_UVM_SHARED_INTR_MASK_ALL                              (NVBIT(8) - 1)

/*!
 * Constants used for UVM mirroring loops.
 */
#define GMMU_USER_PAGE_DIR_INDEX      0
#define GMMU_KERNEL_PAGE_DIR_INDEX    1
#define GMMU_MAX_PAGE_DIR_INDEX_COUNT (GMMU_KERNEL_PAGE_DIR_INDEX + 1)

/*!
 * Page table walker callbacks used for map/unmap operations.
 */
extern const MMU_WALK_CALLBACKS  g_gmmuWalkCallbacks;
extern const MMU_WALK_CALLBACKS  g_bar2WalkCallbacks;
extern const MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks;

void gmmuMemDescCacheFree(GVAS_GPU_STATE *pGpuState);

#endif // KERN_GMMU_H

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_KERN_GMMU_NVOC_H_