1 #ifndef _G_GPU_NVOC_H_ 2 #define _G_GPU_NVOC_H_ 3 #include "nvoc/runtime.h" 4 5 #ifdef __cplusplus 6 extern "C" { 7 #endif 8 9 /* 10 * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 11 * SPDX-License-Identifier: MIT 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a 14 * copy of this software and associated documentation files (the "Software"), 15 * to deal in the Software without restriction, including without limitation 16 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 17 * and/or sell copies of the Software, and to permit persons to whom the 18 * Software is furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 29 * DEALINGS IN THE SOFTWARE. 30 */ 31 #include "g_gpu_nvoc.h" 32 33 #ifndef _OBJGPU_H_ 34 #define _OBJGPU_H_ 35 36 /*! 37 * @file 38 * @brief Resource Manager Defines and Structures: Defines and structures used for the GPU Object. 39 */ 40 41 /*! 42 * 43 * Forward declaration of SEQSCRIPT - here because it is used by many clients 44 * and we don't want objseq.h to have to be included everywhere, so adding this 45 * here. 
See NVCR 12827752 46 * 47 */ 48 typedef struct _SEQSCRIPT SEQSCRIPT, *PSEQSCRIPT; 49 50 typedef struct GPUATTACHARG GPUATTACHARG; 51 52 /* 53 * WARNING -- Avoid including headers in gpu.h 54 * A change in gpu.h and headers included by gpu.h triggers recompilation of most RM 55 * files in an incremental build. We should keep the list of included header as short as 56 * possible. 57 * Especially, GPU's child module should not have its object header being included here. 58 * A child module generally includes the header of its parent. A child module header included 59 * by the parent module affects all the sibling modules. 60 * */ 61 #include "ctrl/ctrl0000/ctrl0000system.h" 62 #include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS (form hal) 63 #include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_MAX_BSPS/NVENCS 64 #include "ctrl/ctrl2080/ctrl2080ecc.h" 65 #include "ctrl/ctrl2080/ctrl2080nvd.h" 66 #include "ctrl/ctrl0073/ctrl0073system.h" 67 #include "class/cl2080.h" 68 #include "class/cl90cd.h" 69 70 #include "nvlimits.h" 71 #include "utils/nv_enum.h" 72 73 #include "gpu/gpu_timeout.h" 74 #include "gpu/gpu_access.h" 75 #include "gpu/gpu_shared_data_map.h" 76 #include "gpu/kern_gpu_power.h" 77 78 #include "platform/acpi_common.h" 79 #include "gpu/gpu_acpi_data.h" 80 #include "platform/sli/sli.h" 81 82 #include "core/core.h" 83 #include "core/system.h" 84 #include "core/info_block.h" 85 #include "core/hal.h" 86 #include "nvoc/utility.h" 87 #include "gpu/mem_mgr/mem_desc.h" 88 #include "gpu/gpu_resource_desc.h" 89 #include "diagnostics/traceable.h" 90 #include "gpu/gpu_uuid.h" 91 #include "prereq_tracker/prereq_tracker.h" 92 #include "gpu/gpu_halspec.h" 93 #include "kernel/gpu/gpu_engine_type.h" 94 95 #include "rmapi/control.h" 96 #include "rmapi/event.h" 97 #include "rmapi/rmapi.h" 98 99 #include "kernel/gpu/gr/fecs_event_list.h" 100 #include "class/cl90cdfecs.h" 101 102 #include "gpuvideo/videoeventlist.h" 103 104 #include 
"gpu/gpu_fabric_probe.h" 105 106 #include "nv_arch.h" 107 108 #include "g_rmconfig_util.h" // prototypes for rmconfig utility functions, eg: rmcfg_IsGK104() 109 110 // TODO - the forward declaration of OS_GPU_INFO should be simplified 111 typedef struct nv_state_t OS_GPU_INFO; 112 113 struct OBJGMMU; 114 115 #ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__ 116 #define __NVOC_CLASS_OBJGMMU_TYPEDEF__ 117 typedef struct OBJGMMU OBJGMMU; 118 #endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */ 119 120 #ifndef __nvoc_class_id_OBJGMMU 121 #define __nvoc_class_id_OBJGMMU 0xd7a41d 122 #endif /* __nvoc_class_id_OBJGMMU */ 123 124 125 struct OBJGRIDDISPLAYLESS; 126 127 #ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ 128 #define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ 129 typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS; 130 #endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */ 131 132 #ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS 133 #define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a 134 #endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */ 135 136 137 struct OBJHOSTENG; 138 139 #ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ 140 #define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ 141 typedef struct OBJHOSTENG OBJHOSTENG; 142 #endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */ 143 144 #ifndef __nvoc_class_id_OBJHOSTENG 145 #define __nvoc_class_id_OBJHOSTENG 0xb356e7 146 #endif /* __nvoc_class_id_OBJHOSTENG */ 147 148 149 struct OBJPMU_CLIENT_IMPLEMENTER; 150 151 #ifndef __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ 152 #define __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ 153 typedef struct OBJPMU_CLIENT_IMPLEMENTER OBJPMU_CLIENT_IMPLEMENTER; 154 #endif /* __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ */ 155 156 #ifndef __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER 157 #define __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER 0x88cace 158 #endif /* __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER */ 159 160 161 struct OBJINTRABLE; 162 163 #ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ 164 #define 
__NVOC_CLASS_OBJINTRABLE_TYPEDEF__ 165 typedef struct OBJINTRABLE OBJINTRABLE; 166 #endif /* __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */ 167 168 #ifndef __nvoc_class_id_OBJINTRABLE 169 #define __nvoc_class_id_OBJINTRABLE 0x31ccb7 170 #endif /* __nvoc_class_id_OBJINTRABLE */ 171 172 173 struct OBJVBIOS; 174 175 #ifndef __NVOC_CLASS_OBJVBIOS_TYPEDEF__ 176 #define __NVOC_CLASS_OBJVBIOS_TYPEDEF__ 177 typedef struct OBJVBIOS OBJVBIOS; 178 #endif /* __NVOC_CLASS_OBJVBIOS_TYPEDEF__ */ 179 180 #ifndef __nvoc_class_id_OBJVBIOS 181 #define __nvoc_class_id_OBJVBIOS 0x5dc772 182 #endif /* __nvoc_class_id_OBJVBIOS */ 183 184 185 struct NvDebugDump; 186 187 #ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__ 188 #define __NVOC_CLASS_NvDebugDump_TYPEDEF__ 189 typedef struct NvDebugDump NvDebugDump; 190 #endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */ 191 192 #ifndef __nvoc_class_id_NvDebugDump 193 #define __nvoc_class_id_NvDebugDump 0x7e80a2 194 #endif /* __nvoc_class_id_NvDebugDump */ 195 196 197 struct GpuMutexMgr; 198 199 #ifndef __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ 200 #define __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ 201 typedef struct GpuMutexMgr GpuMutexMgr; 202 #endif /* __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ */ 203 204 #ifndef __nvoc_class_id_GpuMutexMgr 205 #define __nvoc_class_id_GpuMutexMgr 0x9d93b2 206 #endif /* __nvoc_class_id_GpuMutexMgr */ 207 208 209 struct KernelFalcon; 210 211 #ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__ 212 #define __NVOC_CLASS_KernelFalcon_TYPEDEF__ 213 typedef struct KernelFalcon KernelFalcon; 214 #endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */ 215 216 #ifndef __nvoc_class_id_KernelFalcon 217 #define __nvoc_class_id_KernelFalcon 0xb6b1af 218 #endif /* __nvoc_class_id_KernelFalcon */ 219 220 221 struct KernelVideoEngine; 222 223 #ifndef __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ 224 #define __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ 225 typedef struct KernelVideoEngine KernelVideoEngine; 226 #endif /* __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ */ 227 228 #ifndef 
__nvoc_class_id_KernelVideoEngine 229 #define __nvoc_class_id_KernelVideoEngine 0x9e2f3e 230 #endif /* __nvoc_class_id_KernelVideoEngine */ 231 232 233 struct KernelChannel; 234 235 #ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ 236 #define __NVOC_CLASS_KernelChannel_TYPEDEF__ 237 typedef struct KernelChannel KernelChannel; 238 #endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ 239 240 #ifndef __nvoc_class_id_KernelChannel 241 #define __nvoc_class_id_KernelChannel 0x5d8d70 242 #endif /* __nvoc_class_id_KernelChannel */ 243 244 245 struct GenericKernelFalcon; 246 247 #ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ 248 #define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ 249 typedef struct GenericKernelFalcon GenericKernelFalcon; 250 #endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */ 251 252 #ifndef __nvoc_class_id_GenericKernelFalcon 253 #define __nvoc_class_id_GenericKernelFalcon 0xabcf08 254 #endif /* __nvoc_class_id_GenericKernelFalcon */ 255 256 257 258 struct Subdevice; 259 260 #ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ 261 #define __NVOC_CLASS_Subdevice_TYPEDEF__ 262 typedef struct Subdevice Subdevice; 263 #endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ 264 265 #ifndef __nvoc_class_id_Subdevice 266 #define __nvoc_class_id_Subdevice 0x4b01b3 267 #endif /* __nvoc_class_id_Subdevice */ 268 269 270 struct Device; 271 272 #ifndef __NVOC_CLASS_Device_TYPEDEF__ 273 #define __NVOC_CLASS_Device_TYPEDEF__ 274 typedef struct Device Device; 275 #endif /* __NVOC_CLASS_Device_TYPEDEF__ */ 276 277 #ifndef __nvoc_class_id_Device 278 #define __nvoc_class_id_Device 0xe0ac20 279 #endif /* __nvoc_class_id_Device */ 280 281 282 struct RsClient; 283 284 #ifndef __NVOC_CLASS_RsClient_TYPEDEF__ 285 #define __NVOC_CLASS_RsClient_TYPEDEF__ 286 typedef struct RsClient RsClient; 287 #endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ 288 289 #ifndef __nvoc_class_id_RsClient 290 #define __nvoc_class_id_RsClient 0x8f87e5 291 #endif /* __nvoc_class_id_RsClient */ 292 293 294 struct Memory; 295 296 
#ifndef __NVOC_CLASS_Memory_TYPEDEF__ 297 #define __NVOC_CLASS_Memory_TYPEDEF__ 298 typedef struct Memory Memory; 299 #endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ 300 301 #ifndef __nvoc_class_id_Memory 302 #define __nvoc_class_id_Memory 0x4789f2 303 #endif /* __nvoc_class_id_Memory */ 304 305 306 307 #ifndef PARTITIONID_INVALID 308 #define PARTITIONID_INVALID 0xFFFFFFFF 309 #endif 310 typedef struct MIG_INSTANCE_REF MIG_INSTANCE_REF; 311 typedef struct NV2080_CTRL_GPU_REG_OP NV2080_CTRL_GPU_REG_OP; 312 313 typedef enum 314 { 315 BRANDING_TYPE_UNCACHED, 316 BRANDING_TYPE_NONE, 317 BRANDING_TYPE_QUADRO_GENERIC, 318 BRANDING_TYPE_QUADRO_AD, 319 BRANDING_TYPE_NVS_NVIDIA, // "NVIDIA NVS" 320 BRANDING_TYPE_VGX, 321 } BRANDING_TYPE; 322 323 typedef enum 324 { 325 COMPUTE_BRANDING_TYPE_NONE, 326 COMPUTE_BRANDING_TYPE_TESLA, 327 } COMPUTE_BRANDING_TYPE; 328 329 #define OOR_ARCH_DEF(x) \ 330 NV_ENUM_ENTRY(x, OOR_ARCH_X86_64, 0x00000000) \ 331 NV_ENUM_ENTRY(x, OOR_ARCH_PPC64LE, 0x00000001) \ 332 NV_ENUM_ENTRY(x, OOR_ARCH_ARM, 0x00000002) \ 333 NV_ENUM_ENTRY(x, OOR_ARCH_AARCH64, 0x00000003) \ 334 NV_ENUM_ENTRY(x, OOR_ARCH_NONE, 0x00000004) 335 336 NV_ENUM_DEF(OOR_ARCH, OOR_ARCH_DEF) 337 338 typedef struct 339 { 340 NvU32 classId; 341 NvU32 flags; 342 } GPUCHILDORDER; 343 344 typedef struct 345 { 346 NvU32 classId; 347 NvU32 instances; 348 349 /*! 350 * Pointer to the @ref NVOC_CLASS_INFO for the concrete class to instantiate 351 * for this child. 352 */ 353 const NVOC_CLASS_INFO *pClassInfo; 354 } GPUCHILDPRESENT; 355 356 /*! 
357 * @brief Generates an entry for a list of @ref GPUCHILDPRESENT objects for a 358 * class of the given name 359 * 360 * @param[in] _childClassName 361 * Name of the class for the entry 362 * @param[in] _instances 363 * Number of instances of the child that may be present; see 364 * @ref GPUCHILDPRESENT::instances 365 * 366 * @return An entry suitable for a list of @ref GPUCHILDPRESENT for the given 367 * child of @ref OBJGPU 368 */ 369 #define GPU_CHILD_PRESENT(_childClassName, _instances) \ 370 GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, (_instances), _childClassName) 371 372 /*! 373 * @brief Generates an entry for a list of @ref GPUCHILDPRESENT objects that 374 * allows the @ref OBJGPU child to instantiate a sub-class of the base 375 * @ref OBJGPU child class. 376 * 377 * @details The intention of this macro is to allow a list of 378 * @ref GPUCHILDPRESENT to essentially state "this child should be 379 * present with this concrete class type". This allows for different 380 * @ref GPUCHILDPRESENT lists to request different classes with 381 * different behavior via sub-classes, for the same basic @ref OBJGPU 382 * child. 383 * 384 * @param[in] _childClassName 385 * Name of the base class at which @ref OBJGPU points 386 * @param[in] _instances 387 * Number of instances of the child that may be present; see 388 * @ref GPUCHILDPRESENT::instances 389 * @param[in] _concreteClassName 390 * Name of the sub-class of _childClassName that should actually be 391 * instantiated 392 * 393 * @return An entry suitable for a list of @ref GPUCHILDPRESENT for the given 394 * child of @ref OBJGPU with the given concrete class type. 
395 */ 396 #define GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, _instances, _concreteClassName) \ 397 { \ 398 .classId = classId(_childClassName), \ 399 .instances = (_instances), \ 400 .pClassInfo = classInfo(_concreteClassName) \ 401 } 402 403 // GPU Child Order Flags 404 #define GCO_LIST_INIT NVBIT(0) // entry is used for init ordering (DO NOT USE) 405 #define GCO_LIST_LOAD NVBIT(1) // entry is used for load and postload ordering (DO NOT USE) 406 #define GCO_LIST_UNLOAD NVBIT(2) // entry is used for unload and preunload ordering (DO NOT USE) 407 #define GCO_LIST_DESTROY NVBIT(3) // entry is used for destroy order (DO NOT USE) 408 #define GCO_LIST_ALL (GCO_LIST_INIT | GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY) 409 // ^ entry is used for all list types (RECOMMENDED) 410 #define GCO_ALL (GCO_LIST_ALL) 411 412 413 typedef struct 414 { 415 NvU32 childTypeIdx; 416 NvU32 childInst; 417 NvU32 gpuChildPtrOffset; 418 } GPU_CHILD_ITER; 419 420 typedef GPU_CHILD_ITER ENGSTATE_ITER; 421 typedef GPU_CHILD_ITER PMU_CLIENT_IMPLEMENTER_ITER; 422 423 // 424 // Object 'get' macros for GPU relative object retrievals. 425 // 426 427 #define ENG_GET_GPU(p) objFindAncestorOfType(OBJGPU, (p)) 428 429 // GPU_GET_FIFO_UC is autogenerated, returns per Gpu pFifo. 430 #define GPU_GET_FIFO(p) GPU_GET_FIFO_UC(p) 431 432 // GPU_GET_KERNEL_FIFO_UC is autogenerated, returns per Gpu pKernelFifo. 433 #define GPU_GET_KERNEL_FIFO(p) gpuGetKernelFifoShared(p) 434 435 #define GPU_GET_HEAP(p) (RMCFG_MODULE_HEAP ? MEMORY_MANAGER_GET_HEAP(GPU_GET_MEMORY_MANAGER(p)) : NULL) 436 437 #define GPU_GET_HAL(p) (RMCFG_MODULE_HAL ? (p)->pHal : NULL) 438 439 #define GPU_GET_OS(p) (RMCFG_MODULE_OS ? 
(p)->pOS : NULL) // TBD: replace with SYS_GET_OS
#define GPU_QUICK_PATH_GET_OS(p) GPU_GET_OS(p) // TBD: remove

// Accessor for the GPU's register-access bookkeeping structure.
#define GPU_GET_REGISTER_ACCESS(g) (&(g)->registerAccess)

// Returns the pRmApi that routes to the physical driver, either via RPC or local calls
#define GPU_GET_PHYSICAL_RMAPI(g) (&(g)->physicalRmApi)

//
// Defines and helpers for encoding and decoding PCI domain, bus and device.
//
// Ideally these would live in objbus.h (or somewhere else more appropriate) and
// not gpu/gpu.h, but keep them here for now while support for 32-bit domains is
// being added as part of bug 1904645.
//

// DRF macros for GPUBUSINFO::nvDomainBusDeviceFunc
// Packed NvU64 layout: domain in bits 63:32, bus in bits 15:8, device in
// bits 7:0 (bits 31:16 are unused by these helpers).
// NOTE: #undef'd below once the inline helpers have been defined.
#define NVGPU_BUSDEVICE_DOMAIN 63:32
#define NVGPU_BUSDEVICE_BUS 15:8
#define NVGPU_BUSDEVICE_DEVICE 7:0

/*!
 * @brief Extract the 32-bit PCI domain from a packed domain/bus/device value.
 *
 * @param[in] gpuDomainBusDevice  Value packed by gpuEncodeDomainBusDevice()
 *
 * @return PCI domain (bits 63:32)
 */
static NV_INLINE NvU32 gpuDecodeDomain(NvU64 gpuDomainBusDevice)
{
    return (NvU32)DRF_VAL64(GPU, _BUSDEVICE, _DOMAIN, gpuDomainBusDevice);
}

/*!
 * @brief Extract the 8-bit PCI bus number from a packed domain/bus/device value.
 *
 * @param[in] gpuDomainBusDevice  Value packed by gpuEncodeDomainBusDevice()
 *
 * @return PCI bus number (bits 15:8)
 */
static NV_INLINE NvU8 gpuDecodeBus(NvU64 gpuDomainBusDevice)
{
    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _BUS, gpuDomainBusDevice);
}

/*!
 * @brief Extract the 8-bit PCI device number from a packed domain/bus/device value.
 *
 * @param[in] gpuDomainBusDevice  Value packed by gpuEncodeDomainBusDevice()
 *
 * @return PCI device number (bits 7:0)
 */
static NV_INLINE NvU8 gpuDecodeDevice(NvU64 gpuDomainBusDevice)
{
    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _DEVICE, gpuDomainBusDevice);
}

/*!
 * @brief Pack a PCI domain, bus and device into a single NvU64 in the
 *        GPUBUSINFO::nvDomainBusDeviceFunc layout.
 *
 * @param[in] domain  32-bit PCI domain
 * @param[in] bus     PCI bus number
 * @param[in] device  PCI device number
 *
 * @return Packed value decodable by gpuDecodeDomain()/Bus()/Device()
 */
static NV_INLINE NvU64 gpuEncodeDomainBusDevice(NvU32 domain, NvU8 bus, NvU8 device)
{
    return DRF_NUM64(GPU, _BUSDEVICE, _DOMAIN, domain) |
           DRF_NUM64(GPU, _BUSDEVICE, _BUS, bus) |
           DRF_NUM64(GPU, _BUSDEVICE, _DEVICE, device);
}

/*!
 * @brief Pack bus and device (domain 0) into a 32-bit value.
 *
 * @param[in] bus     PCI bus number
 * @param[in] device  PCI device number
 *
 * @return Lower 32 bits of the packed value (bus and device only occupy
 *         bits 15:0, so the truncation is lossless)
 */
static NV_INLINE NvU32 gpuEncodeBusDevice(NvU8 bus, NvU8 device)
{
    NvU64 busDevice = gpuEncodeDomainBusDevice(0, bus, device);

    // Bus and device are guaranteed to fit in the lower 32bits
    return (NvU32)busDevice;
}

//
// Generate a 32-bit id from domain, bus and device tuple.
//
NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device);

//
// Generate a 32-bit id from a physical address
//
NvU32 gpuGenerate32BitIdFromPhysAddr(RmPhysAddr addr);

//
// Helpers for getting domain, bus and device of a GPU
//
// Ideally these would be inline functions, but NVOC doesn't support that today,
// tracked in bug 1905882
//
#define gpuGetDBDF(pGpu) ((pGpu)->busInfo.nvDomainBusDeviceFunc)
#define gpuGetDomain(pGpu) gpuDecodeDomain((pGpu)->busInfo.nvDomainBusDeviceFunc)
#define gpuGetBus(pGpu) gpuDecodeBus((pGpu)->busInfo.nvDomainBusDeviceFunc)
#define gpuGetDevice(pGpu) gpuDecodeDevice((pGpu)->busInfo.nvDomainBusDeviceFunc)

// The DRF field definitions are private to the helpers above; do not leak them.
#undef NVGPU_BUSDEVICE_DOMAIN
#undef NVGPU_BUSDEVICE_BUS
#undef NVGPU_BUSDEVICE_DEVICE

//
// MaskRevision constants.
//
#define GPU_NO_MASK_REVISION 0x00
#define GPU_MASK_REVISION_A1 0xA1
#define GPU_MASK_REVISION_A2 0xA2
#define GPU_MASK_REVISION_A3 0xA3
#define GPU_MASK_REVISION_A4 0xA4
#define GPU_MASK_REVISION_A5 0xA5
#define GPU_MASK_REVISION_A6 0xA6
#define GPU_MASK_REVISION_B1 0xB1
#define GPU_MASK_REVISION_B2 0xB2
#define GPU_MASK_REVISION_C1 0xC1
#define GPU_MASK_REVISION_D1 0xD1

// Packs the major revision into the high nibble and the minor revision into
// the low nibble, matching the GPU_MASK_REVISION_* encoding above.
#define GPU_GET_MASKREVISION(pGpu) (((gpuGetChipMajRev(pGpu))<<4)|(gpuGetChipMinRev(pGpu)))

//
// Revision constants.
//
#define GPU_NO_REVISION 0xFF
#define GPU_REVISION_0 0x00
#define GPU_REVISION_1 0x01
#define GPU_REVISION_2 0x02
#define GPU_REVISION_3 0x03
#define GPU_REVISION_4 0x04
#define GPU_REVISION_5 0x05
#define GPU_REVISION_6 0x06
#define GPU_REVISION_7 0x07
#define GPU_REVISION_8 0x08
#define GPU_REVISION_9 0x09
#define GPU_REVISION_A 0x0A
#define GPU_REVISION_B 0x0B
#define GPU_REVISION_C 0x0C
#define GPU_REVISION_D 0x0D
#define GPU_REVISION_E 0x0E
#define GPU_REVISION_F 0x0F

//
// One extra nibble should be added to the architecture version read from the
// PMC boot register to represent the architecture number in RM.
//
#define GPU_ARCH_SHIFT 0x4

// Registry key for inst mem modification defines
#define INSTMEM_TAG_MASK (0xf0000000)
#define INSTMEM_TAG(a) ((INSTMEM_TAG_MASK & (a)) >> 28)


// PCI identification of the GPU board.
typedef struct
{

    NvU32 PCIDeviceID;
    NvU32 Manufacturer;
    NvU32 PCISubDeviceID;
    NvU32 PCIRevisionID;
    NvU32 Subrevision;

} GPUIDINFO;


// Decoded chip-identification fields (presumably parsed from NV_PMC_BOOT_0;
// see the "PMC boot register" note above -- confirm against the parser).
typedef struct
{
    NvU32 impl;
    NvU32 arch;
    NvU32 majorRev;
    NvU32 minorRev;
    NvU32 minorExtRev;
} PMCBOOT0;

// Same decoded fields for the NV_PMC_BOOT_42 variant of the register.
typedef struct
{
    NvU32 impl;
    NvU32 arch;
    NvU32 majorRev;
    NvU32 minorRev;
    NvU32 minorExtRev;
} PMCBOOT42;

//
// Random collection of bus-related configuration state.
//
typedef struct
{
    RmPhysAddr gpuPhysAddr;
    RmPhysAddr gpuPhysFbAddr;
    RmPhysAddr gpuPhysInstAddr;
    RmPhysAddr gpuPhysIoAddr;
    NvU32 iovaspaceId;
    NvU32 IntLine;
    NvU32 IsrHooked;
    NvU64 nvDomainBusDeviceFunc; // packed PCI location; see gpuEncodeDomainBusDevice()
    OOR_ARCH oorArch;
} GPUBUSINFO;

// Database of RM classes supported by this GPU.
typedef struct
{
    PCLASSDESCRIPTOR pClasses;
    NvU32 *pSuppressClasses;
    NvU32 numClasses;
    NvBool bSuppressRead;
} GPUCLASSDB, *PGPUCLASSDB;

// Per-GPU engine descriptor lists controlling init/load/unload/destroy order.
typedef struct
{
    const CLASSDESCRIPTOR *pClassDescriptors;
    NvU32 numClassDescriptors;

    PENGDESCRIPTOR pEngineInitDescriptors;
    PENGDESCRIPTOR pEngineDestroyDescriptors;
    PENGDESCRIPTOR pEngineLoadDescriptors;
    PENGDESCRIPTOR pEngineUnloadDescriptors;
    NvU32 numEngineDescriptors;
} GPU_ENGINE_ORDER, *PGPU_ENGINE_ORDER;

//
// PCI Express Support
//
// PCI address of a port/bridge; 'valid' is nonzero once the entry is populated.
typedef struct NBADDR
{
    NvU32 domain;
    NvU8 bus;
    NvU8 device;
    NvU8 func;
    NvU8 valid;
    void *handle; // opaque handle for the device -- TODO confirm what populates it
} NBADDR;

typedef struct
{
    NBADDR addr;
    void *vAddr; // virtual address of the port, if it has been mapped. Not used starting with Win10 BuildXXXXX
    NvU32 PCIECapPtr; // offset of the PCIE capptr in the NB
    // Capability register set in enhanced configuration space
    //
    NvU32 PCIEErrorCapPtr; // offset of the Advanced Error Reporting Capability register set
    NvU32 PCIEVCCapPtr; // offset of the Virtual Channel (VC) Capability register set
    NvU32 PCIEL1SsCapPtr; // Offset of the L1 Substates Capabilities
    NvU16 DeviceID, VendorID; // device and vendor ID for port
} PORTDATA;

typedef struct // GPU specific data for core logic object, stored in GPU object
{
    PORTDATA upstreamPort; // the upstream port info for the GPU
    // If there is a switch this is equal to boardDownstreamPort
    // If there is no switch this is equal to rootPort
    PORTDATA rootPort; // The root port of the PCI-E root complex
    PORTDATA boardUpstreamPort; // If there is no BR03 this is equal to rootPort.
    PORTDATA boardDownstreamPort; // If there is no BR03 these data are not set.
} GPUCLDATA;

// For SLI Support Using Peer Model
typedef struct
{
    OBJGPU *pGpu; // Mapping from the local pinset number (i.e. array index) to peer GPU
    NvU32 pinset; // Mapping from the local pinset number (i.e. array index) to peer pinset number
} _GPU_SLI_PEER;


//
// Flags for gpuStateLoad() and gpuStateUnload() routines. Flags *must* be used
// symmetrically across an Unload/Load pair.
//
#define GPU_STATE_FLAGS_PRESERVING NVBIT(0) // GPU state is preserved
#define GPU_STATE_FLAGS_VGA_TRANSITION NVBIT(1) // To be used with GPU_STATE_FLAGS_PRESERVING.
#define GPU_STATE_FLAGS_PM_TRANSITION NVBIT(2) // To be used with GPU_STATE_FLAGS_PRESERVING.
#define GPU_STATE_FLAGS_PM_SUSPEND NVBIT(3)
#define GPU_STATE_FLAGS_PM_HIBERNATE NVBIT(4)
#define GPU_STATE_FLAGS_GC6_TRANSITION NVBIT(5) // To be used with GPU_STATE_FLAGS_PRESERVING.
#define GPU_STATE_DEFAULT 0 // Default flags for destructive state loads
                                          // and unloads

struct OBJHWBC;
// Singly-linked list node of hardware broadcast (HWBC) devices.
typedef struct hwbc_list
{
    struct OBJHWBC *pHWBC;
    struct hwbc_list *pNext;
} HWBC_LIST;

/*!
 * GFID allocation state
 */
typedef enum
{
    GFID_FREE = 0,
    GFID_ALLOCATED = 1,
    GFID_INVALIDATED = 2,
} GFID_ALLOC_STATUS;

// Per-GFID peer-to-peer access bookkeeping used under SR-IOV.
typedef struct SRIOV_P2P_INFO
{
    NvU32 gfid;
    NvBool bAllowP2pAccess;
    NvU32 accessRefCount;
    NvU32 destRefCount;
} SRIOV_P2P_INFO, *PSRIOV_P2P_INFO;

// Cached P2P capability information for one peer GPU.
typedef struct
{
    NvU32 peerGpuId;
    NvU32 peerGpuInstance;
    NvU32 p2pCaps;
    NvU32 p2pOptimalReadCEs;
    NvU32 p2pOptimalWriteCEs;
    NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE];
    NvU32 busPeerId;
} GPU_P2P_PEER_GPU_CAPS;

//
// typedef of private struct used in OBJGPU's data field
//

// GPU UUID; 'isInitialized' gates whether 'uuid' holds valid data.
typedef struct
{
    NvBool isInitialized;
    NvU8 uuid[RM_SHA1_GID_SIZE];
} _GPU_UUID;

// PCIe peer-clique membership; 'bValid' gates whether 'id' holds valid data.
typedef struct
{
    NvBool bValid;
    NvU8 id;
} _GPU_PCIE_PEER_CLIQUE;

typedef struct
{
    NvU32 platformId; // used to identify soc
    NvU32 implementationId; // soc-specific
    NvU32 revisionId; // soc-revision
    PMCBOOT0 pmcBoot0;
    PMCBOOT42 pmcBoot42;
    NvU8 subRevision; // sub-revision (NV_FUSE_OPT_SUBREVISION on GPU)
} _GPU_CHIP_INFO;


// Engine Database
typedef struct
{
    NvU32 size; // number of entries in pType
    RM_ENGINE_TYPE *pType;
    NvBool bValid;
} _GPU_ENGINE_DB;

#define MAX_NUM_BARS (8)
// SRIOV state
typedef struct
{
    /*!
     * Total number of VFs available in this GPU
     */
    NvU32 totalVFs;

    /*!
     * First VF Offset
     */
    NvU32 firstVFOffset;

    /*!
     * Max GFID possible
     */
    NvU32 maxGfid;

    /*!
     * Physical offset of Virtual BAR0 register. Stores the offset if the GPU is
     * a physical function, else 0
     */
    NvU32 virtualRegPhysOffset;

    /*!
     * Allocated GFIDs. Will be used to ensure plugins doesn't use same GFID for multiple VFs
     */
    NvU8 *pAllocatedGfids;

    /*!
     * The sizes of the BAR regions on the VF
     */
    NvU64 vfBarSize[MAX_NUM_BARS];

    /*!
     * First PF's BAR addresses
     */
    NvU64 firstVFBarAddress[MAX_NUM_BARS];

    /*!
     * If the VF BARs are 64-bit addressable
     */
    NvBool b64bitVFBar0;
    NvBool b64bitVFBar1;
    NvBool b64bitVFBar2;

    /*!
     * GFID used for P2P access
     */
    PSRIOV_P2P_INFO pP2PInfo;
    NvBool bP2PAllocated;
    NvU32 maxP2pGfid;
    NvU32 p2pFabricPartitionId;
} _GPU_SRIOV_STATE;

// Max # of instances for GPU children
#define GPU_MAX_CES 10
#define GPU_MAX_GRS 8
#define GPU_MAX_FIFOS 1
#define GPU_MAX_MSENCS NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS
#define GPU_MAX_NVDECS NV2080_CTRL_CMD_INTERNAL_MAX_BSPS
#define GPU_MAX_NVJPGS 8
#define GPU_MAX_HSHUBS 5
#define GPU_MAX_OFAS 1

//
// Macro defines for OBJGPU fields -- Macro defines inside NVOC class block is
// gone after NVOC preprocessing stage. For macros used outside gpu/gpu.h should
// not be defined inside the class block.
//

//
// Maximum number of Falcon objects that can be allocated on one GPU.
// This is purely a software limit and can be raised freely as more are added.
833 // 834 #define GPU_MAX_FALCON_ENGINES \ 835 ENG_IOCTRL__SIZE_1 + \ 836 ENG_GPCCS__SIZE_1 + \ 837 ENG_FECS__SIZE_1 + \ 838 ENG_NVJPEG__SIZE_1 + \ 839 ENG_NVDEC__SIZE_1 + \ 840 ENG_MSENC__SIZE_1 + \ 841 32 842 843 #define GPU_MAX_VIDEO_ENGINES \ 844 (ENG_NVJPEG__SIZE_1 + \ 845 ENG_NVDEC__SIZE_1 + \ 846 ENG_MSENC__SIZE_1 + \ 847 ENG_OFA__SIZE_1) 848 849 // for OBJGPU::pRmCtrlDeferredCmd 850 #define MAX_DEFERRED_CMDS 2 851 852 // for OBJGPU::computeModeRefCount 853 #define NV_GPU_MODE_GRAPHICS_MODE 0x00000001 854 #define NV_GPU_MODE_COMPUTE_MODE 0x00000002 855 #define NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT 0x0000000a 856 #define NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT 0x0000000b 857 858 // 859 // Structure to hold information obtained from 860 // parsing the DEVICE_INFO2 table during init. 861 // 862 863 typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO DEVICE_INFO2_ENTRY; 864 865 866 //! Value of DEV_GROUP_ID used in gpuGetDeviceEntryByType for any group ID. 867 #define DEVICE_INFO2_ENTRY_GROUP_ID_ANY (-1) 868 869 #define NV_GPU_INTERNAL_DEVICE_HANDLE 0xABCD0080 870 #define NV_GPU_INTERNAL_SUBDEVICE_HANDLE 0xABCD2080 871 872 // 873 // NV GPU simulation mode defines 874 // Keep in sync with os.h SIM MODE defines until osGetSimulationMode is deprecated. 
875 // 876 #ifndef NV_SIM_MODE_DEFS 877 #define NV_SIM_MODE_DEFS 878 #define NV_SIM_MODE_HARDWARE 0U 879 #define NV_SIM_MODE_RTL 1U 880 #define NV_SIM_MODE_CMODEL 2U 881 #define NV_SIM_MODE_MODS_AMODEL 3U 882 #define NV_SIM_MODE_TEGRA_FPGA 4U 883 #define NV_SIM_MODE_INVALID (~0x0U) 884 #endif 885 886 #define GPU_IS_NVSWITCH_DETECTED(pGpu) \ 887 (pGpu->nvswitchSupport == NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_SUPPORTED) 888 889 890 // 891 // The actual GPU object definition 892 // 893 #ifdef NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED 894 #define PRIVATE_FIELD(x) x 895 #else 896 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) 897 #endif 898 struct OBJGPU { 899 const struct NVOC_RTTI *__nvoc_rtti; 900 struct Object __nvoc_base_Object; 901 struct RmHalspecOwner __nvoc_base_RmHalspecOwner; 902 struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; 903 struct Object *__nvoc_pbase_Object; 904 struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; 905 struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; 906 struct OBJGPU *__nvoc_pbase_OBJGPU; 907 NV_STATUS (*__gpuConstructDeviceInfoTable__)(struct OBJGPU *); 908 NV_STATUS (*__gpuWriteBusConfigReg__)(struct OBJGPU *, NvU32, NvU32); 909 NV_STATUS (*__gpuReadBusConfigReg__)(struct OBJGPU *, NvU32, NvU32 *); 910 NV_STATUS (*__gpuReadBusConfigRegEx__)(struct OBJGPU *, NvU32, NvU32 *, THREAD_STATE_NODE *); 911 NV_STATUS (*__gpuReadFunctionConfigReg__)(struct OBJGPU *, NvU32, NvU32, NvU32 *); 912 NV_STATUS (*__gpuWriteFunctionConfigReg__)(struct OBJGPU *, NvU32, NvU32, NvU32); 913 NV_STATUS (*__gpuWriteFunctionConfigRegEx__)(struct OBJGPU *, NvU32, NvU32, NvU32, THREAD_STATE_NODE *); 914 NV_STATUS (*__gpuReadVgpuConfigReg__)(struct OBJGPU *, NvU32, NvU32 *); 915 void (*__gpuGetIdInfo__)(struct OBJGPU *); 916 void (*__gpuHandleSanityCheckRegReadError__)(struct OBJGPU *, NvU32, NvU32); 917 void (*__gpuHandleSecFault__)(struct OBJGPU *); 918 const GPUCHILDPRESENT *(*__gpuGetChildrenPresent__)(struct OBJGPU *, NvU32 *); 919 const CLASSDESCRIPTOR 
*(*__gpuGetClassDescriptorList__)(struct OBJGPU *, NvU32 *); 920 NvU32 (*__gpuGetPhysAddrWidth__)(struct OBJGPU *, NV_ADDRESS_SPACE); 921 NvBool (*__gpuFuseSupportsDisplay__)(struct OBJGPU *); 922 NV_STATUS (*__gpuClearFbhubPoisonIntrForBug2924523__)(struct OBJGPU *); 923 void (*__gpuReadDeviceId__)(struct OBJGPU *, NvU32 *, NvU32 *); 924 NvU64 (*__gpuGetFlaVasSize__)(struct OBJGPU *, NvBool); 925 void (*__gpuDetermineSelfHostedMode__)(struct OBJGPU *); 926 void (*__gpuDetermineMIGSupport__)(struct OBJGPU *); 927 NvBool (*__gpuIsAtsSupportedWithSmcMemPartitioning__)(struct OBJGPU *); 928 NvBool (*__gpuIsSliCapableWithoutDisplay__)(struct OBJGPU *); 929 NvBool (*__gpuIsCCEnabledInHw__)(struct OBJGPU *); 930 NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *); 931 NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *); 932 NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED; 933 NvBool bVideoLinkDisabled; 934 GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel; 935 NvU32 moduleId; 936 NvU8 nvswitchSupport; 937 NvBool PDB_PROP_GPU_IN_STANDBY; 938 NvBool PDB_PROP_GPU_IN_HIBERNATE; 939 NvBool PDB_PROP_GPU_IN_PM_CODEPATH; 940 NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH; 941 NvBool PDB_PROP_GPU_STATE_INITIALIZED; 942 NvBool PDB_PROP_GPU_EMULATION; 943 NvBool PDB_PROP_GPU_PRIMARY_DEVICE; 944 NvBool PDB_PROP_GPU_HYBRID_MGPU; 945 NvBool PDB_PROP_GPU_ALTERNATE_TREE_ENABLED; 946 NvBool PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS; 947 NvBool PDB_PROP_GPU_3D_CONTROLLER; 948 NvBool PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM; 949 NvBool PDB_PROP_GPU_IS_CONNECTED; 950 NvBool PDB_PROP_GPU_BROKEN_FB; 951 NvBool PDB_PROP_GPU_IN_FULLCHIP_RESET; 952 NvBool PDB_PROP_GPU_IN_SECONDARY_BUS_RESET; 953 NvBool PDB_PROP_GPU_IN_GC6_RESET; 954 NvBool PDB_PROP_GPU_IS_GEMINI; 955 NvBool PDB_PROP_GPU_PERSISTENT_SW_STATE; 956 NvBool PDB_PROP_GPU_COHERENT_CPU_MAPPING; 957 NvBool PDB_PROP_GPU_IS_LOST; 958 NvBool PDB_PROP_GPU_IN_TIMEOUT_RECOVERY; 959 NvBool PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT; 960 NvBool 
PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY; 961 NvBool PDB_PROP_GPU_TEGRA_SOC_IGPU; 962 NvBool PDB_PROP_GPU_ATS_SUPPORTED; 963 NvBool PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING; 964 NvBool PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE; 965 NvBool PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE; 966 NvBool PDB_PROP_GPU_IS_UEFI; 967 NvBool PDB_PROP_GPU_ZERO_FB; 968 NvBool PDB_PROP_GPU_BAR1_BAR2_DISABLED; 969 NvBool PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE; 970 NvBool PDB_PROP_GPU_MIG_SUPPORTED; 971 NvBool PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED; 972 NvBool PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED; 973 NvBool PDB_PROP_GPU_IS_COT_ENABLED; 974 NvBool PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE; 975 NvBool PDB_PROP_GPU_SWRL_GRANULAR_LOCKING; 976 NvBool PDB_PROP_GPU_IN_SLI_LINK_CODEPATH; 977 NvBool PDB_PROP_GPU_IS_PLX_PRESENT; 978 NvBool PDB_PROP_GPU_IS_BR03_PRESENT; 979 NvBool PDB_PROP_GPU_IS_BR04_PRESENT; 980 NvBool PDB_PROP_GPU_BEHIND_BRIDGE; 981 NvBool PDB_PROP_GPU_BEHIND_BR03; 982 NvBool PDB_PROP_GPU_BEHIND_BR04; 983 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED; 984 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED; 985 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED; 986 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY; 987 NvBool PDB_PROP_GPU_RM_UNLINKED_SLI; 988 NvBool PDB_PROP_GPU_SLI_LINK_ACTIVE; 989 NvBool PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST; 990 NvBool PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH; 991 NvBool PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL; 992 NvBool PDB_PROP_GPU_IS_MOBILE; 993 NvBool PDB_PROP_GPU_RTD3_GC6_SUPPORTED; 994 NvBool PDB_PROP_GPU_RTD3_GC6_ACTIVE; 995 NvBool PDB_PROP_GPU_FAST_GC6_ACTIVE; 996 NvBool PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED; 997 NvBool PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA; 998 NvBool PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED; 999 NvBool PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED; 1000 NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERING; 1001 NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERED; 1002 NvBool PDB_PROP_GPU_ACCOUNTING_ON; 1003 NvBool 
PDB_PROP_GPU_INACCESSIBLE; 1004 NvBool PDB_PROP_GPU_NVLINK_SYSMEM; 1005 NvBool PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK; 1006 NvBool PDB_PROP_GPU_C2C_SYSMEM; 1007 NvBool PDB_PROP_GPU_IN_TCC_MODE; 1008 NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE; 1009 NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K; 1010 NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT; 1011 NvBool PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT; 1012 NvBool PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS; 1013 NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU; 1014 NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA; 1015 NvBool PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED; 1016 NvBool PDB_PROP_GPU_NV_USERMODE_ENABLED; 1017 NvBool PDB_PROP_GPU_IN_FATAL_ERROR; 1018 NvBool PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE; 1019 NvBool PDB_PROP_GPU_VGA_ENABLED; 1020 NvBool PDB_PROP_GPU_IS_MXM_3X; 1021 NvBool PDB_PROP_GPU_GSYNC_III_ATTACHED; 1022 NvBool PDB_PROP_GPU_QSYNC_II_ATTACHED; 1023 NvBool PDB_PROP_GPU_CC_FEATURE_CAPABLE; 1024 NvBool PDB_PROP_GPU_APM_FEATURE_CAPABLE; 1025 NvBool PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX; 1026 NvBool PDB_PROP_GPU_SKIP_TABLE_CE_MAP; 1027 NvBool PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF; 1028 NvBool PDB_PROP_GPU_IS_SOC_SDM; 1029 NvBool PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL; 1030 OS_GPU_INFO *pOsGpuInfo; 1031 OS_RM_CAPS *pOsRmCaps; 1032 NvU32 halImpl; 1033 void *hPci; 1034 GpuEngineEventNotificationList *engineNonstallIntrEventNotifications[63]; 1035 NvBool bIsSOC; 1036 NvU32 gpuInstance; 1037 NvU32 gpuDisabled; 1038 NvU32 gpuId; 1039 NvU32 boardId; 1040 NvU32 deviceInstance; 1041 NvU32 subdeviceInstance; 1042 NvS32 numaNodeId; 1043 _GPU_UUID gpuUuid; 1044 NvU32 gpuPhysicalId; 1045 NvU32 gpuTerminatedLinkMask; 1046 NvBool gpuLinkTerminationEnabled; 1047 NvBool gspRmInitialized; 1048 _GPU_PCIE_PEER_CLIQUE pciePeerClique; 1049 NvU32 i2cPortForExtdev; 1050 GPUIDINFO idInfo; 1051 _GPU_CHIP_INFO chipInfo; 1052 GPUBUSINFO busInfo; 1053 const GPUCHILDPRESENT *pChildrenPresent; 1054 NvU32 
numChildrenPresent; 1055 GPU_ENGINE_ORDER engineOrder; 1056 GPUCLASSDB classDB; 1057 NvU32 chipId0; 1058 NvU32 chipId1; 1059 NvU32 pmcEnable; 1060 NvU32 pmcRmOwnsIntrMask; 1061 NvBool testIntr; 1062 NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *gspSupportedEngines; 1063 NvU32 numCEs; 1064 NvU32 ceFaultMethodBufferSize; 1065 NvBool isVirtual; 1066 NvBool isGspClient; 1067 NvU64 fbLength; 1068 NvU32 instLength; 1069 NvBool instSetViaAttachArg; 1070 NvU32 activeFBIOs; 1071 NvU64 gpuVbiosPostTime; 1072 NvU32 uefiScanoutSurfaceSizeInMB; 1073 RmPhysAddr dmaStartAddress; 1074 NvU32 gpuDeviceMapCount; 1075 DEVICE_MAPPING deviceMappings[60]; 1076 struct IoAperture *pIOApertures[12]; 1077 DEVICE_MAPPING *pDeviceMappingsByDeviceInstance[12]; 1078 void *gpuCfgAddr; 1079 TIMEOUT_DATA timeoutData; 1080 NvU32 computeModeRules; 1081 NvS32 computeModeRefCount; 1082 NvHandle hComputeModeReservation; 1083 NvBool bIsDebugModeEnabled; 1084 NvU32 masterFromSLIConfig; 1085 NvU32 sliStatus; 1086 PENG_INFO_LINK_NODE infoList; 1087 struct OBJOS *pOS; 1088 struct OBJHAL *pHal; 1089 struct KernelBif *pKernelBif; 1090 struct KernelMc *pKernelMc; 1091 struct SwIntr *pSwIntr; 1092 struct KernelMemorySystem *pKernelMemorySystem; 1093 struct MemoryManager *pMemoryManager; 1094 struct KernelDisplay *pKernelDisplay; 1095 struct OBJTMR *pTmr; 1096 struct KernelBus *pKernelBus; 1097 struct KernelGmmu *pKernelGmmu; 1098 struct KernelSec2 *pKernelSec2; 1099 struct KernelGsp *pKernelGsp; 1100 struct VirtMemAllocator *pDma; 1101 struct KernelMIGManager *pKernelMIGManager; 1102 struct KernelGraphicsManager *pKernelGraphicsManager; 1103 struct KernelGraphics *pKernelGraphics[8]; 1104 struct KernelPerf *pKernelPerf; 1105 struct KernelRc *pKernelRc; 1106 struct Intr *pIntr; 1107 struct KernelPmu *pKernelPmu; 1108 struct KernelCE *pKCe[10]; 1109 struct KernelFifo *pKernelFifo; 1110 struct OBJUVM *pUvm; 1111 struct NvDebugDump *pNvd; 1112 struct KernelNvlink *pKernelNvlink; 1113 struct OBJGPUMON *pGpuMon; 1114 struct 
OBJSWENG *pSwEng; 1115 struct KernelFsp *pKernelFsp; 1116 struct ConfidentialCompute *pConfCompute; 1117 struct KernelCcu *pKernelCcu; 1118 HWBC_LIST *pHWBCList; 1119 GPUCLDATA gpuClData; 1120 _GPU_ENGINE_DB engineDB; 1121 NvU32 engineDBSize; 1122 NvU32 instCacheOverride; 1123 NvS32 numOfMclkLockRequests; 1124 NvU32 netlistNum; 1125 RmCtrlDeferredCmd pRmCtrlDeferredCmd[2]; 1126 ACPI_DATA acpi; 1127 ACPI_METHOD_DATA acpiMethodData; 1128 NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS backLightMethodData; 1129 NvU32 activeFifoEventMthdNotifiers; 1130 struct Falcon *constructedFalcons[70]; 1131 NvU32 numConstructedFalcons; 1132 struct GenericKernelFalcon *genericKernelFalcons[70]; 1133 NvU32 numGenericKernelFalcons; 1134 struct KernelVideoEngine *kernelVideoEngines[20]; 1135 NvU32 numKernelVideoEngines; 1136 NvU8 *pUserRegisterAccessMap; 1137 NvU8 *pUnrestrictedRegisterAccessMap; 1138 NvU32 userRegisterAccessMapSize; 1139 struct PrereqTracker *pPrereqTracker; 1140 RegisterAccess registerAccess; 1141 NvBool bUseRegisterAccessMap; 1142 NvU32 *pRegopOffsetScratchBuffer; 1143 NvU32 *pRegopOffsetAddrScratchBuffer; 1144 NvU32 regopScratchBufferMaxOffsets; 1145 _GPU_SRIOV_STATE sriovState; 1146 NvU64 vmmuSegmentSize; 1147 NvHandle hDefaultClientShare; 1148 NvHandle hDefaultClientShareDevice; 1149 NvHandle hDefaultClientShareSubDevice; 1150 NvU32 externalKernelClientCount; 1151 DEVICE_INFO2_ENTRY *pDeviceInfoTable; 1152 NvU32 numDeviceInfoEntries; 1153 NvHandle hInternalClient; 1154 NvHandle hInternalDevice; 1155 NvHandle hInternalSubdevice; 1156 struct Subdevice *pCachedSubdevice; 1157 struct RsClient *pCachedRsClient; 1158 RM_API physicalRmApi; 1159 struct Subdevice **pSubdeviceBackReferences; 1160 NvU32 numSubdeviceBackReferences; 1161 NvU32 maxSubdeviceBackReferences; 1162 NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo; 1163 NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *boardInfo; 1164 GpuSharedDataMap userSharedData; 1165 NvBool bBar2MovedByVtd; 1166 NvBool 
bBar1Is64Bit; 1167 NvBool bSurpriseRemovalSupported; 1168 NvBool bTwoStageRcRecoveryEnabled; 1169 NvBool bReplayableTraceEnabled; 1170 NvBool bInD3Cold; 1171 NvBool bIsSimulation; 1172 NvBool bIsModsAmodel; 1173 NvBool bIsFmodel; 1174 NvBool bIsRtlsim; 1175 NvBool bIsPassthru; 1176 NvBool bIsVirtualWithSriov; 1177 NvU32 P2PPeerGpuCount; 1178 GPU_P2P_PEER_GPU_CAPS P2PPeerGpuCaps[32]; 1179 NvBool bIsSelfHosted; 1180 NvBool bStateLoading; 1181 NvBool bStateUnloading; 1182 NvBool bStateLoaded; 1183 NvBool bFullyConstructed; 1184 NvBool bBf3WarBug4040336Enabled; 1185 NvBool bUnifiedMemorySpaceEnabled; 1186 NvBool bSriovEnabled; 1187 NvBool bWarBug200577889SriovHeavyEnabled; 1188 NvBool bNonPowerOf2ChannelCountSupported; 1189 NvBool bCacheOnlyMode; 1190 NvBool bNeed4kPageIsolation; 1191 NvBool bSplitVasManagementServerClientRm; 1192 NvU32 instLocOverrides; 1193 NvU32 instLocOverrides2; 1194 NvU32 instLocOverrides3; 1195 NvU32 instLocOverrides4; 1196 NvBool bInstLoc47bitPaWar; 1197 NvU32 instVprOverrides; 1198 NvU32 optimizeUseCaseOverride; 1199 NvS16 fecsCtxswLogConsumerCount; 1200 NvS16 videoCtxswLogConsumerCount; 1201 EventBufferMap vgpuFecsTraceStagingBindings; 1202 FecsEventBufferBindMultiMap fecsEventBufferBindingsUid; 1203 TMR_EVENT *pFecsTimerEvent; 1204 VideoEventBufferBindMultiMap videoEventBufferBindingsUid; 1205 TMR_EVENT *pVideoTimerEvent; 1206 struct OBJVASPACE *pFabricVAS; 1207 NvBool bPipelinedPteMemEnabled; 1208 NvBool bIsBarPteInSysmemSupported; 1209 NvBool bRegUsesGlobalSurfaceOverrides; 1210 NvBool bClientRmAllocatedCtxBuffer; 1211 NvBool bIterativeMmuWalker; 1212 NvBool bEccPageRetirementWithSliAllowed; 1213 NvBool bVidmemPreservationBrokenBug3172217; 1214 NvBool bInstanceMemoryAlwaysCached; 1215 NvBool bUseRpcSimEscapes; 1216 NvBool bRmProfilingPrivileged; 1217 NvBool bGeforceSmb; 1218 NvBool bIsGeforce; 1219 NvBool bIsQuadro; 1220 NvBool bIsVgx; 1221 NvBool bIsNvidiaNvs; 1222 NvBool bIsTitan; 1223 NvBool bIsTesla; 1224 NvBool bIsAC; 1225 
BRANDING_TYPE brandingCache; 1226 NvBool bComputePolicyTimesliceSupported; 1227 NvBool bGlobalPoisonFuseEnabled; 1228 RmPhysAddr simAccessBufPhysAddr; 1229 NvU32 fabricProbeRegKeyOverride; 1230 NvU8 fabricProbeRetryDelay; 1231 NvU8 fabricProbeSlowdownThreshold; 1232 NvBool bVgpuGspPluginOffloadEnabled; 1233 NvBool bSriovCapable; 1234 NvBool bRecheckSliSupportAtResume; 1235 NvBool bGpuNvEncAv1Supported; 1236 _GPU_SLI_PEER peer[2]; 1237 NvBool bIsGspOwnedFaultBuffersEnabled; 1238 _GPU_GC6_STATE gc6State; 1239 }; 1240 1241 #ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ 1242 #define __NVOC_CLASS_OBJGPU_TYPEDEF__ 1243 typedef struct OBJGPU OBJGPU; 1244 #endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ 1245 1246 #ifndef __nvoc_class_id_OBJGPU 1247 #define __nvoc_class_id_OBJGPU 0x7ef3cb 1248 #endif /* __nvoc_class_id_OBJGPU */ 1249 1250 extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; 1251 1252 #define __staticCast_OBJGPU(pThis) \ 1253 ((pThis)->__nvoc_pbase_OBJGPU) 1254 1255 #ifdef __nvoc_gpu_h_disabled 1256 #define __dynamicCast_OBJGPU(pThis) ((OBJGPU*)NULL) 1257 #else //__nvoc_gpu_h_disabled 1258 #define __dynamicCast_OBJGPU(pThis) \ 1259 ((OBJGPU*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPU))) 1260 #endif //__nvoc_gpu_h_disabled 1261 1262 #define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_CAST 1263 #define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_NAME PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL 1264 #define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_CAST 1265 #define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GC6_SUPPORTED 1266 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_CAST 1267 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU 1268 #define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_CAST 1269 #define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_NAME PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K 1270 #define PDB_PROP_GPU_INACCESSIBLE_BASE_CAST 1271 #define PDB_PROP_GPU_INACCESSIBLE_BASE_NAME 
PDB_PROP_GPU_INACCESSIBLE 1272 #define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_CAST 1273 #define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH 1274 #define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_CAST 1275 #define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_NAME PDB_PROP_GPU_IN_FATAL_ERROR 1276 #define PDB_PROP_GPU_VGA_ENABLED_BASE_CAST 1277 #define PDB_PROP_GPU_VGA_ENABLED_BASE_NAME PDB_PROP_GPU_VGA_ENABLED 1278 #define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_CAST 1279 #define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_RESUME_CODEPATH 1280 #define PDB_PROP_GPU_IN_STANDBY_BASE_CAST 1281 #define PDB_PROP_GPU_IN_STANDBY_BASE_NAME PDB_PROP_GPU_IN_STANDBY 1282 #define PDB_PROP_GPU_IS_COT_ENABLED_BASE_CAST 1283 #define PDB_PROP_GPU_IS_COT_ENABLED_BASE_NAME PDB_PROP_GPU_IS_COT_ENABLED 1284 #define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_CAST 1285 #define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_NAME PDB_PROP_GPU_COHERENT_CPU_MAPPING 1286 #define PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED_BASE_CAST 1287 #define PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED_BASE_NAME PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED 1288 #define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_CAST 1289 #define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY 1290 #define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_CAST 1291 #define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED 1292 #define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_CAST 1293 #define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_NAME PDB_PROP_GPU_SLI_LINK_ACTIVE 1294 #define PDB_PROP_GPU_IN_TCC_MODE_BASE_CAST 1295 #define PDB_PROP_GPU_IN_TCC_MODE_BASE_NAME PDB_PROP_GPU_IN_TCC_MODE 1296 #define PDB_PROP_GPU_C2C_SYSMEM_BASE_CAST 1297 #define PDB_PROP_GPU_C2C_SYSMEM_BASE_NAME PDB_PROP_GPU_C2C_SYSMEM 1298 #define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_CAST 1299 #define 
PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_NAME PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING 1300 #define PDB_PROP_GPU_IN_GC6_RESET_BASE_CAST 1301 #define PDB_PROP_GPU_IN_GC6_RESET_BASE_NAME PDB_PROP_GPU_IN_GC6_RESET 1302 #define PDB_PROP_GPU_HYBRID_MGPU_BASE_CAST 1303 #define PDB_PROP_GPU_HYBRID_MGPU_BASE_NAME PDB_PROP_GPU_HYBRID_MGPU 1304 #define PDB_PROP_GPU_3D_CONTROLLER_BASE_CAST 1305 #define PDB_PROP_GPU_3D_CONTROLLER_BASE_NAME PDB_PROP_GPU_3D_CONTROLLER 1306 #define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_CAST 1307 #define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED 1308 #define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_CAST 1309 #define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE 1310 #define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_CAST 1311 #define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_NAME PDB_PROP_GPU_SKIP_TABLE_CE_MAP 1312 #define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_CAST 1313 #define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_NAME PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED 1314 #define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_CAST 1315 #define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI 1316 #define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_CAST 1317 #define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_NAME PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL 1318 #define PDB_PROP_GPU_IS_UEFI_BASE_CAST 1319 #define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI 1320 #define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_CAST 1321 #define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_NAME PDB_PROP_GPU_IN_SECONDARY_BUS_RESET 1322 #define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_CAST 1323 #define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_NAME PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT 1324 #define PDB_PROP_GPU_IS_CONNECTED_BASE_CAST 1325 #define PDB_PROP_GPU_IS_CONNECTED_BASE_NAME PDB_PROP_GPU_IS_CONNECTED 1326 #define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_CAST 1327 #define 
PDB_PROP_GPU_IS_PLX_PRESENT_BASE_NAME PDB_PROP_GPU_IS_PLX_PRESENT 1328 #define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_CAST 1329 #define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_NAME PDB_PROP_GPU_NVLINK_SYSMEM 1330 #define PDB_PROP_GPU_IS_MOBILE_BASE_CAST 1331 #define PDB_PROP_GPU_IS_MOBILE_BASE_NAME PDB_PROP_GPU_IS_MOBILE 1332 #define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_CAST 1333 #define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_RTD3_GC6_ACTIVE 1334 #define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_CAST 1335 #define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_CC_FEATURE_CAPABLE 1336 #define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_CAST 1337 #define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_ENABLED 1338 #define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_CAST 1339 #define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_NAME PDB_PROP_GPU_PERSISTENT_SW_STATE 1340 #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_CAST 1341 #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH 1342 #define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_CAST 1343 #define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_NAME PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT 1344 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST 1345 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED 1346 #define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST 1347 #define PDB_PROP_GPU_BEHIND_BR03_BASE_NAME PDB_PROP_GPU_BEHIND_BR03 1348 #define PDB_PROP_GPU_BEHIND_BR04_BASE_CAST 1349 #define PDB_PROP_GPU_BEHIND_BR04_BASE_NAME PDB_PROP_GPU_BEHIND_BR04 1350 #define PDB_PROP_GPU_MIG_SUPPORTED_BASE_CAST 1351 #define PDB_PROP_GPU_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTED 1352 #define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_CAST 1353 #define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_NAME PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE 1354 #define PDB_PROP_GPU_BAR1_BAR2_DISABLED_BASE_CAST 1355 #define PDB_PROP_GPU_BAR1_BAR2_DISABLED_BASE_NAME 
PDB_PROP_GPU_BAR1_BAR2_DISABLED 1356 #define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_CAST 1357 #define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_NAME PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE 1358 #define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_CAST 1359 #define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_NAME PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE 1360 #define PDB_PROP_GPU_ACCOUNTING_ON_BASE_CAST 1361 #define PDB_PROP_GPU_ACCOUNTING_ON_BASE_NAME PDB_PROP_GPU_ACCOUNTING_ON 1362 #define PDB_PROP_GPU_IN_HIBERNATE_BASE_CAST 1363 #define PDB_PROP_GPU_IN_HIBERNATE_BASE_NAME PDB_PROP_GPU_IN_HIBERNATE 1364 #define PDB_PROP_GPU_BROKEN_FB_BASE_CAST 1365 #define PDB_PROP_GPU_BROKEN_FB_BASE_NAME PDB_PROP_GPU_BROKEN_FB 1366 #define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_CAST 1367 #define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERING 1368 #define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_CAST 1369 #define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_NAME PDB_PROP_GPU_IN_TIMEOUT_RECOVERY 1370 #define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_CAST 1371 #define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERED 1372 #define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_CAST 1373 #define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_FAST_GC6_ACTIVE 1374 #define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_CAST 1375 #define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_IN_FULLCHIP_RESET 1376 #define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_CAST 1377 #define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_NAME PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA 1378 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_CAST 1379 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA 1380 #define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_CAST 1381 #define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_SLI_LINK_CODEPATH 1382 #define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_CAST 1383 #define 
PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR03_PRESENT 1384 #define PDB_PROP_GPU_IS_GEMINI_BASE_CAST 1385 #define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI 1386 #define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_CAST 1387 #define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_NAME PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED 1388 #define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST 1389 #define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED 1390 #define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST 1391 #define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_NAME PDB_PROP_GPU_NV_USERMODE_ENABLED 1392 #define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_CAST 1393 #define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_NAME PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT 1394 #define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_CAST 1395 #define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS 1396 #define PDB_PROP_GPU_IS_MXM_3X_BASE_CAST 1397 #define PDB_PROP_GPU_IS_MXM_3X_BASE_NAME PDB_PROP_GPU_IS_MXM_3X 1398 #define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_CAST 1399 #define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_NAME PDB_PROP_GPU_GSYNC_III_ATTACHED 1400 #define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_CAST 1401 #define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_NAME PDB_PROP_GPU_QSYNC_II_ATTACHED 1402 #define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_CAST 1403 #define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR04_PRESENT 1404 #define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_CAST 1405 #define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_NAME PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF 1406 #define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_CAST 1407 #define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_NAME PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE 1408 #define PDB_PROP_GPU_IS_SOC_SDM_BASE_CAST 1409 #define PDB_PROP_GPU_IS_SOC_SDM_BASE_NAME PDB_PROP_GPU_IS_SOC_SDM 1410 #define 
PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_CAST 1411 #define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_NAME PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM 1412 #define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_CAST 1413 #define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_NAME PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED 1414 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_CAST 1415 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED 1416 #define PDB_PROP_GPU_ZERO_FB_BASE_CAST 1417 #define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB 1418 #define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST 1419 #define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING 1420 #define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_CAST 1421 #define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_NAME PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK 1422 #define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_CAST 1423 #define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_IGPU 1424 #define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_CAST 1425 #define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED 1426 #define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST 1427 #define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED 1428 #define PDB_PROP_GPU_EMULATION_BASE_CAST 1429 #define PDB_PROP_GPU_EMULATION_BASE_NAME PDB_PROP_GPU_EMULATION 1430 #define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_CAST 1431 #define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_APM_FEATURE_CAPABLE 1432 #define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_CAST 1433 #define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_NAME PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS 1434 #define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_CAST 1435 #define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_NAME PDB_PROP_GPU_PRIMARY_DEVICE 1436 #define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_CAST 1437 #define 
PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_NAME PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE 1438 #define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_CAST 1439 #define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_NAME PDB_PROP_GPU_BEHIND_BRIDGE 1440 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_CAST 1441 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY 1442 #define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_CAST 1443 #define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_NAME PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST 1444 #define PDB_PROP_GPU_IS_LOST_BASE_CAST 1445 #define PDB_PROP_GPU_IS_LOST_BASE_NAME PDB_PROP_GPU_IS_LOST 1446 #define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_CAST 1447 #define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED 1448 #define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_CAST 1449 #define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_NAME PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX 1450 1451 NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU**, Dynamic*, NvU32, va_list); 1452 1453 NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32, 1454 NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, 1455 RM_RUNTIME_VARIANT RmVariantHal_rmVariant, 1456 TEGRA_CHIP_TYPE TegraChipHal_tegraType, 1457 NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance); 1458 #define __objCreate_OBJGPU(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, TegraChipHal_tegraType, DispIpHal_ipver, arg_gpuInstance) \ 1459 __nvoc_objCreate_OBJGPU((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, TegraChipHal_tegraType, DispIpHal_ipver, arg_gpuInstance) 1460 1461 #define gpuConstructDeviceInfoTable(pGpu) 
gpuConstructDeviceInfoTable_DISPATCH(pGpu) 1462 #define gpuConstructDeviceInfoTable_HAL(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu) 1463 #define gpuWriteBusConfigReg(pGpu, index, value) gpuWriteBusConfigReg_DISPATCH(pGpu, index, value) 1464 #define gpuWriteBusConfigReg_HAL(pGpu, index, value) gpuWriteBusConfigReg_DISPATCH(pGpu, index, value) 1465 #define gpuReadBusConfigReg(pGpu, index, data) gpuReadBusConfigReg_DISPATCH(pGpu, index, data) 1466 #define gpuReadBusConfigReg_HAL(pGpu, index, data) gpuReadBusConfigReg_DISPATCH(pGpu, index, data) 1467 #define gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_DISPATCH(pGpu, index, data, pThreadState) 1468 #define gpuReadBusConfigRegEx_HAL(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_DISPATCH(pGpu, index, data, pThreadState) 1469 #define gpuReadFunctionConfigReg(pGpu, function, reg, data) gpuReadFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1470 #define gpuReadFunctionConfigReg_HAL(pGpu, function, reg, data) gpuReadFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1471 #define gpuWriteFunctionConfigReg(pGpu, function, reg, data) gpuWriteFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1472 #define gpuWriteFunctionConfigReg_HAL(pGpu, function, reg, data) gpuWriteFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1473 #define gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_DISPATCH(pGpu, function, reg, data, pThreadState) 1474 #define gpuWriteFunctionConfigRegEx_HAL(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_DISPATCH(pGpu, function, reg, data, pThreadState) 1475 #define gpuReadVgpuConfigReg(pGpu, index, data) gpuReadVgpuConfigReg_DISPATCH(pGpu, index, data) 1476 #define gpuReadVgpuConfigReg_HAL(pGpu, index, data) gpuReadVgpuConfigReg_DISPATCH(pGpu, index, data) 1477 #define gpuGetIdInfo(pGpu) gpuGetIdInfo_DISPATCH(pGpu) 1478 #define gpuGetIdInfo_HAL(pGpu) gpuGetIdInfo_DISPATCH(pGpu) 1479 
// ---------------------------------------------------------------------------
// NVOC dispatch macros.
// Each public gpuXxx(...) entry point -- and its gpuXxx_HAL(...) alias -- expands
// to the same gpuXxx_DISPATCH(...) call, which routes through the per-object
// function pointer (__gpuXxx__) stored in struct OBJGPU.
// NOTE(review): this header is NVOC-generated; fix the generator inputs rather
// than hand-editing these expansions.
// ---------------------------------------------------------------------------
#define gpuHandleSanityCheckRegReadError(pGpu, addr, value) gpuHandleSanityCheckRegReadError_DISPATCH(pGpu, addr, value)
#define gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value) gpuHandleSanityCheckRegReadError_DISPATCH(pGpu, addr, value)
#define gpuHandleSecFault(pGpu) gpuHandleSecFault_DISPATCH(pGpu)
#define gpuHandleSecFault_HAL(pGpu) gpuHandleSecFault_DISPATCH(pGpu)
#define gpuGetChildrenPresent(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries)
#define gpuGetChildrenPresent_HAL(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries)
#define gpuGetClassDescriptorList(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0)
#define gpuGetClassDescriptorList_HAL(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0)
#define gpuGetPhysAddrWidth(pGpu, arg0) gpuGetPhysAddrWidth_DISPATCH(pGpu, arg0)
#define gpuGetPhysAddrWidth_HAL(pGpu, arg0) gpuGetPhysAddrWidth_DISPATCH(pGpu, arg0)
#define gpuFuseSupportsDisplay(pGpu) gpuFuseSupportsDisplay_DISPATCH(pGpu)
#define gpuFuseSupportsDisplay_HAL(pGpu) gpuFuseSupportsDisplay_DISPATCH(pGpu)
#define gpuClearFbhubPoisonIntrForBug2924523(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu)
#define gpuClearFbhubPoisonIntrForBug2924523_HAL(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu)
#define gpuReadDeviceId(pGpu, arg0, arg1) gpuReadDeviceId_DISPATCH(pGpu, arg0, arg1)
#define gpuReadDeviceId_HAL(pGpu, arg0, arg1) gpuReadDeviceId_DISPATCH(pGpu, arg0, arg1)
#define gpuGetFlaVasSize(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization)
#define gpuGetFlaVasSize_HAL(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization)
#define gpuDetermineSelfHostedMode(pGpu) gpuDetermineSelfHostedMode_DISPATCH(pGpu)
#define gpuDetermineSelfHostedMode_HAL(pGpu) gpuDetermineSelfHostedMode_DISPATCH(pGpu)
#define gpuDetermineMIGSupport(pGpu) gpuDetermineMIGSupport_DISPATCH(pGpu)
#define gpuDetermineMIGSupport_HAL(pGpu) gpuDetermineMIGSupport_DISPATCH(pGpu)
#define gpuIsAtsSupportedWithSmcMemPartitioning(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(pGpu)
#define gpuIsAtsSupportedWithSmcMemPartitioning_HAL(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(pGpu)
#define gpuIsSliCapableWithoutDisplay(pGpu) gpuIsSliCapableWithoutDisplay_DISPATCH(pGpu)
#define gpuIsSliCapableWithoutDisplay_HAL(pGpu) gpuIsSliCapableWithoutDisplay_DISPATCH(pGpu)
#define gpuIsCCEnabledInHw(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
#define gpuIsCCEnabledInHw_HAL(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
#define gpuIsDevModeEnabledInHw(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsDevModeEnabledInHw_HAL(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
// No-op stub: physical-GPU construction does nothing in this build and always
// reports success.  The hex suffix on the name is assigned by the NVOC
// generator (it appears to identify the canonical stub body -- confirm against
// the generator before relying on that).
static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
// OBJGPU support compiled out: any call trips a pre-compiled-build assertion
// and reports NV_ERR_NOT_SUPPORTED.  The same #ifdef pattern repeats for every
// public entry point below; when not disabled, the public name is simply a
// macro alias for the stub (or the out-of-line _IMPL).
static inline NV_STATUS gpuConstructPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuConstructPhysical(pGpu) gpuConstructPhysical_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuConstructPhysical_HAL(pGpu) gpuConstructPhysical(pGpu)

// No-op stub: nothing to tear down for the physical GPU in this build.
static inline void gpuDestructPhysical_b3696a(struct OBJGPU *pGpu) {
    return;
}


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestructPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestructPhysical(pGpu) gpuDestructPhysical_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestructPhysical_HAL(pGpu) gpuDestructPhysical(pGpu)

// GPU state-machine entry points: implemented out of line (_IMPL); the public
// name resolves to the implementation when OBJGPU is enabled.
NV_STATUS gpuStatePreInit_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStatePreInit(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStatePreInit(pGpu) gpuStatePreInit_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuStatePreInit_HAL(pGpu) gpuStatePreInit(pGpu)

NV_STATUS gpuStateLoad_IMPL(struct OBJGPU *pGpu, NvU32 arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateLoad(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateLoad(pGpu, arg0) gpuStateLoad_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuStateLoad_HAL(pGpu, arg0) gpuStateLoad(pGpu, arg0)

NV_STATUS gpuStateDestroy_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateDestroy(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateDestroy(pGpu) gpuStateDestroy_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuStateDestroy_HAL(pGpu) gpuStateDestroy(pGpu)

// Unsupported-operation stub: overrides are not available in this build, so
// the call fails uniformly with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuApplyOverrides_46f6a7(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuApplyOverrides(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuApplyOverrides(pGpu, arg0, arg1) gpuApplyOverrides_46f6a7(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuApplyOverrides_HAL(pGpu, arg0, arg1) gpuApplyOverrides(pGpu, arg0, arg1)

//
// NVOC-generated HAL dispatch stanzas. Each routine follows one pattern:
//   - inline stub implementation(s); the hash suffix encodes the body
//     (_56cd7a: return NV_OK, _46f6a7: return NV_ERR_NOT_SUPPORTED,
//      _491d52: return NV_FALSE),
//   - an inline stub that asserts and fails when OBJGPU support is compiled
//     out (__nvoc_gpu_h_disabled),
//   - otherwise a #define routing the plain name to the selected
//     implementation for this build configuration,
//   - a _HAL alias forwarding to the plain name.
//

// gpuInitDevinitOverridesFromRegistry — no-op here: always NV_OK.
static inline NV_STATUS gpuInitDevinitOverridesFromRegistry_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitDevinitOverridesFromRegistry(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitDevinitOverridesFromRegistry(pGpu) gpuInitDevinitOverridesFromRegistry_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitDevinitOverridesFromRegistry_HAL(pGpu) gpuInitDevinitOverridesFromRegistry(pGpu)

// gpuApplyDevinitReg032Override — not supported in this configuration.
static inline NV_STATUS gpuApplyDevinitReg032Override_46f6a7(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuApplyDevinitReg032Override(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuApplyDevinitReg032Override(pGpu, arg0, arg1) gpuApplyDevinitReg032Override_46f6a7(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuApplyDevinitReg032Override_HAL(pGpu, arg0, arg1) gpuApplyDevinitReg032Override(pGpu, arg0, arg1)

// gpuCheckPCIIDMismatch — no-op here: always NV_OK.
static inline NV_STATUS gpuCheckPCIIDMismatch_56cd7a(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCheckPCIIDMismatch(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckPCIIDMismatch(pGpu, arg0) gpuCheckPCIIDMismatch_56cd7a(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckPCIIDMismatch_HAL(pGpu, arg0) gpuCheckPCIIDMismatch(pGpu, arg0)

// gpuCheckGpuIDMismatch — stub: always reports no mismatch (NV_FALSE).
static inline NvBool gpuCheckGpuIDMismatch_491d52(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    return ((NvBool)(0 != 0));
}

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckGpuIDMismatch(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckGpuIDMismatch(pGpu, arg0, arg1) gpuCheckGpuIDMismatch_491d52(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckGpuIDMismatch_HAL(pGpu, arg0, arg1) gpuCheckGpuIDMismatch(pGpu, arg0, arg1)

// gpuPowerManagementEnterPreUnloadPhysical — the NV_OK stub is selected here;
// the _IMPL prototype is the physical-RM implementation (presumably selected
// in other build configurations — confirm against the NVOC descriptor).
static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerManagementEnterPreUnloadPhysical(pGpu) gpuPowerManagementEnterPreUnloadPhysical_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerManagementEnterPreUnloadPhysical_HAL(pGpu) gpuPowerManagementEnterPreUnloadPhysical(pGpu)

// gpuPowerManagementEnterPostUnloadPhysical — NV_OK stub selected; _IMPL is
// the physical-RM implementation prototype.
static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 newLevel) {
    return NV_OK;
}

NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_IMPL(struct OBJGPU *pGpu, NvU32 newLevel);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical(struct OBJGPU *pGpu, NvU32 newLevel) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical_56cd7a(pGpu, newLevel)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerManagementEnterPostUnloadPhysical_HAL(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel)

// gpuPowerManagementResumePreLoadPhysical — NV_OK stub selected; _IMPL is the
// physical-RM implementation prototype.
static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) {
    return NV_OK;
}

NV_STATUS gpuPowerManagementResumePreLoadPhysical_IMPL(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical_56cd7a(pGpu, oldLevel, flags)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerManagementResumePreLoadPhysical_HAL(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags)

// gpuPowerManagementResumePostLoadPhysical — NV_OK stub selected; _IMPL is
// the physical-RM implementation prototype.
static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPowerManagementResumePostLoadPhysical_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerManagementResumePostLoadPhysical(pGpu) gpuPowerManagementResumePostLoadPhysical_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerManagementResumePostLoadPhysical_HAL(pGpu) gpuPowerManagementResumePostLoadPhysical(pGpu)

//
// NVOC-generated HAL dispatch stanzas (continued). Suffix key for the stubs
// visible below: _46f6a7 returns NV_ERR_NOT_SUPPORTED, _b3696a is a void
// no-op. _KERNEL / _FWCLIENT / _IMPL name real implementations declared here
// and defined elsewhere; this configuration routes the plain name to the
// variant shown in the enabled-path #define.
//

// gpuInitializeMemDescFromPromotedCtx — not supported in this configuration.
static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx_46f6a7(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx_46f6a7(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin)
#endif //__nvoc_gpu_h_disabled

#define gpuInitializeMemDescFromPromotedCtx_HAL(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin)

// gpuGetNameString — routed to the _KERNEL implementation.
NV_STATUS gpuGetNameString_KERNEL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1);

NV_STATUS gpuGetNameString_IMPL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetNameString(struct OBJGPU *pGpu, NvU32 arg0, void *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetNameString(pGpu, arg0, arg1) gpuGetNameString_KERNEL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuGetNameString_HAL(pGpu, arg0, arg1) gpuGetNameString(pGpu, arg0, arg1)

// gpuGetShortNameString — routed to the _KERNEL implementation.
NV_STATUS gpuGetShortNameString_KERNEL(struct OBJGPU *pGpu, NvU8 *arg0);

NV_STATUS gpuGetShortNameString_IMPL(struct OBJGPU *pGpu, NvU8 *arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetShortNameString(struct OBJGPU *pGpu, NvU8 *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetShortNameString(pGpu, arg0) gpuGetShortNameString_KERNEL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetShortNameString_HAL(pGpu, arg0) gpuGetShortNameString(pGpu, arg0)

// gpuInitBranding — routed to the firmware-client implementation.
void gpuInitBranding_FWCLIENT(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuInitBranding(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuInitBranding(pGpu) gpuInitBranding_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitBranding_HAL(pGpu) gpuInitBranding(pGpu)

// gpuDetectBranding — routed to the firmware-client implementation; the
// disabled-path stub returns a zero-initialized BRANDING_TYPE.
BRANDING_TYPE gpuDetectBranding_FWCLIENT(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline BRANDING_TYPE gpuDetectBranding(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    BRANDING_TYPE ret;
    portMemSet(&ret, 0, sizeof(BRANDING_TYPE));
    return ret;
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectBranding(pGpu) gpuDetectBranding_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectBranding_HAL(pGpu) gpuDetectBranding(pGpu)

// gpuDetectComputeBranding — routed to the firmware-client implementation.
COMPUTE_BRANDING_TYPE gpuDetectComputeBranding_FWCLIENT(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline COMPUTE_BRANDING_TYPE gpuDetectComputeBranding(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    COMPUTE_BRANDING_TYPE ret;
    portMemSet(&ret, 0, sizeof(COMPUTE_BRANDING_TYPE));
    return ret;
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectComputeBranding(pGpu) gpuDetectComputeBranding_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectComputeBranding_HAL(pGpu) gpuDetectComputeBranding(pGpu)

// gpuDetectVgxBranding — routed to the firmware-client implementation.
BRANDING_TYPE gpuDetectVgxBranding_FWCLIENT(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline BRANDING_TYPE gpuDetectVgxBranding(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    BRANDING_TYPE ret;
    portMemSet(&ret, 0, sizeof(BRANDING_TYPE));
    return ret;
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectVgxBranding(pGpu) gpuDetectVgxBranding_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectVgxBranding_HAL(pGpu) gpuDetectVgxBranding(pGpu)

// gpuInitProperties — routed to the firmware-client implementation.
void gpuInitProperties_FWCLIENT(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuInitProperties(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuInitProperties(pGpu) gpuInitProperties_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitProperties_HAL(pGpu) gpuInitProperties(pGpu)

// gpuSetThreadBcState — no-op in this configuration.
static inline void gpuSetThreadBcState_b3696a(struct OBJGPU *pGpu, NvBool arg0) {
    return;
}

#ifdef __nvoc_gpu_h_disabled
static inline void gpuSetThreadBcState(struct OBJGPU *pGpu, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuSetThreadBcState(pGpu, arg0) gpuSetThreadBcState_b3696a(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuSetThreadBcState_HAL(pGpu, arg0) gpuSetThreadBcState(pGpu, arg0)

// gpuDeterminePersistantIllumSettings — no-op in this configuration.
static inline void gpuDeterminePersistantIllumSettings_b3696a(struct OBJGPU *pGpu) {
    return;
}

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDeterminePersistantIllumSettings(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDeterminePersistantIllumSettings(pGpu) gpuDeterminePersistantIllumSettings_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDeterminePersistantIllumSettings_HAL(pGpu) gpuDeterminePersistantIllumSettings(pGpu)

//
// NVOC-generated HAL dispatch stanzas (continued). Suffix key for the stubs
// visible below: _46f6a7 returns NV_ERR_NOT_SUPPORTED, _b3696a is a void
// no-op, _395e98 returns NV_ERR_NOT_SUPPORTED. _IMPL names a real
// implementation declared here and defined elsewhere.
//

// gpuInitSliIllumination — not supported in this configuration.
static inline NV_STATUS gpuInitSliIllumination_46f6a7(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitSliIllumination(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitSliIllumination(pGpu) gpuInitSliIllumination_46f6a7(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitSliIllumination_HAL(pGpu) gpuInitSliIllumination(pGpu)

// gpuBuildGenericKernelFalconList — routed to the _IMPL implementation.
NV_STATUS gpuBuildGenericKernelFalconList_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBuildGenericKernelFalconList(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBuildGenericKernelFalconList(pGpu) gpuBuildGenericKernelFalconList_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuBuildGenericKernelFalconList_HAL(pGpu) gpuBuildGenericKernelFalconList(pGpu)

// gpuDestroyGenericKernelFalconList — routed to the _IMPL implementation.
void gpuDestroyGenericKernelFalconList_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyGenericKernelFalconList(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyGenericKernelFalconList(pGpu) gpuDestroyGenericKernelFalconList_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestroyGenericKernelFalconList_HAL(pGpu) gpuDestroyGenericKernelFalconList(pGpu)

// gpuBuildKernelVideoEngineList — routed to the _IMPL implementation.
NV_STATUS gpuBuildKernelVideoEngineList_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBuildKernelVideoEngineList(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBuildKernelVideoEngineList(pGpu) gpuBuildKernelVideoEngineList_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuBuildKernelVideoEngineList_HAL(pGpu) gpuBuildKernelVideoEngineList(pGpu)

// gpuInitVideoLogging — routed to the _IMPL implementation.
NV_STATUS gpuInitVideoLogging_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitVideoLogging(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitVideoLogging(pGpu) gpuInitVideoLogging_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitVideoLogging_HAL(pGpu) gpuInitVideoLogging(pGpu)

// gpuFreeVideoLogging — routed to the _IMPL implementation.
void gpuFreeVideoLogging_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuFreeVideoLogging(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuFreeVideoLogging(pGpu) gpuFreeVideoLogging_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuFreeVideoLogging_HAL(pGpu) gpuFreeVideoLogging(pGpu)

// gpuDestroyKernelVideoEngineList — routed to the _IMPL implementation.
void gpuDestroyKernelVideoEngineList_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyKernelVideoEngineList(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyKernelVideoEngineList(pGpu) gpuDestroyKernelVideoEngineList_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestroyKernelVideoEngineList_HAL(pGpu) gpuDestroyKernelVideoEngineList(pGpu)

// gpuGetGenericKernelFalconForEngine — routed to the _IMPL implementation;
// the disabled-path stub returns NULL.
struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);

#ifdef __nvoc_gpu_h_disabled
static inline struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetGenericKernelFalconForEngine(pGpu, arg0) gpuGetGenericKernelFalconForEngine_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetGenericKernelFalconForEngine_HAL(pGpu, arg0) gpuGetGenericKernelFalconForEngine(pGpu, arg0)

// gpuRegisterGenericKernelFalconIntrService — routed to the _IMPL
// implementation.
void gpuRegisterGenericKernelFalconIntrService_IMPL(struct OBJGPU *pGpu, void *pRecords);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuRegisterGenericKernelFalconIntrService(struct OBJGPU *pGpu, void *pRecords) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService_IMPL(pGpu, pRecords)
#endif //__nvoc_gpu_h_disabled

#define gpuRegisterGenericKernelFalconIntrService_HAL(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords)

// gpuGetHwDefaults — no-op in this configuration.
static inline void gpuGetHwDefaults_b3696a(struct OBJGPU *pGpu) {
    return;
}

#ifdef __nvoc_gpu_h_disabled
static inline void gpuGetHwDefaults(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuGetHwDefaults(pGpu) gpuGetHwDefaults_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetHwDefaults_HAL(pGpu) gpuGetHwDefaults(pGpu)

// gpuGetDmaEndAddress — routed to the _IMPL implementation; the disabled-path
// stub returns a zero-initialized RmPhysAddr.
RmPhysAddr gpuGetDmaEndAddress_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline RmPhysAddr gpuGetDmaEndAddress(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    RmPhysAddr ret;
    portMemSet(&ret, 0, sizeof(RmPhysAddr));
    return ret;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDmaEndAddress(pGpu) gpuGetDmaEndAddress_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetDmaEndAddress_HAL(pGpu) gpuGetDmaEndAddress(pGpu)

// gpuSetStateResetRequired — not supported in this configuration.
static inline NV_STATUS gpuSetStateResetRequired_395e98(struct OBJGPU *pGpu, NvU32 exceptType) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetStateResetRequired(struct OBJGPU *pGpu, NvU32 exceptType) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetStateResetRequired(pGpu, exceptType) gpuSetStateResetRequired_395e98(pGpu, exceptType)
#endif //__nvoc_gpu_h_disabled

#define gpuSetStateResetRequired_HAL(pGpu, exceptType) gpuSetStateResetRequired(pGpu, exceptType)

// gpuMarkDeviceForReset — not supported in this configuration.
static inline NV_STATUS gpuMarkDeviceForReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuMarkDeviceForReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuMarkDeviceForReset(pGpu) gpuMarkDeviceForReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuMarkDeviceForReset_HAL(pGpu) gpuMarkDeviceForReset(pGpu)

// gpuUnmarkDeviceForReset — not supported in this configuration.
static inline NV_STATUS gpuUnmarkDeviceForReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuUnmarkDeviceForReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuUnmarkDeviceForReset(pGpu) gpuUnmarkDeviceForReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuUnmarkDeviceForReset_HAL(pGpu) gpuUnmarkDeviceForReset(pGpu)

//
// NVOC-generated HAL dispatch stanzas (continued). Suffix key for the stubs
// visible below: _395e98 returns NV_ERR_NOT_SUPPORTED, _4a4dee returns 0,
// _56cd7a returns NV_OK, _b3696a is a void no-op; _82f166/_244f65 clear the
// output NvBool and return NV_ERR_NOT_SUPPORTED.
//

// gpuIsDeviceMarkedForReset — stub: reports "not marked" and returns
// NV_ERR_NOT_SUPPORTED (braces/semicolon are generator artifacts).
static inline NV_STATUS gpuIsDeviceMarkedForReset_82f166(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
    *pbResetRequired = ((NvBool)(0 != 0));
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    ;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuIsDeviceMarkedForReset(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsDeviceMarkedForReset(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset_82f166(pGpu, pbResetRequired)
#endif //__nvoc_gpu_h_disabled

#define gpuIsDeviceMarkedForReset_HAL(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset(pGpu, pbResetRequired)

// gpuMarkDeviceForDrainAndReset — not supported in this configuration.
static inline NV_STATUS gpuMarkDeviceForDrainAndReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuMarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuMarkDeviceForDrainAndReset(pGpu) gpuMarkDeviceForDrainAndReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuMarkDeviceForDrainAndReset_HAL(pGpu) gpuMarkDeviceForDrainAndReset(pGpu)

// gpuUnmarkDeviceForDrainAndReset — not supported in this configuration.
static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuUnmarkDeviceForDrainAndReset(pGpu) gpuUnmarkDeviceForDrainAndReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuUnmarkDeviceForDrainAndReset_HAL(pGpu) gpuUnmarkDeviceForDrainAndReset(pGpu)

// gpuIsDeviceMarkedForDrainAndReset — stub: reports "drain not recommended"
// and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset_244f65(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
    *pbDrainRecommended = ((NvBool)(0 != 0));
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    ;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset_244f65(pGpu, pbDrainRecommended)
#endif //__nvoc_gpu_h_disabled

#define gpuIsDeviceMarkedForDrainAndReset_HAL(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended)

// gpuGetSliFingerPinsetMask — stub: no SLI finger pinsets (mask 0).
static inline NvU32 gpuGetSliFingerPinsetMask_4a4dee(struct OBJGPU *pGpu) {
    return 0;
}

#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetSliFingerPinsetMask(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSliFingerPinsetMask(pGpu) gpuGetSliFingerPinsetMask_4a4dee(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSliFingerPinsetMask_HAL(pGpu) gpuGetSliFingerPinsetMask(pGpu)

// gpuPrivSecInitRegistryOverrides — no-op here: always NV_OK.
static inline NV_STATUS gpuPrivSecInitRegistryOverrides_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPrivSecInitRegistryOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPrivSecInitRegistryOverrides(pGpu) gpuPrivSecInitRegistryOverrides_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPrivSecInitRegistryOverrides_HAL(pGpu) gpuPrivSecInitRegistryOverrides(pGpu)

// gpuDestroyOverrides — no-op in this configuration.
static inline void gpuDestroyOverrides_b3696a(struct OBJGPU *pGpu) {
    return;
}

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyOverrides(pGpu) gpuDestroyOverrides_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestroyOverrides_HAL(pGpu) gpuDestroyOverrides(pGpu)

// gpuPowerOff — routed to the _KERNEL implementation.
NV_STATUS gpuPowerOff_KERNEL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerOff(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerOff(pGpu) gpuPowerOff_KERNEL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerOff_HAL(pGpu) gpuPowerOff(pGpu)

// gpuSetPower — routed to the GM107 hardware implementation.
NV_STATUS gpuSetPower_GM107(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetPower(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetPower(pGpu, arg1, arg2, arg3) gpuSetPower_GM107(pGpu, arg1, arg2, arg3)
#endif //__nvoc_gpu_h_disabled

#define gpuSetPower_HAL(pGpu, arg1, arg2, arg3) gpuSetPower(pGpu, arg1, arg2, arg3)

// gpuUpdateIdInfo — the no-op stub is selected here; _GK104 is the hardware
// implementation prototype.
static inline void gpuUpdateIdInfo_b3696a(struct OBJGPU *pGpu) {
    return;
}

void gpuUpdateIdInfo_GK104(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuUpdateIdInfo(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuUpdateIdInfo(pGpu) gpuUpdateIdInfo_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuUpdateIdInfo_HAL(pGpu) gpuUpdateIdInfo(pGpu)

// gpuGetDeviceIDList — stub: no device-ID mapping entries (count 0).
static inline NvU32 gpuGetDeviceIDList_4a4dee(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) {
    return 0;
}

#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetDeviceIDList(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDeviceIDList(pGpu, arg0) gpuGetDeviceIDList_4a4dee(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetDeviceIDList_HAL(pGpu, arg0) gpuGetDeviceIDList(pGpu, arg0)

// gpuGenGidData — routed to the firmware-client implementation; _GK104 is the
// hardware implementation prototype.
NV_STATUS gpuGenGidData_FWCLIENT(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);

NV_STATUS gpuGenGidData_GK104(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGenGidData(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_FWCLIENT(pGpu, pGidData, gidSize, gidFlags)
#endif //__nvoc_gpu_h_disabled

#define gpuGenGidData_HAL(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData(pGpu, pGidData, gidSize, gidFlags)

// gpuGetChipSubRev — routed to the firmware-client implementation; _GK104 and
// _GA100 are per-chip hardware implementation prototypes.
NvU8 gpuGetChipSubRev_FWCLIENT(struct OBJGPU *pGpu);

NvU8 gpuGetChipSubRev_GK104(struct OBJGPU *pGpu);

NvU8 gpuGetChipSubRev_GA100(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvU8 gpuGetChipSubRev(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetChipSubRev(pGpu) gpuGetChipSubRev_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetChipSubRev_HAL(pGpu) gpuGetChipSubRev(pGpu)

//
// NVOC-generated HAL dispatch stanzas (continued). Suffix key for the stubs
// visible below: _56cd7a returns NV_OK, _b3696a is a void no-op, _cbe027
// returns NV_TRUE, _72c522 reads a cached OBJGPU field. Chip-suffixed names
// (_GM107, _TU102, _GH100, _GM200, _GA100, ...) are per-chip implementation
// prototypes defined elsewhere.
//

// gpuGetEmulationRev1 — routed to the firmware-client implementation.
NvU32 gpuGetEmulationRev1_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuGetEmulationRev1_GM107(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetEmulationRev1(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetEmulationRev1(pGpu) gpuGetEmulationRev1_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetEmulationRev1_HAL(pGpu) gpuGetEmulationRev1(pGpu)

// gpuPerformUniversalValidation — the NV_OK stub is selected here; _GM107 is
// the hardware implementation prototype.
static inline NV_STATUS gpuPerformUniversalValidation_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPerformUniversalValidation_GM107(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPerformUniversalValidation(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPerformUniversalValidation(pGpu) gpuPerformUniversalValidation_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPerformUniversalValidation_HAL(pGpu) gpuPerformUniversalValidation(pGpu)

// gpuGetVirtRegPhysOffset — routed to the TU102 hardware implementation.
NvU32 gpuGetVirtRegPhysOffset_TU102(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetVirtRegPhysOffset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetVirtRegPhysOffset(pGpu) gpuGetVirtRegPhysOffset_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetVirtRegPhysOffset_HAL(pGpu) gpuGetVirtRegPhysOffset(pGpu)

// gpuGetRegBaseOffset — routed to the firmware-client implementation; _TU102
// is the hardware implementation prototype.
NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);

NV_STATUS gpuGetRegBaseOffset_TU102(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetRegBaseOffset(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetRegBaseOffset(pGpu, arg0, arg1) gpuGetRegBaseOffset_FWCLIENT(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuGetRegBaseOffset_HAL(pGpu, arg0, arg1) gpuGetRegBaseOffset(pGpu, arg0, arg1)

// gpuGetSanityCheckRegReadError — the no-op stub is selected here; _GK104 and
// _GA100 are per-chip implementation prototypes.
static inline void gpuGetSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
    return;
}

void gpuGetSanityCheckRegReadError_GK104(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString);

void gpuGetSanityCheckRegReadError_GA100(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuGetSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError_b3696a(pGpu, value, pErrorString)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSanityCheckRegReadError_HAL(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError(pGpu, value, pErrorString)

// gpuSanityCheckVirtRegAccess — the NV_OK stub is selected here; _TU102 and
// _GH100 are per-chip implementation prototypes.
static inline NV_STATUS gpuSanityCheckVirtRegAccess_56cd7a(struct OBJGPU *pGpu, NvU32 arg0) {
    return NV_OK;
}

NV_STATUS gpuSanityCheckVirtRegAccess_TU102(struct OBJGPU *pGpu, NvU32 arg0);

NV_STATUS gpuSanityCheckVirtRegAccess_GH100(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSanityCheckVirtRegAccess(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSanityCheckVirtRegAccess(pGpu, arg0) gpuSanityCheckVirtRegAccess_56cd7a(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuSanityCheckVirtRegAccess_HAL(pGpu, arg0) gpuSanityCheckVirtRegAccess(pGpu, arg0)

// gpuInitRegistryOverrides — routed to the _KERNEL implementation.
NV_STATUS gpuInitRegistryOverrides_KERNEL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitRegistryOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitRegistryOverrides(pGpu) gpuInitRegistryOverrides_KERNEL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitRegistryOverrides_HAL(pGpu) gpuInitRegistryOverrides(pGpu)

// gpuInitInstLocOverrides — routed to the _IMPL implementation.
NV_STATUS gpuInitInstLocOverrides_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitInstLocOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitInstLocOverrides(pGpu) gpuInitInstLocOverrides_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitInstLocOverrides_HAL(pGpu) gpuInitInstLocOverrides(pGpu)

// gpuGetChildrenOrder — routed to the GM200 implementation; returns the child
// engine construction-order table and writes its entry count.
const GPUCHILDORDER *gpuGetChildrenOrder_GM200(struct OBJGPU *pGpu, NvU32 *pNumEntries);

#ifdef __nvoc_gpu_h_disabled
static inline const GPUCHILDORDER *gpuGetChildrenOrder(struct OBJGPU *pGpu, NvU32 *pNumEntries) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetChildrenOrder(pGpu, pNumEntries) gpuGetChildrenOrder_GM200(pGpu, pNumEntries)
#endif //__nvoc_gpu_h_disabled

#define gpuGetChildrenOrder_HAL(pGpu, pNumEntries) gpuGetChildrenOrder(pGpu, pNumEntries)

// gpuInitSriov — routed to the firmware-client implementation; _TU102 is the
// hardware implementation prototype.
NV_STATUS gpuInitSriov_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuInitSriov_TU102(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitSriov(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitSriov(pGpu) gpuInitSriov_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitSriov_HAL(pGpu) gpuInitSriov(pGpu)

// gpuDeinitSriov — routed to the firmware-client implementation; _TU102 is
// the hardware implementation prototype.
NV_STATUS gpuDeinitSriov_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuDeinitSriov_TU102(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeinitSriov(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeinitSriov(pGpu) gpuDeinitSriov_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDeinitSriov_HAL(pGpu) gpuDeinitSriov(pGpu)

// gpuCreateDefaultClientShare — no-op here: always NV_OK.
static inline NV_STATUS gpuCreateDefaultClientShare_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCreateDefaultClientShare(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCreateDefaultClientShare(pGpu) gpuCreateDefaultClientShare_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCreateDefaultClientShare_HAL(pGpu) gpuCreateDefaultClientShare(pGpu)

// gpuDestroyDefaultClientShare — no-op in this configuration.
static inline void gpuDestroyDefaultClientShare_b3696a(struct OBJGPU *pGpu) {
    return;
}

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyDefaultClientShare(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyDefaultClientShare(pGpu) gpuDestroyDefaultClientShare_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestroyDefaultClientShare_HAL(pGpu) gpuDestroyDefaultClientShare(pGpu)

// gpuGetVmmuSegmentSize — returns the vmmuSegmentSize cached on OBJGPU.
static inline NvU64 gpuGetVmmuSegmentSize_72c522(struct OBJGPU *pGpu) {
    return pGpu->vmmuSegmentSize;
}

#ifdef __nvoc_gpu_h_disabled
static inline NvU64 gpuGetVmmuSegmentSize(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetVmmuSegmentSize(pGpu) gpuGetVmmuSegmentSize_72c522(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetVmmuSegmentSize_HAL(pGpu) gpuGetVmmuSegmentSize(pGpu)

// gpuGetTerminatedLinkMask — routed to the GA100 hardware implementation.
void gpuGetTerminatedLinkMask_GA100(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuGetTerminatedLinkMask(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuGetTerminatedLinkMask(pGpu, arg0) gpuGetTerminatedLinkMask_GA100(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetTerminatedLinkMask_HAL(pGpu, arg0) gpuGetTerminatedLinkMask(pGpu, arg0)

// gpuJtVersionSanityCheck — routed to the TU102 hardware implementation.
NV_STATUS gpuJtVersionSanityCheck_TU102(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuJtVersionSanityCheck(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuJtVersionSanityCheck(pGpu) gpuJtVersionSanityCheck_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuJtVersionSanityCheck_HAL(pGpu) gpuJtVersionSanityCheck(pGpu)

// gpuCompletedGC6PowerOff — stub: always reports power-off completed (NV_TRUE).
static inline NvBool gpuCompletedGC6PowerOff_cbe027(struct OBJGPU *pGpu) {
    return ((NvBool)(0 == 0));
}

NvBool gpuCompletedGC6PowerOff_GV100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCompletedGC6PowerOff(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCompletedGC6PowerOff(pGpu) gpuCompletedGC6PowerOff_cbe027(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCompletedGC6PowerOff_HAL(pGpu) gpuCompletedGC6PowerOff(pGpu)

// Generated constant implementation: always NV_FALSE ((NvBool)(0 != 0)).
static inline NvBool gpuIsACPIPatchRequiredForBug2473619_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsACPIPatchRequiredForBug2473619(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsACPIPatchRequiredForBug2473619(pGpu) gpuIsACPIPatchRequiredForBug2473619_491d52(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsACPIPatchRequiredForBug2473619_HAL(pGpu) gpuIsACPIPatchRequiredForBug2473619(pGpu)

NvU32 gpuGetActiveFBIOs_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuGetActiveFBIOs_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetActiveFBIOs(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetActiveFBIOs(pGpu) gpuGetActiveFBIOs_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetActiveFBIOs_HAL(pGpu) gpuGetActiveFBIOs(pGpu)

// Generated field accessor: reads the debug-mode flag off OBJGPU.
static inline NvBool gpuIsDebuggerActive_8031b9(struct OBJGPU *pGpu) {
    return pGpu->bIsDebugModeEnabled;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsDebuggerActive(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsDebuggerActive(pGpu) gpuIsDebuggerActive_8031b9(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsDebuggerActive_HAL(pGpu) gpuIsDebuggerActive(pGpu)

NV_STATUS gpuExecGrCtxRegops_GK104(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuExecGrCtxRegops(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops_GK104(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw)
#endif //__nvoc_gpu_h_disabled

#define gpuExecGrCtxRegops_HAL(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw)

NV_STATUS gpuExtdevConstruct_GK104(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuExtdevConstruct(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuExtdevConstruct(pGpu) gpuExtdevConstruct_GK104(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuExtdevConstruct_HAL(pGpu) gpuExtdevConstruct(pGpu)

NvU32 gpuReadBAR1Size_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuReadBAR1Size_TU102(struct OBJGPU *pGpu);

NvU32 gpuReadBAR1Size_GH100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuReadBAR1Size(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuReadBAR1Size(pGpu) gpuReadBAR1Size_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuReadBAR1Size_HAL(pGpu) gpuReadBAR1Size(pGpu)

NvBool gpuCheckPageRetirementSupport_GSPCLIENT(struct OBJGPU *pGpu);

NvBool gpuCheckPageRetirementSupport_GV100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckPageRetirementSupport(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckPageRetirementSupport(pGpu) gpuCheckPageRetirementSupport_GSPCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckPageRetirementSupport_HAL(pGpu) gpuCheckPageRetirementSupport(pGpu)

NvBool gpuIsInternalSku_FWCLIENT(struct OBJGPU *pGpu);

NvBool gpuIsInternalSku_GP100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsInternalSku(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsInternalSku(pGpu) gpuIsInternalSku_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsInternalSku_HAL(pGpu) gpuIsInternalSku(pGpu)

NV_STATUS gpuGetSriovCaps_TU102(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetSriovCaps(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSriovCaps(pGpu, arg0) gpuGetSriovCaps_TU102(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSriovCaps_HAL(pGpu, arg0) gpuGetSriovCaps(pGpu, arg0)

// Generated constant implementation: always NV_FALSE.
static inline NvBool gpuCheckIsP2PAllocated_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

NvBool gpuCheckIsP2PAllocated_GA100(struct OBJGPU *pGpu);

// Generated "unsupported" stub: the assert condition is always false, so the
// macro always returns NV_FALSE (PRECOMP assert variants return on failure).
static inline NvBool gpuCheckIsP2PAllocated_108313(struct OBJGPU *pGpu) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0)));
}


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckIsP2PAllocated(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckIsP2PAllocated(pGpu) gpuCheckIsP2PAllocated_491d52(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckIsP2PAllocated_HAL(pGpu) gpuCheckIsP2PAllocated(pGpu)

// Generated no-op implementation: does nothing.
static inline void gpuDecodeDeviceInfoTableGroupId_b3696a(struct OBJGPU *pGpu, DEVICE_INFO2_ENTRY *pEntry, NvU32 *pDeviceAccum) {
    return;
}


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDecodeDeviceInfoTableGroupId(struct OBJGPU *pGpu, DEVICE_INFO2_ENTRY *pEntry, NvU32 *pDeviceAccum) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDecodeDeviceInfoTableGroupId(pGpu, pEntry, pDeviceAccum) gpuDecodeDeviceInfoTableGroupId_b3696a(pGpu, pEntry, pDeviceAccum)
#endif //__nvoc_gpu_h_disabled

#define gpuDecodeDeviceInfoTableGroupId_HAL(pGpu, pEntry, pDeviceAccum) gpuDecodeDeviceInfoTableGroupId(pGpu, pEntry, pDeviceAccum)

// Generated no-op implementation: always succeeds.
static inline NV_STATUS gpuGc6EntryPstateCheck_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGc6EntryPstateCheck(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGc6EntryPstateCheck(pGpu) gpuGc6EntryPstateCheck_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGc6EntryPstateCheck_HAL(pGpu) gpuGc6EntryPstateCheck(pGpu)

// Generated no-op implementation: always succeeds.
static inline NV_STATUS gpuWaitGC6Ready_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuWaitGC6Ready_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuWaitGC6Ready(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuWaitGC6Ready(pGpu) gpuWaitGC6Ready_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuWaitGC6Ready_HAL(pGpu) gpuWaitGC6Ready(pGpu)

// Generated no-op implementation: always succeeds.
static inline NV_STATUS gpuPrePowerOff_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPrePowerOff_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPrePowerOff(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPrePowerOff(pGpu) gpuPrePowerOff_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPrePowerOff_HAL(pGpu) gpuPrePowerOff(pGpu)

NV_STATUS gpuVerifyExistence_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuVerifyExistence(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuVerifyExistence(pGpu) gpuVerifyExistence_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuVerifyExistence_HAL(pGpu) gpuVerifyExistence(pGpu)

// Generated no-op implementation: does nothing.
static inline void gpuResetVFRegisters_b3696a(struct OBJGPU *pGpu, NvU32 gfid) {
    return;
}

void gpuResetVFRegisters_TU102(struct OBJGPU *pGpu, NvU32 gfid);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuResetVFRegisters(struct OBJGPU *pGpu, NvU32 gfid) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuResetVFRegisters(pGpu, gfid) gpuResetVFRegisters_b3696a(pGpu, gfid)
#endif //__nvoc_gpu_h_disabled

#define gpuResetVFRegisters_HAL(pGpu, gfid) gpuResetVFRegisters(pGpu, gfid)

// Generated constant implementation: returns 1.
static inline NvU32 gpuGetSliLinkDetectionHalFlag_539ab4(struct OBJGPU *pGpu) {
    return 1;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetSliLinkDetectionHalFlag(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSliLinkDetectionHalFlag(pGpu) gpuGetSliLinkDetectionHalFlag_539ab4(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSliLinkDetectionHalFlag_HAL(pGpu) gpuGetSliLinkDetectionHalFlag(pGpu)

void gpuDetectSliLinkFromGpus_GK104(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDetectSliLinkFromGpus(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectSliLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectSliLinkFromGpus_GK104(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectSliLinkFromGpus_HAL(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectSliLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)

// Generated constant implementation: returns 2.
static inline NvU32 gpuGetNvlinkLinkDetectionHalFlag_adde13(struct OBJGPU *pGpu) {
    return 2;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetNvlinkLinkDetectionHalFlag(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetNvlinkLinkDetectionHalFlag(pGpu) gpuGetNvlinkLinkDetectionHalFlag_adde13(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetNvlinkLinkDetectionHalFlag_HAL(pGpu) gpuGetNvlinkLinkDetectionHalFlag(pGpu)

void gpuDetectNvlinkLinkFromGpus_GP100(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDetectNvlinkLinkFromGpus(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectNvlinkLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectNvlinkLinkFromGpus_GP100(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectNvlinkLinkFromGpus_HAL(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectNvlinkLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)

NvU32 gpuGetLitterValues_FWCLIENT(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_TU102(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_GA100(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_GA102(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_AD102(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_GH100(struct OBJGPU *pGpu, NvU32 index);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetLitterValues(struct OBJGPU *pGpu, NvU32 index) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetLitterValues(pGpu, index) gpuGetLitterValues_FWCLIENT(pGpu, index)
#endif //__nvoc_gpu_h_disabled

#define gpuGetLitterValues_HAL(pGpu, index) gpuGetLitterValues(pGpu, index)

NvBool gpuIsGlobalPoisonFuseEnabled_FWCLIENT(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsGlobalPoisonFuseEnabled(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsGlobalPoisonFuseEnabled(pGpu) gpuIsGlobalPoisonFuseEnabled_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsGlobalPoisonFuseEnabled_HAL(pGpu) gpuIsGlobalPoisonFuseEnabled(pGpu)

NV_STATUS gpuInitOptimusSettings_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitOptimusSettings(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitOptimusSettings(pGpu) gpuInitOptimusSettings_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitOptimusSettings_HAL(pGpu) gpuInitOptimusSettings(pGpu)

NV_STATUS gpuDeinitOptimusSettings_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeinitOptimusSettings(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeinitOptimusSettings(pGpu) gpuDeinitOptimusSettings_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDeinitOptimusSettings_HAL(pGpu) gpuDeinitOptimusSettings(pGpu)

// Generated no-op implementation: always succeeds.
static inline NV_STATUS gpuSetCacheOnlyModeOverrides_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetCacheOnlyModeOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetCacheOnlyModeOverrides(pGpu) gpuSetCacheOnlyModeOverrides_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuSetCacheOnlyModeOverrides_HAL(pGpu) gpuSetCacheOnlyModeOverrides(pGpu)

NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(struct OBJGPU *arg0, NvU32 *arg1);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetCeFaultMethodBufferSize(struct OBJGPU *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetCeFaultMethodBufferSize(arg0, arg1) gpuGetCeFaultMethodBufferSize_KERNEL(arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuGetCeFaultMethodBufferSize_HAL(arg0, arg1) gpuGetCeFaultMethodBufferSize(arg0, arg1)

// Generated "unsupported" implementation: always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuSetVFBarSizes_46f6a7(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS gpuSetVFBarSizes_GA102(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetVFBarSizes(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetVFBarSizes(pGpu, arg0) gpuSetVFBarSizes_46f6a7(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuSetVFBarSizes_HAL(pGpu, arg0) gpuSetVFBarSizes(pGpu, arg0)

// Generated "unsupported" stub: asserts and returns NULL ((void *)0).
static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId_80f438(struct OBJGPU *pGpu, NvU32 peerGpuId) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
}


#ifdef __nvoc_gpu_h_disabled
static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId(struct OBJGPU *pGpu, NvU32 peerGpuId) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId_80f438(pGpu, peerGpuId)
#endif //__nvoc_gpu_h_disabled

#define gpuFindP2PPeerGpuCapsByGpuId_HAL(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId)

// Generated no-op implementation: always succeeds.
static inline NV_STATUS gpuLoadFailurePathTest_56cd7a(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuLoadFailurePathTest(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest_56cd7a(pGpu, engStage, engDescIdx, bStopTest)
#endif //__nvoc_gpu_h_disabled

#define gpuLoadFailurePathTest_HAL(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest)

//
// NOTE(review): from here on the generated pattern switches from compile-time
// #define binding to runtime dispatch: each _DISPATCH thunk calls through a
// per-object function pointer (pGpu->__name__) set up by NVOC elsewhere.
//

NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu);

// Generated no-op implementation: always succeeds.
static inline NV_STATUS gpuConstructDeviceInfoTable_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuConstructDeviceInfoTable_GA100(struct OBJGPU *pGpu);

static inline NV_STATUS gpuConstructDeviceInfoTable_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuConstructDeviceInfoTable__(pGpu);
}

NV_STATUS gpuWriteBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

NV_STATUS gpuWriteBusConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

static inline NV_STATUS gpuWriteBusConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
    return pGpu->__gpuWriteBusConfigReg__(pGpu, index, value);
}

NV_STATUS gpuReadBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

NV_STATUS gpuReadBusConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

static inline NV_STATUS gpuReadBusConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return pGpu->__gpuReadBusConfigReg__(pGpu, index, data);
}

NV_STATUS gpuReadBusConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState);

// Generated "unsupported" stub: asserts and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuReadBusConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuReadBusConfigRegEx_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
    return pGpu->__gpuReadBusConfigRegEx__(pGpu, index, data, pThreadState);
}

NV_STATUS gpuReadFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data);

// Generated "unsupported" stub: asserts and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuReadFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuReadFunctionConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
    return pGpu->__gpuReadFunctionConfigReg__(pGpu, function, reg, data);
}

NV_STATUS gpuWriteFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data);

// Generated "unsupported" stub: asserts and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuWriteFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuWriteFunctionConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) {
    return pGpu->__gpuWriteFunctionConfigReg__(pGpu, function, reg, data);
}

NV_STATUS gpuWriteFunctionConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState);

// Generated "unsupported" stub: asserts and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuWriteFunctionConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuWriteFunctionConfigRegEx_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) {
    return pGpu->__gpuWriteFunctionConfigRegEx__(pGpu, function, reg, data, pThreadState);
}

NV_STATUS gpuReadVgpuConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

// Generated "unsupported" implementation: always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuReadVgpuConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS gpuReadVgpuConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return pGpu->__gpuReadVgpuConfigReg__(pGpu, index, data);
}

void gpuGetIdInfo_GM107(struct OBJGPU *pGpu);

void gpuGetIdInfo_GH100(struct OBJGPU *pGpu);

static inline void gpuGetIdInfo_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuGetIdInfo__(pGpu);
}

void gpuHandleSanityCheckRegReadError_GM107(struct OBJGPU *pGpu, NvU32 addr, NvU32 value);
void gpuHandleSanityCheckRegReadError_GH100(struct OBJGPU *pGpu, NvU32 addr, NvU32 value);

// NVOC-generated dispatch thunk: calls through the per-object vtable pointer.
static inline void gpuHandleSanityCheckRegReadError_DISPATCH(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) {
    pGpu->__gpuHandleSanityCheckRegReadError__(pGpu, addr, value);
}

void gpuHandleSecFault_GH100(struct OBJGPU *pGpu);

// Generated no-op implementation: does nothing.
static inline void gpuHandleSecFault_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuHandleSecFault_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuHandleSecFault__(pGpu);
}

// Per-chip child-engine presence tables, selected at runtime via _DISPATCH.
const GPUCHILDPRESENT *gpuGetChildrenPresent_TU102(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_TU104(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_TU106(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_GA100(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_GA102(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_AD102(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_GH100(struct OBJGPU *pGpu, NvU32 *pNumEntries);

static inline const GPUCHILDPRESENT *gpuGetChildrenPresent_DISPATCH(struct OBJGPU *pGpu, NvU32 *pNumEntries) {
    return pGpu->__gpuGetChildrenPresent__(pGpu, pNumEntries);
}

// Per-chip class descriptor lists (continued below), selected via _DISPATCH.
const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU102(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU104(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU106(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU117(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA100(struct OBJGPU *pGpu, NvU32 *arg0);

// Remaining per-chip class-descriptor-list variants and their dispatcher.
const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA102(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_AD102(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GH100(struct OBJGPU *pGpu, NvU32 *arg0);

static inline const CLASSDESCRIPTOR *gpuGetClassDescriptorList_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg0) {
    return pGpu->__gpuGetClassDescriptorList__(pGpu, arg0);
}

// HAL: physical address width (in bits) for the given address space.
NvU32 gpuGetPhysAddrWidth_TU102(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0);

NvU32 gpuGetPhysAddrWidth_GH100(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0);

static inline NvU32 gpuGetPhysAddrWidth_DISPATCH(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0) {
    return pGpu->__gpuGetPhysAddrWidth__(pGpu, arg0);
}

// HAL predicate: does the fuse block allow display? Stub hard-codes NV_FALSE.
NvBool gpuFuseSupportsDisplay_GM107(struct OBJGPU *pGpu);

NvBool gpuFuseSupportsDisplay_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuFuseSupportsDisplay_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuFuseSupportsDisplay_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuFuseSupportsDisplay__(pGpu);
}

// WAR for bug 2924523 (GA100 binding); elsewhere the stub is a successful no-op.
NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_GA100(struct OBJGPU *pGpu);

static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuClearFbhubPoisonIntrForBug2924523__(pGpu);
}

// HAL: read device identification words into *arg0/*arg1
// (presumably device/subsystem IDs — names lost by the generator; TODO confirm).
void gpuReadDeviceId_GM107(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1);

void gpuReadDeviceId_GH100(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1);

static inline void gpuReadDeviceId_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    pGpu->__gpuReadDeviceId__(pGpu, arg0, arg1);
}

// HAL: size of the FLA virtual address space (declaration continues on the next source line).
NvU64 gpuGetFlaVasSize_GA100(struct OBJGPU *pGpu,
    NvBool bNvswitchVirtualization);

NvU64 gpuGetFlaVasSize_GH100(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization);

// Unsupported-HAL stub: asserts and returns a zero-sized VAS.
static inline NvU64 gpuGetFlaVasSize_474d46(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, 0);
}

static inline NvU64 gpuGetFlaVasSize_DISPATCH(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) {
    return pGpu->__gpuGetFlaVasSize__(pGpu, bNvswitchVirtualization);
}

// HAL: determine self-hosted mode; distinct PHYSICAL/KERNEL GH100 bindings, no-op stub elsewhere.
void gpuDetermineSelfHostedMode_PHYSICAL_GH100(struct OBJGPU *pGpu);

static inline void gpuDetermineSelfHostedMode_b3696a(struct OBJGPU *pGpu) {
    return;
}

void gpuDetermineSelfHostedMode_KERNEL_GH100(struct OBJGPU *pGpu);

static inline void gpuDetermineSelfHostedMode_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuDetermineSelfHostedMode__(pGpu);
}

// HAL: determine MIG support (GH100 binding); no-op stub elsewhere.
void gpuDetermineMIGSupport_GH100(struct OBJGPU *pGpu);

static inline void gpuDetermineMIGSupport_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuDetermineMIGSupport_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuDetermineMIGSupport__(pGpu);
}

// HAL predicate: ATS supported together with SMC memory partitioning; stub is NV_FALSE.
NvBool gpuIsAtsSupportedWithSmcMemPartitioning_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsAtsSupportedWithSmcMemPartitioning__(pGpu);
}

// Constant HAL bindings: _cbe027 always NV_TRUE, _491d52 always NV_FALSE.
static inline NvBool gpuIsSliCapableWithoutDisplay_cbe027(struct OBJGPU *pGpu) {
    return ((NvBool)(0 == 0));
}

static inline NvBool gpuIsSliCapableWithoutDisplay_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsSliCapableWithoutDisplay_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsSliCapableWithoutDisplay__(pGpu);
}

// HAL predicate: confidential compute enabled in HW (GH100 binding); stub is NV_FALSE.
NvBool gpuIsCCEnabledInHw_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsCCEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsCCEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsCCEnabledInHw__(pGpu);
}

// HAL predicate: dev mode enabled in HW (GH100 binding); stub is NV_FALSE.
NvBool gpuIsDevModeEnabledInHw_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsDevModeEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsDevModeEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsDevModeEnabledInHw__(pGpu);
}

// HAL predicate: context buffers may be allocated in PMA (GA100 binding); stub is NV_FALSE.
NvBool gpuIsCtxBufAllocInPmaSupported_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuIsCtxBufAllocInPmaSupported_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsCtxBufAllocInPmaSupported_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsCtxBufAllocInPmaSupported__(pGpu);
}

// Accessors over pGpu->engineOrder: the per-phase engine descriptor lists and their count.
static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineInitDescriptors;
}

static inline PENGDESCRIPTOR gpuGetLoadEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineLoadDescriptors;
}

static inline PENGDESCRIPTOR gpuGetUnloadEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineUnloadDescriptors;
}

static inline PENGDESCRIPTOR gpuGetDestroyEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineDestroyDescriptors;
}

static inline NvU32 gpuGetNumEngDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.numEngineDescriptors;
}

// Returns 2 while any compute-mode reference is held, else 1
// (NOTE(review): magic 1/2 values — meaning not defined in this header; confirm against callers).
static inline NvU32 gpuGetMode(struct OBJGPU *pGpu) {
    return pGpu->computeModeRefCount > 0 ?
        2 : 1;
}

// Accessors for the cached ACPI _DSM function selectors on pGpu->acpi.
static inline ACPI_DSM_FUNCTION gpuGetDispStatusHotplugFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.dispStatusHotplugFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetDispStatusConfigFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.dispStatusConfigFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetPerfPostPowerStateFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.perfPostPowerStateFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetStereo3dStateActiveFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.stereo3dStateActiveFunc;
}

// Cached PMC boot-0 chip identifier (stored in chipId0).
static inline NvU32 gpuGetPmcBoot0(struct OBJGPU *pGpu) {
    return pGpu->chipId0;
}

// Always NULL in this build; kept so callers have a uniform accessor.
static inline struct OBJFIFO *gpuGetFifoShared(struct OBJGPU *pGpu) {
    return ((void *)0);
}

// Fresh zero-initialized iterator for walking GPU child engstates.
static inline ENGSTATE_ITER gpuGetEngstateIter(struct OBJGPU *pGpu) {
    GPU_CHILD_ITER it = { 0 };
    return it;
}

static inline RmPhysAddr gpuGetDmaStartAddress(struct OBJGPU *pGpu) {
    return pGpu->dmaStartAddress;
}

// Trivial success — nothing is allocated for event handles here.
static inline NV_STATUS gpuFreeEventHandle(struct OBJGPU *pGpu) {
    return NV_OK;
}

// Chip revision/implementation accessors over pGpu->chipInfo (pmcBoot42 decode).
static inline NvU32 gpuGetChipMajRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.majorRev;
}

static inline NvU32 gpuGetChipMinRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.minorRev;
}

static inline NvU32 gpuGetChipImpl(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.implementationId;
}

// NOTE(review): "Arch" accessor reads chipInfo.platformId — naming/field mismatch worth confirming.
static inline NvU32 gpuGetChipArch(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.platformId;
}

static inline NvU32 gpuGetChipMinExtRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.minorExtRev;
}

static inline NvBool gpuIsVideoLinkDisabled(struct OBJGPU *pGpu) {
    return pGpu->bVideoLinkDisabled;
}

// Cached chip-info control params (return type continues on the next source line).
static inline const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS
    *gpuGetChipInfo(struct OBJGPU *pGpu) {
    return pGpu->pChipInfo;
}

// Thin NvBool getters over OBJGPU state/feature flags — one flag each, no side effects.
static inline NvBool gpuIsBar2MovedByVtd(struct OBJGPU *pGpu) {
    return pGpu->bBar2MovedByVtd;
}

static inline NvBool gpuIsBar1Size64Bit(struct OBJGPU *pGpu) {
    return pGpu->bBar1Is64Bit;
}

static inline NvBool gpuIsSurpriseRemovalSupported(struct OBJGPU *pGpu) {
    return pGpu->bSurpriseRemovalSupported;
}

static inline NvBool gpuIsReplayableTraceEnabled(struct OBJGPU *pGpu) {
    return pGpu->bReplayableTraceEnabled;
}

// State-machine flags: loading / unloading / loaded / fully constructed.
static inline NvBool gpuIsStateLoading(struct OBJGPU *pGpu) {
    return pGpu->bStateLoading;
}

static inline NvBool gpuIsStateUnloading(struct OBJGPU *pGpu) {
    return pGpu->bStateUnloading;
}

static inline NvBool gpuIsStateLoaded(struct OBJGPU *pGpu) {
    return pGpu->bStateLoaded;
}

static inline NvBool gpuIsFullyConstructed(struct OBJGPU *pGpu) {
    return pGpu->bFullyConstructed;
}

static inline NvBool gpuIsUnifiedMemorySpaceEnabled(struct OBJGPU *pGpu) {
    return pGpu->bUnifiedMemorySpaceEnabled;
}

// Public name omits the Bf3 prefix of the backing field.
static inline NvBool gpuIsWarBug4040336Enabled(struct OBJGPU *pGpu) {
    return pGpu->bBf3WarBug4040336Enabled;
}

static inline NvBool gpuIsSriovEnabled(struct OBJGPU *pGpu) {
    return pGpu->bSriovEnabled;
}

static inline NvBool gpuIsCacheOnlyModeEnabled(struct OBJGPU *pGpu) {
    return pGpu->bCacheOnlyMode;
}

static inline NvBool gpuIsSplitVasManagementServerClientRmEnabled(struct OBJGPU *pGpu) {
    return pGpu->bSplitVasManagementServerClientRm;
}

static inline NvBool gpuIsWarBug200577889SriovHeavyEnabled(struct OBJGPU *pGpu) {
    return pGpu->bWarBug200577889SriovHeavyEnabled;
}

static inline NvBool gpuIsPipelinedPteMemEnabled(struct OBJGPU *pGpu) {
    return pGpu->bPipelinedPteMemEnabled;
}

static inline
NvBool gpuIsBarPteInSysmemSupported(struct OBJGPU *pGpu) {
    return pGpu->bIsBarPteInSysmemSupported;
}

// More one-flag NvBool getters over OBJGPU state.
static inline NvBool gpuIsRegUsesGlobalSurfaceOverridesEnabled(struct OBJGPU *pGpu) {
    return pGpu->bRegUsesGlobalSurfaceOverrides;
}

static inline NvBool gpuIsTwoStageRcRecoveryEnabled(struct OBJGPU *pGpu) {
    return pGpu->bTwoStageRcRecoveryEnabled;
}

static inline NvBool gpuIsInD3Cold(struct OBJGPU *pGpu) {
    return pGpu->bInD3Cold;
}

static inline NvBool gpuIsClientRmAllocatedCtxBufferEnabled(struct OBJGPU *pGpu) {
    return pGpu->bClientRmAllocatedCtxBuffer;
}

static inline NvBool gpuIsIterativeMmuWalkerEnabled(struct OBJGPU *pGpu) {
    return pGpu->bIterativeMmuWalker;
}

static inline NvBool gpuIsEccPageRetirementWithSliAllowed(struct OBJGPU *pGpu) {
    return pGpu->bEccPageRetirementWithSliAllowed;
}

static inline NvBool gpuIsVidmemPreservationBrokenBug3172217(struct OBJGPU *pGpu) {
    return pGpu->bVidmemPreservationBrokenBug3172217;
}

static inline NvBool gpuIsInstanceMemoryAlwaysCached(struct OBJGPU *pGpu) {
    return pGpu->bInstanceMemoryAlwaysCached;
}

static inline NvBool gpuIsRmProfilingPrivileged(struct OBJGPU *pGpu) {
    return pGpu->bRmProfilingPrivileged;
}

// Branding flags (board SKU classification).
static inline NvBool gpuIsGeforceSmb(struct OBJGPU *pGpu) {
    return pGpu->bGeforceSmb;
}

static inline NvBool gpuIsGeforceBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsGeforce;
}

static inline NvBool gpuIsQuadroBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsQuadro;
}

static inline NvBool gpuIsVgxBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsVgx;
}

static inline NvBool gpuIsACBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsAC;
}

static inline NvBool gpuIsNvidiaNvsBranded(struct OBJGPU *pGpu) {
    return
        pGpu->bIsNvidiaNvs;
}

// Remaining branding / capability flag getters.
static inline NvBool gpuIsTitanBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsTitan;
}

static inline NvBool gpuIsTeslaBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsTesla;
}

static inline NvBool gpuIsComputePolicyTimesliceSupported(struct OBJGPU *pGpu) {
    return pGpu->bComputePolicyTimesliceSupported;
}

static inline NvBool gpuIsSriovCapable(struct OBJGPU *pGpu) {
    return pGpu->bSriovCapable;
}

static inline NvBool gpuIsNonPowerOf2ChannelCountSupported(struct OBJGPU *pGpu) {
    return pGpu->bNonPowerOf2ChannelCountSupported;
}

static inline NvBool gpuIsSelfHosted(struct OBJGPU *pGpu) {
    return pGpu->bIsSelfHosted;
}

static inline NvBool gpuIsGspOwnedFaultBuffersEnabled(struct OBJGPU *pGpu) {
    return pGpu->bIsGspOwnedFaultBuffersEnabled;
}

// NVOC constructor hook for OBJGPU.
NV_STATUS gpuConstruct_IMPL(struct OBJGPU *arg_pGpu, NvU32 arg_gpuInstance);

#define __nvoc_gpuConstruct(arg_pGpu, arg_gpuInstance) gpuConstruct_IMPL(arg_pGpu, arg_gpuInstance)
// Bind the legacy HAL from the raw chip IDs.
NV_STATUS gpuBindHalLegacy_IMPL(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0);

// When OBJGPU is compiled out (__nvoc_gpu_h_disabled), each API degrades to an
// assert-and-fail stub; otherwise it is a macro alias for the _IMPL.
#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBindHalLegacy(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBindHalLegacy(pGpu, chipId0, chipId1, socChipId0) gpuBindHalLegacy_IMPL(pGpu, chipId0, chipId1, socChipId0)
#endif //__nvoc_gpu_h_disabled

// Post-construction setup driven by the attach arguments.
NV_STATUS gpuPostConstruct_IMPL(struct OBJGPU *pGpu, GPUATTACHARG *arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPostConstruct(struct OBJGPU *pGpu, GPUATTACHARG *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else
//__nvoc_gpu_h_disabled 3567 #define gpuPostConstruct(pGpu, arg0) gpuPostConstruct_IMPL(pGpu, arg0) 3568 #endif //__nvoc_gpu_h_disabled 3569 3570 NV_STATUS gpuCreateObject_IMPL(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1); 3571 3572 #ifdef __nvoc_gpu_h_disabled 3573 static inline NV_STATUS gpuCreateObject(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1) { 3574 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3575 return NV_ERR_NOT_SUPPORTED; 3576 } 3577 #else //__nvoc_gpu_h_disabled 3578 #define gpuCreateObject(pGpu, arg0, arg1) gpuCreateObject_IMPL(pGpu, arg0, arg1) 3579 #endif //__nvoc_gpu_h_disabled 3580 3581 void gpuDestruct_IMPL(struct OBJGPU *pGpu); 3582 3583 #define __nvoc_gpuDestruct(pGpu) gpuDestruct_IMPL(pGpu) 3584 NV_STATUS gpuStateInit_IMPL(struct OBJGPU *pGpu); 3585 3586 #ifdef __nvoc_gpu_h_disabled 3587 static inline NV_STATUS gpuStateInit(struct OBJGPU *pGpu) { 3588 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3589 return NV_ERR_NOT_SUPPORTED; 3590 } 3591 #else //__nvoc_gpu_h_disabled 3592 #define gpuStateInit(pGpu) gpuStateInit_IMPL(pGpu) 3593 #endif //__nvoc_gpu_h_disabled 3594 3595 NV_STATUS gpuStateUnload_IMPL(struct OBJGPU *pGpu, NvU32 arg0); 3596 3597 #ifdef __nvoc_gpu_h_disabled 3598 static inline NV_STATUS gpuStateUnload(struct OBJGPU *pGpu, NvU32 arg0) { 3599 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3600 return NV_ERR_NOT_SUPPORTED; 3601 } 3602 #else //__nvoc_gpu_h_disabled 3603 #define gpuStateUnload(pGpu, arg0) gpuStateUnload_IMPL(pGpu, arg0) 3604 #endif //__nvoc_gpu_h_disabled 3605 3606 NV_STATUS gpuInitDispIpHal_IMPL(struct OBJGPU *pGpu, NvU32 ipver); 3607 3608 #ifdef __nvoc_gpu_h_disabled 3609 static inline NV_STATUS gpuInitDispIpHal(struct OBJGPU *pGpu, NvU32 ipver) { 3610 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3611 return NV_ERR_NOT_SUPPORTED; 3612 } 3613 #else //__nvoc_gpu_h_disabled 3614 #define gpuInitDispIpHal(pGpu, ipver) gpuInitDispIpHal_IMPL(pGpu, ipver) 3615 #endif //__nvoc_gpu_h_disabled 
// Service pending interrupts across all GPUs.
void gpuServiceInterruptsAllGpus_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuServiceInterruptsAllGpus(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuServiceInterruptsAllGpus(pGpu) gpuServiceInterruptsAllGpus_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

// Is this GPU exactly the given HAL implementation (with extra qualifiers arg1/arg2)?
NvBool gpuIsImplementation_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsImplementation(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsImplementation(pGpu, arg0, arg1, arg2) gpuIsImplementation_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

// Is this GPU the given implementation or a newer one?
NvBool gpuIsImplementationOrBetter_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsImplementationOrBetter(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsImplementationOrBetter(pGpu, arg0, arg1, arg2) gpuIsImplementationOrBetter_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

// Power-state queries.
NvBool gpuIsGpuFullPower_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsGpuFullPower(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsGpuFullPower(pGpu) gpuIsGpuFullPower_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

// Full-power check variant used on the PM-resume path.
NvBool gpuIsGpuFullPowerForPmResume_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsGpuFullPowerForPmResume(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsGpuFullPowerForPmResume(pGpu) gpuIsGpuFullPowerForPmResume_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

// Look up a device-info table entry by device type / group / instance;
// *ppDeviceEntry receives a pointer into the table.
NV_STATUS gpuGetDeviceEntryByType_IMPL(struct OBJGPU *pGpu, NvU32 deviceTypeEnum, NvS32 groupId, NvU32 instanceId, const DEVICE_INFO2_ENTRY **ppDeviceEntry);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetDeviceEntryByType(struct OBJGPU *pGpu, NvU32 deviceTypeEnum, NvS32 groupId, NvU32 instanceId, const DEVICE_INFO2_ENTRY **ppDeviceEntry) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDeviceEntryByType(pGpu, deviceTypeEnum, groupId, instanceId, ppDeviceEntry) gpuGetDeviceEntryByType_IMPL(pGpu, deviceTypeEnum, groupId, instanceId, ppDeviceEntry)
#endif //__nvoc_gpu_h_disabled

// Class database lifecycle: build / destroy.
NV_STATUS gpuBuildClassDB_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBuildClassDB(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBuildClassDB(pGpu) gpuBuildClassDB_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDestroyClassDB_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDestroyClassDB(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyClassDB(pGpu) gpuDestroyClassDB_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

// Remove an engine (arg0: engine descriptor — name lost by generator) from the class DB.
NV_STATUS gpuDeleteEngineFromClassDB_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline
NV_STATUS gpuDeleteEngineFromClassDB(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteEngineFromClassDB(pGpu, arg0) gpuDeleteEngineFromClassDB_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

// Remove an engine from the class DB during pre-init.
NV_STATUS gpuDeleteEngineOnPreInit_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteEngineOnPreInit(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteEngineOnPreInit(pGpu, arg0) gpuDeleteEngineOnPreInit_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

// Class DB insertion, keyed by engine tag or by class id.
NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuAddClassToClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddClassToClassDBByEngTag(pGpu, arg0) gpuAddClassToClassDBByEngTag_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuAddClassToClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuAddClassToClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddClassToClassDBByClassId(pGpu, arg0) gpuAddClassToClassDBByClassId_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

// Insertion keyed by (engine tag, class id) pair.
NV_STATUS gpuAddClassToClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS
    gpuAddClassToClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddClassToClassDBByEngTagClassId(pGpu, arg0, arg1) gpuAddClassToClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

// Class DB removal, keyed by class id, engine tag, or the pair.
NV_STATUS gpuDeleteClassFromClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteClassFromClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteClassFromClassDBByClassId(pGpu, arg0) gpuDeleteClassFromClassDBByClassId_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDeleteClassFromClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteClassFromClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteClassFromClassDBByEngTag(pGpu, arg0) gpuDeleteClassFromClassDBByEngTag_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteClassFromClassDBByEngTagClassId(pGpu, arg0, arg1) gpuDeleteClassFromClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

// Class support query (declaration continues on the next source line).
NvBool
    gpuIsClassSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsClassSupported(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsClassSupported(pGpu, arg0) gpuIsClassSupported_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

// Look up a class descriptor by class id; *arg1 receives the descriptor pointer.
NV_STATUS gpuGetClassByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetClassByClassId(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetClassByClassId(pGpu, arg0, arg1) gpuGetClassByClassId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

// Look up a class descriptor by (engine, class id); *arg2 receives the descriptor pointer.
NV_STATUS gpuGetClassByEngineAndClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetClassByEngineAndClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetClassByEngineAndClassId(pGpu, arg0, arg1, arg2) gpuGetClassByEngineAndClassId_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

// Enumerate supported classes (arg0/arg1/arg2 semantics defined by the _IMPL — not visible here).
NV_STATUS gpuGetClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetClassList(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetClassList(pGpu, arg0, arg1, arg2) gpuGetClassList_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

// Engine table lifecycle: construct / destroy / update / membership check.
NV_STATUS gpuConstructEngineTable_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuConstructEngineTable(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuConstructEngineTable(pGpu) gpuConstructEngineTable_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

void gpuDestroyEngineTable_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyEngineTable(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyEngineTable(pGpu) gpuDestroyEngineTable_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuUpdateEngineTable_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuUpdateEngineTable(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuUpdateEngineTable(pGpu) gpuUpdateEngineTable_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

// Is the given RM engine type present in the engine table?
NvBool gpuCheckEngineTable_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckEngineTable(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckEngineTable(pGpu, arg0) gpuCheckEngineTable_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

// Translate an engine descriptor to the client-visible RM engine type.
NV_STATUS gpuXlateEngDescToClientEngineId_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, RM_ENGINE_TYPE *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuXlateEngDescToClientEngineId(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, RM_ENGINE_TYPE *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuXlateEngDescToClientEngineId(pGpu, arg0, arg1) gpuXlateEngDescToClientEngineId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

// Reverse translation: client RM engine type -> engine descriptor.
NV_STATUS gpuXlateClientEngineIdToEngDesc_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, ENGDESCRIPTOR *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuXlateClientEngineIdToEngDesc(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, ENGDESCRIPTOR *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuXlateClientEngineIdToEngDesc(pGpu, arg0, arg1) gpuXlateClientEngineIdToEngDesc_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

// Get the Falcon object backing a client RM engine type; *arg1 receives the pointer.
NV_STATUS gpuGetFlcnFromClientEngineId_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, struct Falcon **arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetFlcnFromClientEngineId(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, struct Falcon **arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetFlcnFromClientEngineId(pGpu, arg0, arg1) gpuGetFlcnFromClientEngineId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

// Is the engine descriptor supported on this GPU?
NvBool gpuIsEngDescSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsEngDescSupported(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsEngDescSupported(pGpu, arg0) gpuIsEngDescSupported_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

// Read one dword from PCI config space at `index`; result in *pData.
NV_STATUS gpuReadBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuReadBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData) { 3927 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3928 return NV_ERR_NOT_SUPPORTED; 3929 } 3930 #else //__nvoc_gpu_h_disabled 3931 #define gpuReadBusConfigCycle(pGpu, index, pData) gpuReadBusConfigCycle_IMPL(pGpu, index, pData) 3932 #endif //__nvoc_gpu_h_disabled 3933 3934 NV_STATUS gpuWriteBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 value); 3935 3936 #ifdef __nvoc_gpu_h_disabled 3937 static inline NV_STATUS gpuWriteBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 value) { 3938 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3939 return NV_ERR_NOT_SUPPORTED; 3940 } 3941 #else //__nvoc_gpu_h_disabled 3942 #define gpuWriteBusConfigCycle(pGpu, index, value) gpuWriteBusConfigCycle_IMPL(pGpu, index, value) 3943 #endif //__nvoc_gpu_h_disabled 3944 3945 RM_ENGINE_TYPE gpuGetRmEngineType_IMPL(NvU32 index); 3946 3947 #define gpuGetRmEngineType(index) gpuGetRmEngineType_IMPL(index) 3948 void gpuGetRmEngineTypeList_IMPL(NvU32 *pNv2080EngineList, NvU32 engineCount, RM_ENGINE_TYPE *pRmEngineList); 3949 3950 #define gpuGetRmEngineTypeList(pNv2080EngineList, engineCount, pRmEngineList) gpuGetRmEngineTypeList_IMPL(pNv2080EngineList, engineCount, pRmEngineList) 3951 NvU32 gpuGetNv2080EngineType_IMPL(RM_ENGINE_TYPE index); 3952 3953 #define gpuGetNv2080EngineType(index) gpuGetNv2080EngineType_IMPL(index) 3954 void gpuGetNv2080EngineTypeList_IMPL(RM_ENGINE_TYPE *pRmEngineList, NvU32 engineCount, NvU32 *pNv2080EngineList); 3955 3956 #define gpuGetNv2080EngineTypeList(pRmEngineList, engineCount, pNv2080EngineList) gpuGetNv2080EngineTypeList_IMPL(pRmEngineList, engineCount, pNv2080EngineList) 3957 NV_STATUS gpuGetRmEngineTypeCapMask_IMPL(NvU32 *NV2080EngineTypeCap, NvU32 capSize, NvU32 *RmEngineTypeCap); 3958 3959 #define gpuGetRmEngineTypeCapMask(NV2080EngineTypeCap, capSize, RmEngineTypeCap) gpuGetRmEngineTypeCapMask_IMPL(NV2080EngineTypeCap, capSize, 
RmEngineTypeCap) 3960 NvU32 gpuGetGpuMask_IMPL(struct OBJGPU *pGpu); 3961 3962 #ifdef __nvoc_gpu_h_disabled 3963 static inline NvU32 gpuGetGpuMask(struct OBJGPU *pGpu) { 3964 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3965 return 0; 3966 } 3967 #else //__nvoc_gpu_h_disabled 3968 #define gpuGetGpuMask(pGpu) gpuGetGpuMask_IMPL(pGpu) 3969 #endif //__nvoc_gpu_h_disabled 3970 3971 void gpuChangeComputeModeRefCount_IMPL(struct OBJGPU *pGpu, NvU32 arg0); 3972 3973 #ifdef __nvoc_gpu_h_disabled 3974 static inline void gpuChangeComputeModeRefCount(struct OBJGPU *pGpu, NvU32 arg0) { 3975 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3976 } 3977 #else //__nvoc_gpu_h_disabled 3978 #define gpuChangeComputeModeRefCount(pGpu, arg0) gpuChangeComputeModeRefCount_IMPL(pGpu, arg0) 3979 #endif //__nvoc_gpu_h_disabled 3980 3981 NV_STATUS gpuEnterShutdown_IMPL(struct OBJGPU *pGpu); 3982 3983 #ifdef __nvoc_gpu_h_disabled 3984 static inline NV_STATUS gpuEnterShutdown(struct OBJGPU *pGpu) { 3985 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3986 return NV_ERR_NOT_SUPPORTED; 3987 } 3988 #else //__nvoc_gpu_h_disabled 3989 #define gpuEnterShutdown(pGpu) gpuEnterShutdown_IMPL(pGpu) 3990 #endif //__nvoc_gpu_h_disabled 3991 3992 NV_STATUS gpuSanityCheck_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1); 3993 3994 #ifdef __nvoc_gpu_h_disabled 3995 static inline NV_STATUS gpuSanityCheck(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) { 3996 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 3997 return NV_ERR_NOT_SUPPORTED; 3998 } 3999 #else //__nvoc_gpu_h_disabled 4000 #define gpuSanityCheck(pGpu, arg0, arg1) gpuSanityCheck_IMPL(pGpu, arg0, arg1) 4001 #endif //__nvoc_gpu_h_disabled 4002 4003 DEVICE_MAPPING *gpuGetDeviceMapping_IMPL(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1); 4004 4005 #ifdef __nvoc_gpu_h_disabled 4006 static inline DEVICE_MAPPING *gpuGetDeviceMapping(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1) { 4007 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 
4008 return NULL; 4009 } 4010 #else //__nvoc_gpu_h_disabled 4011 #define gpuGetDeviceMapping(pGpu, arg0, arg1) gpuGetDeviceMapping_IMPL(pGpu, arg0, arg1) 4012 #endif //__nvoc_gpu_h_disabled 4013 4014 DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); 4015 4016 #ifdef __nvoc_gpu_h_disabled 4017 static inline DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { 4018 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4019 return NULL; 4020 } 4021 #else //__nvoc_gpu_h_disabled 4022 #define gpuGetDeviceMappingFromDeviceID(pGpu, arg0, arg1) gpuGetDeviceMappingFromDeviceID_IMPL(pGpu, arg0, arg1) 4023 #endif //__nvoc_gpu_h_disabled 4024 4025 NV_STATUS gpuGetGidInfo_IMPL(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags); 4026 4027 #ifdef __nvoc_gpu_h_disabled 4028 static inline NV_STATUS gpuGetGidInfo(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags) { 4029 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4030 return NV_ERR_NOT_SUPPORTED; 4031 } 4032 #else //__nvoc_gpu_h_disabled 4033 #define gpuGetGidInfo(pGpu, ppGidString, pGidStrlen, gidFlags) gpuGetGidInfo_IMPL(pGpu, ppGidString, pGidStrlen, gidFlags) 4034 #endif //__nvoc_gpu_h_disabled 4035 4036 void gpuSetDisconnectedProperties_IMPL(struct OBJGPU *pGpu); 4037 4038 #ifdef __nvoc_gpu_h_disabled 4039 static inline void gpuSetDisconnectedProperties(struct OBJGPU *pGpu) { 4040 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4041 } 4042 #else //__nvoc_gpu_h_disabled 4043 #define gpuSetDisconnectedProperties(pGpu) gpuSetDisconnectedProperties_IMPL(pGpu) 4044 #endif //__nvoc_gpu_h_disabled 4045 4046 NV_STATUS gpuAddConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0); 4047 4048 #ifdef __nvoc_gpu_h_disabled 4049 static inline NV_STATUS gpuAddConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) { 4050 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4051 return 
NV_ERR_NOT_SUPPORTED; 4052 } 4053 #else //__nvoc_gpu_h_disabled 4054 #define gpuAddConstructedFalcon(pGpu, arg0) gpuAddConstructedFalcon_IMPL(pGpu, arg0) 4055 #endif //__nvoc_gpu_h_disabled 4056 4057 NV_STATUS gpuRemoveConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0); 4058 4059 #ifdef __nvoc_gpu_h_disabled 4060 static inline NV_STATUS gpuRemoveConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) { 4061 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4062 return NV_ERR_NOT_SUPPORTED; 4063 } 4064 #else //__nvoc_gpu_h_disabled 4065 #define gpuRemoveConstructedFalcon(pGpu, arg0) gpuRemoveConstructedFalcon_IMPL(pGpu, arg0) 4066 #endif //__nvoc_gpu_h_disabled 4067 4068 NV_STATUS gpuGetConstructedFalcon_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1); 4069 4070 #ifdef __nvoc_gpu_h_disabled 4071 static inline NV_STATUS gpuGetConstructedFalcon(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) { 4072 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4073 return NV_ERR_NOT_SUPPORTED; 4074 } 4075 #else //__nvoc_gpu_h_disabled 4076 #define gpuGetConstructedFalcon(pGpu, arg0, arg1) gpuGetConstructedFalcon_IMPL(pGpu, arg0, arg1) 4077 #endif //__nvoc_gpu_h_disabled 4078 4079 NV_STATUS gpuGetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2); 4080 4081 #ifdef __nvoc_gpu_h_disabled 4082 static inline NV_STATUS gpuGetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { 4083 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4084 return NV_ERR_NOT_SUPPORTED; 4085 } 4086 #else //__nvoc_gpu_h_disabled 4087 #define gpuGetSparseTextureComputeMode(pGpu, arg0, arg1, arg2) gpuGetSparseTextureComputeMode_IMPL(pGpu, arg0, arg1, arg2) 4088 #endif //__nvoc_gpu_h_disabled 4089 4090 NV_STATUS gpuSetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 arg0); 4091 4092 #ifdef __nvoc_gpu_h_disabled 4093 static inline NV_STATUS gpuSetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 
arg0) { 4094 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4095 return NV_ERR_NOT_SUPPORTED; 4096 } 4097 #else //__nvoc_gpu_h_disabled 4098 #define gpuSetSparseTextureComputeMode(pGpu, arg0) gpuSetSparseTextureComputeMode_IMPL(pGpu, arg0) 4099 #endif //__nvoc_gpu_h_disabled 4100 4101 struct OBJENGSTATE *gpuGetEngstate_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); 4102 4103 #ifdef __nvoc_gpu_h_disabled 4104 static inline struct OBJENGSTATE *gpuGetEngstate(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { 4105 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4106 return NULL; 4107 } 4108 #else //__nvoc_gpu_h_disabled 4109 #define gpuGetEngstate(pGpu, arg0) gpuGetEngstate_IMPL(pGpu, arg0) 4110 #endif //__nvoc_gpu_h_disabled 4111 4112 struct OBJENGSTATE *gpuGetEngstateNoShare_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); 4113 4114 #ifdef __nvoc_gpu_h_disabled 4115 static inline struct OBJENGSTATE *gpuGetEngstateNoShare(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { 4116 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4117 return NULL; 4118 } 4119 #else //__nvoc_gpu_h_disabled 4120 #define gpuGetEngstateNoShare(pGpu, arg0) gpuGetEngstateNoShare_IMPL(pGpu, arg0) 4121 #endif //__nvoc_gpu_h_disabled 4122 4123 struct KernelFifo *gpuGetKernelFifoShared_IMPL(struct OBJGPU *pGpu); 4124 4125 #ifdef __nvoc_gpu_h_disabled 4126 static inline struct KernelFifo *gpuGetKernelFifoShared(struct OBJGPU *pGpu) { 4127 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4128 return NULL; 4129 } 4130 #else //__nvoc_gpu_h_disabled 4131 #define gpuGetKernelFifoShared(pGpu) gpuGetKernelFifoShared_IMPL(pGpu) 4132 #endif //__nvoc_gpu_h_disabled 4133 4134 NvBool gpuGetNextEngstate_IMPL(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState); 4135 4136 #ifdef __nvoc_gpu_h_disabled 4137 static inline NvBool gpuGetNextEngstate(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState) { 4138 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4139 return NV_FALSE; 4140 } 
4141 #else //__nvoc_gpu_h_disabled 4142 #define gpuGetNextEngstate(pGpu, pIt, ppEngState) gpuGetNextEngstate_IMPL(pGpu, pIt, ppEngState) 4143 #endif //__nvoc_gpu_h_disabled 4144 4145 struct OBJHOSTENG *gpuGetHosteng_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); 4146 4147 #ifdef __nvoc_gpu_h_disabled 4148 static inline struct OBJHOSTENG *gpuGetHosteng(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { 4149 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4150 return NULL; 4151 } 4152 #else //__nvoc_gpu_h_disabled 4153 #define gpuGetHosteng(pGpu, arg0) gpuGetHosteng_IMPL(pGpu, arg0) 4154 #endif //__nvoc_gpu_h_disabled 4155 4156 NV_STATUS gpuConstructUserRegisterAccessMap_IMPL(struct OBJGPU *pGpu); 4157 4158 #ifdef __nvoc_gpu_h_disabled 4159 static inline NV_STATUS gpuConstructUserRegisterAccessMap(struct OBJGPU *pGpu) { 4160 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4161 return NV_ERR_NOT_SUPPORTED; 4162 } 4163 #else //__nvoc_gpu_h_disabled 4164 #define gpuConstructUserRegisterAccessMap(pGpu) gpuConstructUserRegisterAccessMap_IMPL(pGpu) 4165 #endif //__nvoc_gpu_h_disabled 4166 4167 NV_STATUS gpuInitRegisterAccessMap_IMPL(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3); 4168 4169 #ifdef __nvoc_gpu_h_disabled 4170 static inline NV_STATUS gpuInitRegisterAccessMap(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3) { 4171 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4172 return NV_ERR_NOT_SUPPORTED; 4173 } 4174 #else //__nvoc_gpu_h_disabled 4175 #define gpuInitRegisterAccessMap(pGpu, arg0, arg1, arg2, arg3) gpuInitRegisterAccessMap_IMPL(pGpu, arg0, arg1, arg2, arg3) 4176 #endif //__nvoc_gpu_h_disabled 4177 4178 NV_STATUS gpuSetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow); 4179 4180 #ifdef __nvoc_gpu_h_disabled 4181 static inline NV_STATUS gpuSetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) { 4182 
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4183 return NV_ERR_NOT_SUPPORTED; 4184 } 4185 #else //__nvoc_gpu_h_disabled 4186 #define gpuSetUserRegisterAccessPermissions(pGpu, offset, size, bAllow) gpuSetUserRegisterAccessPermissions_IMPL(pGpu, offset, size, bAllow) 4187 #endif //__nvoc_gpu_h_disabled 4188 4189 NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk_IMPL(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow); 4190 4191 #ifdef __nvoc_gpu_h_disabled 4192 static inline NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow) { 4193 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4194 return NV_ERR_NOT_SUPPORTED; 4195 } 4196 #else //__nvoc_gpu_h_disabled 4197 #define gpuSetUserRegisterAccessPermissionsInBulk(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) gpuSetUserRegisterAccessPermissionsInBulk_IMPL(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) 4198 #endif //__nvoc_gpu_h_disabled 4199 4200 NvBool gpuGetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset); 4201 4202 #ifdef __nvoc_gpu_h_disabled 4203 static inline NvBool gpuGetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset) { 4204 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4205 return NV_FALSE; 4206 } 4207 #else //__nvoc_gpu_h_disabled 4208 #define gpuGetUserRegisterAccessPermissions(pGpu, offset) gpuGetUserRegisterAccessPermissions_IMPL(pGpu, offset) 4209 #endif //__nvoc_gpu_h_disabled 4210 4211 void gpuDumpCallbackRegister_IMPL(struct OBJGPU *pGpu); 4212 4213 #ifdef __nvoc_gpu_h_disabled 4214 static inline void gpuDumpCallbackRegister(struct OBJGPU *pGpu) { 4215 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4216 } 4217 #else //__nvoc_gpu_h_disabled 4218 #define gpuDumpCallbackRegister(pGpu) gpuDumpCallbackRegister_IMPL(pGpu) 4219 #endif //__nvoc_gpu_h_disabled 4220 4221 NV_STATUS gpuGetGfidState_IMPL(struct OBJGPU *pGpu, 
NvU32 gfid, GFID_ALLOC_STATUS *pState); 4222 4223 #ifdef __nvoc_gpu_h_disabled 4224 static inline NV_STATUS gpuGetGfidState(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState) { 4225 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4226 return NV_ERR_NOT_SUPPORTED; 4227 } 4228 #else //__nvoc_gpu_h_disabled 4229 #define gpuGetGfidState(pGpu, gfid, pState) gpuGetGfidState_IMPL(pGpu, gfid, pState) 4230 #endif //__nvoc_gpu_h_disabled 4231 4232 void gpuSetGfidUsage_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); 4233 4234 #ifdef __nvoc_gpu_h_disabled 4235 static inline void gpuSetGfidUsage(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { 4236 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4237 } 4238 #else //__nvoc_gpu_h_disabled 4239 #define gpuSetGfidUsage(pGpu, gfid, bInUse) gpuSetGfidUsage_IMPL(pGpu, gfid, bInUse) 4240 #endif //__nvoc_gpu_h_disabled 4241 4242 void gpuSetGfidInvalidated_IMPL(struct OBJGPU *pGpu, NvU32 gfid); 4243 4244 #ifdef __nvoc_gpu_h_disabled 4245 static inline void gpuSetGfidInvalidated(struct OBJGPU *pGpu, NvU32 gfid) { 4246 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4247 } 4248 #else //__nvoc_gpu_h_disabled 4249 #define gpuSetGfidInvalidated(pGpu, gfid) gpuSetGfidInvalidated_IMPL(pGpu, gfid) 4250 #endif //__nvoc_gpu_h_disabled 4251 4252 NV_STATUS gpuSetExternalKernelClientCount_IMPL(struct OBJGPU *pGpu, NvBool bIncr); 4253 4254 #ifdef __nvoc_gpu_h_disabled 4255 static inline NV_STATUS gpuSetExternalKernelClientCount(struct OBJGPU *pGpu, NvBool bIncr) { 4256 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4257 return NV_ERR_NOT_SUPPORTED; 4258 } 4259 #else //__nvoc_gpu_h_disabled 4260 #define gpuSetExternalKernelClientCount(pGpu, bIncr) gpuSetExternalKernelClientCount_IMPL(pGpu, bIncr) 4261 #endif //__nvoc_gpu_h_disabled 4262 4263 NvBool gpuIsInUse_IMPL(struct OBJGPU *pGpu); 4264 4265 #ifdef __nvoc_gpu_h_disabled 4266 static inline NvBool gpuIsInUse(struct OBJGPU *pGpu) { 4267 NV_ASSERT_FAILED_PRECOMP("OBJGPU was 
disabled!"); 4268 return NV_FALSE; 4269 } 4270 #else //__nvoc_gpu_h_disabled 4271 #define gpuIsInUse(pGpu) gpuIsInUse_IMPL(pGpu) 4272 #endif //__nvoc_gpu_h_disabled 4273 4274 NvU32 gpuGetUserClientCount_IMPL(struct OBJGPU *pGpu); 4275 4276 #ifdef __nvoc_gpu_h_disabled 4277 static inline NvU32 gpuGetUserClientCount(struct OBJGPU *pGpu) { 4278 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4279 return 0; 4280 } 4281 #else //__nvoc_gpu_h_disabled 4282 #define gpuGetUserClientCount(pGpu) gpuGetUserClientCount_IMPL(pGpu) 4283 #endif //__nvoc_gpu_h_disabled 4284 4285 NvU32 gpuGetExternalClientCount_IMPL(struct OBJGPU *pGpu); 4286 4287 #ifdef __nvoc_gpu_h_disabled 4288 static inline NvU32 gpuGetExternalClientCount(struct OBJGPU *pGpu) { 4289 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4290 return 0; 4291 } 4292 #else //__nvoc_gpu_h_disabled 4293 #define gpuGetExternalClientCount(pGpu) gpuGetExternalClientCount_IMPL(pGpu) 4294 #endif //__nvoc_gpu_h_disabled 4295 4296 void gpuNotifySubDeviceEvent_IMPL(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); 4297 4298 #ifdef __nvoc_gpu_h_disabled 4299 static inline void gpuNotifySubDeviceEvent(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { 4300 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4301 } 4302 #else //__nvoc_gpu_h_disabled 4303 #define gpuNotifySubDeviceEvent(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) gpuNotifySubDeviceEvent_IMPL(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) 4304 #endif //__nvoc_gpu_h_disabled 4305 4306 NV_STATUS gpuRegisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); 4307 4308 #ifdef __nvoc_gpu_h_disabled 4309 static inline NV_STATUS gpuRegisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { 4310 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4311 return NV_ERR_NOT_SUPPORTED; 
4312 } 4313 #else //__nvoc_gpu_h_disabled 4314 #define gpuRegisterSubdevice(pGpu, pSubdevice) gpuRegisterSubdevice_IMPL(pGpu, pSubdevice) 4315 #endif //__nvoc_gpu_h_disabled 4316 4317 void gpuUnregisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); 4318 4319 #ifdef __nvoc_gpu_h_disabled 4320 static inline void gpuUnregisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { 4321 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4322 } 4323 #else //__nvoc_gpu_h_disabled 4324 #define gpuUnregisterSubdevice(pGpu, pSubdevice) gpuUnregisterSubdevice_IMPL(pGpu, pSubdevice) 4325 #endif //__nvoc_gpu_h_disabled 4326 4327 void gpuGspPluginTriggeredEvent_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex); 4328 4329 #ifdef __nvoc_gpu_h_disabled 4330 static inline void gpuGspPluginTriggeredEvent(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex) { 4331 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4332 } 4333 #else //__nvoc_gpu_h_disabled 4334 #define gpuGspPluginTriggeredEvent(pGpu, gfid, notifyIndex) gpuGspPluginTriggeredEvent_IMPL(pGpu, gfid, notifyIndex) 4335 #endif //__nvoc_gpu_h_disabled 4336 4337 NV_STATUS gpuGetProcWithObject_IMPL(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef); 4338 4339 #ifdef __nvoc_gpu_h_disabled 4340 static inline NV_STATUS gpuGetProcWithObject(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef) { 4341 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4342 return NV_ERR_NOT_SUPPORTED; 4343 } 4344 #else //__nvoc_gpu_h_disabled 4345 #define gpuGetProcWithObject(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) gpuGetProcWithObject_IMPL(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) 4346 #endif //__nvoc_gpu_h_disabled 4347 4348 NV_STATUS gpuFindClientInfoWithPidIterator_IMPL(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, 
NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo); 4349 4350 #ifdef __nvoc_gpu_h_disabled 4351 static inline NV_STATUS gpuFindClientInfoWithPidIterator(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo) { 4352 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4353 return NV_ERR_NOT_SUPPORTED; 4354 } 4355 #else //__nvoc_gpu_h_disabled 4356 #define gpuFindClientInfoWithPidIterator(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) gpuFindClientInfoWithPidIterator_IMPL(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) 4357 #endif //__nvoc_gpu_h_disabled 4358 4359 NvBool gpuIsCCFeatureEnabled_IMPL(struct OBJGPU *pGpu); 4360 4361 #ifdef __nvoc_gpu_h_disabled 4362 static inline NvBool gpuIsCCFeatureEnabled(struct OBJGPU *pGpu) { 4363 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4364 return NV_FALSE; 4365 } 4366 #else //__nvoc_gpu_h_disabled 4367 #define gpuIsCCFeatureEnabled(pGpu) gpuIsCCFeatureEnabled_IMPL(pGpu) 4368 #endif //__nvoc_gpu_h_disabled 4369 4370 NvBool gpuIsApmFeatureEnabled_IMPL(struct OBJGPU *pGpu); 4371 4372 #ifdef __nvoc_gpu_h_disabled 4373 static inline NvBool gpuIsApmFeatureEnabled(struct OBJGPU *pGpu) { 4374 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4375 return NV_FALSE; 4376 } 4377 #else //__nvoc_gpu_h_disabled 4378 #define gpuIsApmFeatureEnabled(pGpu) gpuIsApmFeatureEnabled_IMPL(pGpu) 4379 #endif //__nvoc_gpu_h_disabled 4380 4381 NvBool gpuIsCCorApmFeatureEnabled_IMPL(struct OBJGPU *pGpu); 4382 4383 #ifdef __nvoc_gpu_h_disabled 4384 static inline NvBool gpuIsCCorApmFeatureEnabled(struct OBJGPU *pGpu) { 4385 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4386 return NV_FALSE; 4387 } 4388 #else //__nvoc_gpu_h_disabled 4389 #define 
gpuIsCCorApmFeatureEnabled(pGpu) gpuIsCCorApmFeatureEnabled_IMPL(pGpu) 4390 #endif //__nvoc_gpu_h_disabled 4391 4392 NvBool gpuIsCCDevToolsModeEnabled_IMPL(struct OBJGPU *pGpu); 4393 4394 #ifdef __nvoc_gpu_h_disabled 4395 static inline NvBool gpuIsCCDevToolsModeEnabled(struct OBJGPU *pGpu) { 4396 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4397 return NV_FALSE; 4398 } 4399 #else //__nvoc_gpu_h_disabled 4400 #define gpuIsCCDevToolsModeEnabled(pGpu) gpuIsCCDevToolsModeEnabled_IMPL(pGpu) 4401 #endif //__nvoc_gpu_h_disabled 4402 4403 NvBool gpuIsOnTheBus_IMPL(struct OBJGPU *pGpu); 4404 4405 #ifdef __nvoc_gpu_h_disabled 4406 static inline NvBool gpuIsOnTheBus(struct OBJGPU *pGpu) { 4407 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4408 return NV_FALSE; 4409 } 4410 #else //__nvoc_gpu_h_disabled 4411 #define gpuIsOnTheBus(pGpu) gpuIsOnTheBus_IMPL(pGpu) 4412 #endif //__nvoc_gpu_h_disabled 4413 4414 NV_STATUS gpuEnterStandby_IMPL(struct OBJGPU *pGpu); 4415 4416 #ifdef __nvoc_gpu_h_disabled 4417 static inline NV_STATUS gpuEnterStandby(struct OBJGPU *pGpu) { 4418 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4419 return NV_ERR_NOT_SUPPORTED; 4420 } 4421 #else //__nvoc_gpu_h_disabled 4422 #define gpuEnterStandby(pGpu) gpuEnterStandby_IMPL(pGpu) 4423 #endif //__nvoc_gpu_h_disabled 4424 4425 NV_STATUS gpuEnterHibernate_IMPL(struct OBJGPU *pGpu); 4426 4427 #ifdef __nvoc_gpu_h_disabled 4428 static inline NV_STATUS gpuEnterHibernate(struct OBJGPU *pGpu) { 4429 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4430 return NV_ERR_NOT_SUPPORTED; 4431 } 4432 #else //__nvoc_gpu_h_disabled 4433 #define gpuEnterHibernate(pGpu) gpuEnterHibernate_IMPL(pGpu) 4434 #endif //__nvoc_gpu_h_disabled 4435 4436 NV_STATUS gpuResumeFromStandby_IMPL(struct OBJGPU *pGpu); 4437 4438 #ifdef __nvoc_gpu_h_disabled 4439 static inline NV_STATUS gpuResumeFromStandby(struct OBJGPU *pGpu) { 4440 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4441 return NV_ERR_NOT_SUPPORTED; 4442 } 4443 #else 
//__nvoc_gpu_h_disabled 4444 #define gpuResumeFromStandby(pGpu) gpuResumeFromStandby_IMPL(pGpu) 4445 #endif //__nvoc_gpu_h_disabled 4446 4447 NV_STATUS gpuResumeFromHibernate_IMPL(struct OBJGPU *pGpu); 4448 4449 #ifdef __nvoc_gpu_h_disabled 4450 static inline NV_STATUS gpuResumeFromHibernate(struct OBJGPU *pGpu) { 4451 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4452 return NV_ERR_NOT_SUPPORTED; 4453 } 4454 #else //__nvoc_gpu_h_disabled 4455 #define gpuResumeFromHibernate(pGpu) gpuResumeFromHibernate_IMPL(pGpu) 4456 #endif //__nvoc_gpu_h_disabled 4457 4458 NvBool gpuCheckSysmemAccess_IMPL(struct OBJGPU *pGpu); 4459 4460 #ifdef __nvoc_gpu_h_disabled 4461 static inline NvBool gpuCheckSysmemAccess(struct OBJGPU *pGpu) { 4462 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4463 return NV_FALSE; 4464 } 4465 #else //__nvoc_gpu_h_disabled 4466 #define gpuCheckSysmemAccess(pGpu) gpuCheckSysmemAccess_IMPL(pGpu) 4467 #endif //__nvoc_gpu_h_disabled 4468 4469 void gpuInitChipInfo_IMPL(struct OBJGPU *pGpu); 4470 4471 #ifdef __nvoc_gpu_h_disabled 4472 static inline void gpuInitChipInfo(struct OBJGPU *pGpu) { 4473 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4474 } 4475 #else //__nvoc_gpu_h_disabled 4476 #define gpuInitChipInfo(pGpu) gpuInitChipInfo_IMPL(pGpu) 4477 #endif //__nvoc_gpu_h_disabled 4478 4479 NV_STATUS gpuSanityCheckRegRead_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue); 4480 4481 #ifdef __nvoc_gpu_h_disabled 4482 static inline NV_STATUS gpuSanityCheckRegRead(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue) { 4483 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4484 return NV_ERR_NOT_SUPPORTED; 4485 } 4486 #else //__nvoc_gpu_h_disabled 4487 #define gpuSanityCheckRegRead(pGpu, addr, size, pValue) gpuSanityCheckRegRead_IMPL(pGpu, addr, size, pValue) 4488 #endif //__nvoc_gpu_h_disabled 4489 4490 NV_STATUS gpuSanityCheckRegisterAccess_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal); 4491 4492 #ifdef __nvoc_gpu_h_disabled 
4493 static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal) { 4494 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4495 return NV_ERR_NOT_SUPPORTED; 4496 } 4497 #else //__nvoc_gpu_h_disabled 4498 #define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal) 4499 #endif //__nvoc_gpu_h_disabled 4500 4501 void gpuUpdateUserSharedData_IMPL(struct OBJGPU *pGpu); 4502 4503 #ifdef __nvoc_gpu_h_disabled 4504 static inline void gpuUpdateUserSharedData(struct OBJGPU *pGpu) { 4505 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4506 } 4507 #else //__nvoc_gpu_h_disabled 4508 #define gpuUpdateUserSharedData(pGpu) gpuUpdateUserSharedData_IMPL(pGpu) 4509 #endif //__nvoc_gpu_h_disabled 4510 4511 NV_STATUS gpuValidateRegOffset_IMPL(struct OBJGPU *pGpu, NvU32 arg0); 4512 4513 #ifdef __nvoc_gpu_h_disabled 4514 static inline NV_STATUS gpuValidateRegOffset(struct OBJGPU *pGpu, NvU32 arg0) { 4515 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4516 return NV_ERR_NOT_SUPPORTED; 4517 } 4518 #else //__nvoc_gpu_h_disabled 4519 #define gpuValidateRegOffset(pGpu, arg0) gpuValidateRegOffset_IMPL(pGpu, arg0) 4520 #endif //__nvoc_gpu_h_disabled 4521 4522 NV_STATUS gpuSetGC6SBIOSCapabilities_IMPL(struct OBJGPU *pGpu); 4523 4524 #ifdef __nvoc_gpu_h_disabled 4525 static inline NV_STATUS gpuSetGC6SBIOSCapabilities(struct OBJGPU *pGpu) { 4526 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4527 return NV_ERR_NOT_SUPPORTED; 4528 } 4529 #else //__nvoc_gpu_h_disabled 4530 #define gpuSetGC6SBIOSCapabilities(pGpu) gpuSetGC6SBIOSCapabilities_IMPL(pGpu) 4531 #endif //__nvoc_gpu_h_disabled 4532 4533 NV_STATUS gpuGc6Entry_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GC6_ENTRY_PARAMS *arg0); 4534 4535 #ifdef __nvoc_gpu_h_disabled 4536 static inline NV_STATUS gpuGc6Entry(struct OBJGPU *pGpu, NV2080_CTRL_GC6_ENTRY_PARAMS *arg0) { 4537 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4538 return 
NV_ERR_NOT_SUPPORTED; 4539 } 4540 #else //__nvoc_gpu_h_disabled 4541 #define gpuGc6Entry(pGpu, arg0) gpuGc6Entry_IMPL(pGpu, arg0) 4542 #endif //__nvoc_gpu_h_disabled 4543 4544 NV_STATUS gpuGc6EntryGpuPowerOff_IMPL(struct OBJGPU *pGpu); 4545 4546 #ifdef __nvoc_gpu_h_disabled 4547 static inline NV_STATUS gpuGc6EntryGpuPowerOff(struct OBJGPU *pGpu) { 4548 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4549 return NV_ERR_NOT_SUPPORTED; 4550 } 4551 #else //__nvoc_gpu_h_disabled 4552 #define gpuGc6EntryGpuPowerOff(pGpu) gpuGc6EntryGpuPowerOff_IMPL(pGpu) 4553 #endif //__nvoc_gpu_h_disabled 4554 4555 NV_STATUS gpuGc6Exit_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PARAMS *arg0); 4556 4557 #ifdef __nvoc_gpu_h_disabled 4558 static inline NV_STATUS gpuGc6Exit(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PARAMS *arg0) { 4559 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4560 return NV_ERR_NOT_SUPPORTED; 4561 } 4562 #else //__nvoc_gpu_h_disabled 4563 #define gpuGc6Exit(pGpu, arg0) gpuGc6Exit_IMPL(pGpu, arg0) 4564 #endif //__nvoc_gpu_h_disabled 4565 4566 #undef PRIVATE_FIELD 4567 4568 4569 // Look up pGpu associated with a pResourceRef 4570 NV_STATUS gpuGetByRef (RsResourceRef *pContextRef, NvBool *pbBroadcast, struct OBJGPU **ppGpu); 4571 4572 // Look up pGpu associated with a hResource 4573 NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *pbBroadcast, struct OBJGPU **ppGpu); 4574 4575 #define GPU_GFID_PF (0) 4576 #define IS_GFID_PF(gfid) (((NvU32)(gfid)) == GPU_GFID_PF) 4577 #define IS_GFID_VF(gfid) (((NvU32)(gfid)) != GPU_GFID_PF) 4578 // Invalid P2P GFID 4579 #define INVALID_P2P_GFID (0xFFFFFFFF) 4580 #define INVALID_FABRIC_PARTITION_ID (0xFFFFFFFF) 4581 4582 // 4583 // Generates GPU child accessor macros (i.e.: GPU_GET_{ENG}) 4584 // 4585 #define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4586 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return pGpu->gpuField; } \ 4587 
ct_assert(numInstances == 1); 4588 4589 #define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4590 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return index < numInstances ? pGpu->gpuField[index] : NULL; } 4591 4592 #include "gpu/gpu_child_list.h" 4593 4594 static NV_FORCEINLINE struct Graphics *GPU_GET_GR(struct OBJGPU *pGpu) { return NULL; } 4595 4596 // Temporary stubs 4597 #if RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 4598 #define GPU_CHILD_LIST_DISABLED_ONLY 4599 #define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4600 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return NULL; } 4601 4602 #define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4603 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return NULL; } 4604 4605 #include "gpu/gpu_child_list.h" 4606 #endif // RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 4607 4608 4609 // 4610 // Inline functions 4611 // 4612 4613 // 4614 // This function returns subdevice mask for a GPU. 4615 // For non SLI, subdeviceInstance is 0, so this 4616 // function will always return 1. 4617 // 4618 4619 static NV_INLINE NvU32 4620 gpuGetSubdeviceMask 4621 ( 4622 struct OBJGPU *pGpu 4623 ) 4624 { 4625 return 1 << pGpu->subdeviceInstance; 4626 } 4627 4628 static NV_INLINE NvU32 4629 gpuGetInstance 4630 ( 4631 struct OBJGPU *pGpu 4632 ) 4633 { 4634 return pGpu->gpuInstance; 4635 } 4636 4637 static NV_INLINE NvU32 4638 gpuGetDeviceInstance 4639 ( 4640 struct OBJGPU *pGpu 4641 ) 4642 { 4643 return pGpu->deviceInstance; 4644 } 4645 4646 NV_INLINE 4647 static NvU32 gpuGetNumCEs(struct OBJGPU *pGpu) 4648 { 4649 return pGpu->numCEs; 4650 } 4651 4652 // 4653 // Per GPU mode flags macros. In general these macros should not be 4654 // used and all code paths should be the same on all environments. 
4655 // However occasionally a tweak is needed to work around a limitation 4656 // or improve speed on non-hardware. Is_RTLSIM normally is handled 4657 // in the IS_SIMULATION case and should almost never be used. 4658 // 4659 // IS_EMULATION actual emulation hardware 4660 // IS_SIMULATION fmodel or RTL simulation 4661 // IS_MODS_AMODEL amodel under mods for trace player 4662 // IS_LIVE_AMODEL amodel under windows for 3D drivers (removed) 4663 // IS_RTLSIM RTL simulation 4664 // IS_SILICON Real hardware 4665 // IS_VIRTUAL RM is running within a guest VM 4666 // IS_GSP_CLIENT RM is a GSP/DCE client with GPU support offloaded to GSP/DCE 4667 // 4668 4669 #define IS_EMULATION(pGpu) ((pGpu)->getProperty((pGpu), PDB_PROP_GPU_EMULATION)) 4670 #define IS_SIMULATION(pGpu) (pGpu->bIsSimulation) 4671 #define IS_MODS_AMODEL(pGpu) (pGpu->bIsModsAmodel) 4672 #define IS_FMODEL(pGpu) (pGpu->bIsFmodel) 4673 #define IS_RTLSIM(pGpu) (pGpu->bIsRtlsim) 4674 #define IS_SILICON(pGpu) (!(IS_EMULATION(pGpu) || IS_SIMULATION(pGpu))) 4675 #define IS_PASSTHRU(pGpu) ((pGpu)->bIsPassthru) 4676 #define IS_GSP_CLIENT(pGpu) ((RMCFG_FEATURE_GSP_CLIENT_RM || RMCFG_FEATURE_DCE_CLIENT_RM) && (pGpu)->isGspClient) 4677 #define IS_VIRTUAL(pGpu) NV_FALSE 4678 #define IS_VIRTUAL_WITH_SRIOV(pGpu) NV_FALSE 4679 #define IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu) NV_FALSE 4680 #define IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) NV_FALSE 4681 #define IS_VIRTUAL_WITHOUT_SRIOV(pGpu) NV_FALSE 4682 #define IS_SRIOV_HEAVY(pGpu) NV_FALSE 4683 #define IS_SRIOV_HEAVY_GUEST(pGpu) NV_FALSE 4684 #define IS_SRIOV_FULL_GUEST(pGpu) NV_FALSE 4685 #define IS_SRIOV_HEAVY_HOST(pGpu) NV_FALSE 4686 #define IS_SRIOV_FULL_HOST(pGpu) ((hypervisorIsVgxHyper()) && gpuIsSriovEnabled(pGpu) && !IS_SRIOV_HEAVY(pGpu)) 4687 #define IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) ((pGpu)->bVgpuGspPluginOffloadEnabled) 4688 #define IS_SRIOV_WITH_VGPU_GSP_ENABLED(pGpu) (gpuIsSriovEnabled(pGpu) && IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu)) 4689 
#define IS_SRIOV_WITH_VGPU_GSP_DISABLED(pGpu) (gpuIsSriovEnabled(pGpu) && !IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu)) 4690 4691 extern GPU_CHILD_ITER gpuGetPossibleEngDescriptorIter(void); 4692 extern NvBool gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc); 4693 4694 NV_STATUS gpuCtrlExecRegOps(struct OBJGPU *, struct Graphics *, NvHandle, NvHandle, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool); 4695 NV_STATUS gpuValidateRegOps(struct OBJGPU *, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool, NvBool); 4696 4697 // GPU Sanity Check Flags 4698 #define GPU_SANITY_CHECK_FLAGS_BOOT_0 NVBIT(0) 4699 #define GPU_SANITY_CHECK_FLAGS_OFF_BY_N NVBIT(1) 4700 #define GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH NVBIT(2) 4701 #define GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED NVBIT(3) 4702 #define GPU_SANITY_CHECK_FLAGS_FB NVBIT(4) 4703 4704 #define GPU_SANITY_CHECK_FLAGS_NONE 0x0 4705 #define GPU_SANITY_CHECK_FLAGS_ALL 0xffffffff 4706 4707 // 4708 // Macro for checking if GPU is in reset. 4709 // 4710 #define API_GPU_IN_RESET_SANITY_CHECK(pGpu) \ 4711 ((NULL == pGpu) || \ 4712 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) || \ 4713 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || \ 4714 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) || \ 4715 pGpu->getProperty(pGpu, PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING)) 4716 4717 // 4718 // Marco for checking if GPU is still connected. 
//
// A NULL pGpu fails this check (unlike the reset check above, which a NULL
// pGpu passes).
#define API_GPU_ATTACHED_SANITY_CHECK(pGpu)                                 \
    ((NULL != pGpu) &&                                                      \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) &&                  \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET))

//
// Macro for checking if GPU has Full Sanity
//
#define FULL_GPU_SANITY_CHECK(pGpu)                                         \
    ((NULL != pGpu) &&                                                      \
     gpuIsGpuFullPower(pGpu) &&                                             \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) &&                  \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) &&            \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) &&       \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) &&                 \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) &&                      \
     gpuCheckSysmemAccess(pGpu))

//
// Macro for checking if GPU has Full Sanity during PM resume. Same checks as
// FULL_GPU_SANITY_CHECK except it uses the PM-resume notion of full power and
// does not require sysmem access.
//
#define FULL_GPU_SANITY_FOR_PM_RESUME(pGpu)                                 \
    ((NULL != pGpu) &&                                                      \
     gpuIsGpuFullPowerForPmResume(pGpu) &&                                  \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) &&                  \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) &&            \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) &&       \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) &&                 \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))

//
// Macro for checking if GPU is in the recovery path.
// As with the reset check, a NULL pGpu counts as "in recovery".
//
#define API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu)                              \
    ((NULL == pGpu) ||                                                      \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY))

//
// Identifiers for gpuGetRegBaseOffset HAL interface.
//
#define NV_REG_BASE_GR                (0x00000001)
#define NV_REG_BASE_TIMER             (0x00000002)
#define NV_REG_BASE_MASTER            (0x00000003)
#define NV_REG_BASE_USERMODE          (0x00000004)
#define NV_REG_BASE_LAST              NV_REG_BASE_USERMODE
// Compile-time guard: the register-base identifiers must stay within the
// range understood by the internal GET_CHIP_INFO control call.
ct_assert(NV_REG_BASE_LAST < NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX);

// A register read matching GPU_READ_PRI_ERROR_CODE under
// GPU_READ_PRI_ERROR_MASK indicates a PRI error return value.
#define GPU_READ_PRI_ERROR_MASK  0xFFF00000
#define GPU_READ_PRI_ERROR_CODE  0xBAD00000

//
// Define for invalid register value. GPU could have fallen off the bus or
// the GPU could be in reset.
//
#define GPU_REG_VALUE_INVALID    0xFFFFFFFF

//
// Hal InfoBlock access interface
//
#define gpuGetInfoBlock(pGpu, pListHead, dataId)           getInfoPtr(pListHead, dataId)
#define gpuAddInfoBlock(pGpu, ppListHead, dataId, size)    addInfoPtr(ppListHead, dataId, size)
// NOTE(review): the two macros below embed a trailing ';' in their expansion
// (unlike the two above), so an un-braced if/else around a call will not
// compile and a caller's own ';' produces an empty statement — confirm
// callers before normalizing.
#define gpuDeleteInfoBlock(pGpu, ppListHead, dataId)       deleteInfoPtr(ppListHead, dataId);
#define gpuTestInfoBlock(pGpu, pListHead, dataId)          testInfoPtr(pListHead, dataId);

typedef struct _vgpu_static_info VGPU_STATIC_INFO;
typedef struct GspStaticConfigInfo_t GspStaticConfigInfo;

// Static info getters
VGPU_STATIC_INFO *gpuGetStaticInfo(struct OBJGPU *pGpu);
#define GPU_GET_STATIC_INFO(pGpu) gpuGetStaticInfo(pGpu)
GspStaticConfigInfo *gpuGetGspStaticInfo(struct OBJGPU *pGpu);
#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu)

// Simulator escape read/write entry points (presumably meaningful only on
// simulation/emulation platforms — confirm against platform implementations).
NV_STATUS gpuSimEscapeWrite(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value);
NV_STATUS gpuSimEscapeWriteBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer);
NV_STATUS gpuSimEscapeRead(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value);
NV_STATUS gpuSimEscapeReadBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer);

#endif // _OBJGPU_H_

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_GPU_NVOC_H_