1 #ifndef _G_GPU_NVOC_H_ 2 #define _G_GPU_NVOC_H_ 3 #include "nvoc/runtime.h" 4 5 #ifdef __cplusplus 6 extern "C" { 7 #endif 8 9 /* 10 * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 11 * SPDX-License-Identifier: MIT 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a 14 * copy of this software and associated documentation files (the "Software"), 15 * to deal in the Software without restriction, including without limitation 16 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 17 * and/or sell copies of the Software, and to permit persons to whom the 18 * Software is furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 29 * DEALINGS IN THE SOFTWARE. 30 */ 31 #include "g_gpu_nvoc.h" 32 33 #ifndef _OBJGPU_H_ 34 #define _OBJGPU_H_ 35 36 /*! 37 * @file 38 * @brief Resource Manager Defines and Structures: Defines and structures used for the GPU Object. 39 */ 40 41 /*! 42 * 43 * Forward declaration of SEQSCRIPT - here because it is used by many clients 44 * and we don't want objseq.h to have to be included everywhere, so adding this 45 * here. 
See NVCR 12827752
 *
 */
typedef struct _SEQSCRIPT SEQSCRIPT, *PSEQSCRIPT;

typedef struct GPUATTACHARG GPUATTACHARG;

/*
 * WARNING -- Avoid including headers in gpu.h
 * A change in gpu.h and headers included by gpu.h triggers recompilation of most RM
 * files in an incremental build. We should keep the list of included headers as short as
 * possible.
 * Especially, GPU's child module should not have its object header being included here.
 * A child module generally includes the header of its parent. A child module header included
 * by the parent module affects all the sibling modules.
 * */
#include "ctrl/ctrl0000/ctrl0000system.h"
#include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS (from hal)
#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_MAX_BSPS/NVENCS
#include "ctrl/ctrl2080/ctrl2080ecc.h"
#include "ctrl/ctrl2080/ctrl2080nvd.h"
#include "ctrl/ctrl0073/ctrl0073system.h"
#include "class/cl2080.h"
#include "class/cl90cd.h"

#include "nvlimits.h"
#include "utils/nv_enum.h"

#include "gpu/gpu_timeout.h"
#include "gpu/gpu_access.h"
#include "gpu/gpu_shared_data_map.h"
#include "gpu/kern_gpu_power.h"

#include "platform/acpi_common.h"
#include "gpu/gpu_acpi_data.h"
#include "platform/sli/sli.h"

#include "core/core.h"
#include "core/system.h"
#include "core/info_block.h"
#include "core/hal.h"
#include "nvoc/utility.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/gpu_resource_desc.h"
#include "diagnostics/traceable.h"
#include "gpu/gpu_uuid.h"
#include "prereq_tracker/prereq_tracker.h"
#include "gpu/gpu_halspec.h"
#include "kernel/gpu/gpu_engine_type.h"

#include "rmapi/control.h"
#include "rmapi/event.h"
#include "rmapi/rmapi.h"

#include "kernel/gpu/gr/fecs_event_list.h"
#include "class/cl90cdfecs.h"

#include "gpu/gpu_fabric_probe.h"

#include
"nv_arch.h" 105 106 #include "g_rmconfig_util.h" // prototypes for rmconfig utility functions, eg: rmcfg_IsGK104() 107 108 // TODO - the forward declaration of OS_GPU_INFO should be simplified 109 typedef struct nv_state_t OS_GPU_INFO; 110 111 struct OBJGMMU; 112 113 #ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__ 114 #define __NVOC_CLASS_OBJGMMU_TYPEDEF__ 115 typedef struct OBJGMMU OBJGMMU; 116 #endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */ 117 118 #ifndef __nvoc_class_id_OBJGMMU 119 #define __nvoc_class_id_OBJGMMU 0xd7a41d 120 #endif /* __nvoc_class_id_OBJGMMU */ 121 122 123 struct OBJGRIDDISPLAYLESS; 124 125 #ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ 126 #define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ 127 typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS; 128 #endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */ 129 130 #ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS 131 #define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a 132 #endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */ 133 134 135 struct OBJHOSTENG; 136 137 #ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ 138 #define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ 139 typedef struct OBJHOSTENG OBJHOSTENG; 140 #endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */ 141 142 #ifndef __nvoc_class_id_OBJHOSTENG 143 #define __nvoc_class_id_OBJHOSTENG 0xb356e7 144 #endif /* __nvoc_class_id_OBJHOSTENG */ 145 146 147 struct OBJPMU_CLIENT_IMPLEMENTER; 148 149 #ifndef __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ 150 #define __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ 151 typedef struct OBJPMU_CLIENT_IMPLEMENTER OBJPMU_CLIENT_IMPLEMENTER; 152 #endif /* __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ */ 153 154 #ifndef __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER 155 #define __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER 0x88cace 156 #endif /* __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER */ 157 158 159 struct OBJINTRABLE; 160 161 #ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ 162 #define __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ 163 typedef struct OBJINTRABLE 
OBJINTRABLE; 164 #endif /* __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */ 165 166 #ifndef __nvoc_class_id_OBJINTRABLE 167 #define __nvoc_class_id_OBJINTRABLE 0x31ccb7 168 #endif /* __nvoc_class_id_OBJINTRABLE */ 169 170 171 struct OBJVBIOS; 172 173 #ifndef __NVOC_CLASS_OBJVBIOS_TYPEDEF__ 174 #define __NVOC_CLASS_OBJVBIOS_TYPEDEF__ 175 typedef struct OBJVBIOS OBJVBIOS; 176 #endif /* __NVOC_CLASS_OBJVBIOS_TYPEDEF__ */ 177 178 #ifndef __nvoc_class_id_OBJVBIOS 179 #define __nvoc_class_id_OBJVBIOS 0x5dc772 180 #endif /* __nvoc_class_id_OBJVBIOS */ 181 182 183 struct NvDebugDump; 184 185 #ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__ 186 #define __NVOC_CLASS_NvDebugDump_TYPEDEF__ 187 typedef struct NvDebugDump NvDebugDump; 188 #endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */ 189 190 #ifndef __nvoc_class_id_NvDebugDump 191 #define __nvoc_class_id_NvDebugDump 0x7e80a2 192 #endif /* __nvoc_class_id_NvDebugDump */ 193 194 195 struct GpuMutexMgr; 196 197 #ifndef __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ 198 #define __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ 199 typedef struct GpuMutexMgr GpuMutexMgr; 200 #endif /* __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ */ 201 202 #ifndef __nvoc_class_id_GpuMutexMgr 203 #define __nvoc_class_id_GpuMutexMgr 0x9d93b2 204 #endif /* __nvoc_class_id_GpuMutexMgr */ 205 206 207 struct KernelFalcon; 208 209 #ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__ 210 #define __NVOC_CLASS_KernelFalcon_TYPEDEF__ 211 typedef struct KernelFalcon KernelFalcon; 212 #endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */ 213 214 #ifndef __nvoc_class_id_KernelFalcon 215 #define __nvoc_class_id_KernelFalcon 0xb6b1af 216 #endif /* __nvoc_class_id_KernelFalcon */ 217 218 219 struct KernelVideoEngine; 220 221 #ifndef __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ 222 #define __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ 223 typedef struct KernelVideoEngine KernelVideoEngine; 224 #endif /* __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ */ 225 226 #ifndef __nvoc_class_id_KernelVideoEngine 227 #define __nvoc_class_id_KernelVideoEngine 
0x9e2f3e 228 #endif /* __nvoc_class_id_KernelVideoEngine */ 229 230 231 struct KernelChannel; 232 233 #ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ 234 #define __NVOC_CLASS_KernelChannel_TYPEDEF__ 235 typedef struct KernelChannel KernelChannel; 236 #endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ 237 238 #ifndef __nvoc_class_id_KernelChannel 239 #define __nvoc_class_id_KernelChannel 0x5d8d70 240 #endif /* __nvoc_class_id_KernelChannel */ 241 242 243 struct GenericKernelFalcon; 244 245 #ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ 246 #define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ 247 typedef struct GenericKernelFalcon GenericKernelFalcon; 248 #endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */ 249 250 #ifndef __nvoc_class_id_GenericKernelFalcon 251 #define __nvoc_class_id_GenericKernelFalcon 0xabcf08 252 #endif /* __nvoc_class_id_GenericKernelFalcon */ 253 254 255 256 struct Subdevice; 257 258 #ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ 259 #define __NVOC_CLASS_Subdevice_TYPEDEF__ 260 typedef struct Subdevice Subdevice; 261 #endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ 262 263 #ifndef __nvoc_class_id_Subdevice 264 #define __nvoc_class_id_Subdevice 0x4b01b3 265 #endif /* __nvoc_class_id_Subdevice */ 266 267 268 struct Device; 269 270 #ifndef __NVOC_CLASS_Device_TYPEDEF__ 271 #define __NVOC_CLASS_Device_TYPEDEF__ 272 typedef struct Device Device; 273 #endif /* __NVOC_CLASS_Device_TYPEDEF__ */ 274 275 #ifndef __nvoc_class_id_Device 276 #define __nvoc_class_id_Device 0xe0ac20 277 #endif /* __nvoc_class_id_Device */ 278 279 280 struct RsClient; 281 282 #ifndef __NVOC_CLASS_RsClient_TYPEDEF__ 283 #define __NVOC_CLASS_RsClient_TYPEDEF__ 284 typedef struct RsClient RsClient; 285 #endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ 286 287 #ifndef __nvoc_class_id_RsClient 288 #define __nvoc_class_id_RsClient 0x8f87e5 289 #endif /* __nvoc_class_id_RsClient */ 290 291 292 struct Memory; 293 294 #ifndef __NVOC_CLASS_Memory_TYPEDEF__ 295 #define __NVOC_CLASS_Memory_TYPEDEF__ 
296 typedef struct Memory Memory; 297 #endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ 298 299 #ifndef __nvoc_class_id_Memory 300 #define __nvoc_class_id_Memory 0x4789f2 301 #endif /* __nvoc_class_id_Memory */ 302 303 304 305 #ifndef PARTITIONID_INVALID 306 #define PARTITIONID_INVALID 0xFFFFFFFF 307 #endif 308 typedef struct MIG_INSTANCE_REF MIG_INSTANCE_REF; 309 typedef struct NV2080_CTRL_GPU_REG_OP NV2080_CTRL_GPU_REG_OP; 310 311 typedef enum 312 { 313 BRANDING_TYPE_UNCACHED, 314 BRANDING_TYPE_NONE, 315 BRANDING_TYPE_QUADRO_GENERIC, 316 BRANDING_TYPE_QUADRO_AD, 317 BRANDING_TYPE_NVS_NVIDIA, // "NVIDIA NVS" 318 BRANDING_TYPE_VGX, 319 } BRANDING_TYPE; 320 321 typedef enum 322 { 323 COMPUTE_BRANDING_TYPE_NONE, 324 COMPUTE_BRANDING_TYPE_TESLA, 325 } COMPUTE_BRANDING_TYPE; 326 327 #define OOR_ARCH_DEF(x) \ 328 NV_ENUM_ENTRY(x, OOR_ARCH_X86_64, 0x00000000) \ 329 NV_ENUM_ENTRY(x, OOR_ARCH_PPC64LE, 0x00000001) \ 330 NV_ENUM_ENTRY(x, OOR_ARCH_ARM, 0x00000002) \ 331 NV_ENUM_ENTRY(x, OOR_ARCH_AARCH64, 0x00000003) \ 332 NV_ENUM_ENTRY(x, OOR_ARCH_NONE, 0x00000004) 333 334 NV_ENUM_DEF(OOR_ARCH, OOR_ARCH_DEF) 335 336 typedef struct 337 { 338 NvU32 classId; 339 NvU32 flags; 340 } GPUCHILDORDER; 341 342 typedef struct 343 { 344 NvU32 classId; 345 NvU32 instances; 346 347 /*! 348 * Pointer to the @ref NVOC_CLASS_INFO for the concrete class to instantiate 349 * for this child. 350 */ 351 const NVOC_CLASS_INFO *pClassInfo; 352 } GPUCHILDPRESENT; 353 354 /*! 
355 * @brief Generates an entry for a list of @ref GPUCHILDPRESENT objects for a 356 * class of the given name 357 * 358 * @param[in] _childClassName 359 * Name of the class for the entry 360 * @param[in] _instances 361 * Number of instances of the child that may be present; see 362 * @ref GPUCHILDPRESENT::instances 363 * 364 * @return An entry suitable for a list of @ref GPUCHILDPRESENT for the given 365 * child of @ref OBJGPU 366 */ 367 #define GPU_CHILD_PRESENT(_childClassName, _instances) \ 368 GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, (_instances), _childClassName) 369 370 /*! 371 * @brief Generates an entry for a list of @ref GPUCHILDPRESENT objects that 372 * allows the @ref OBJGPU child to instantiate a sub-class of the base 373 * @ref OBJGPU child class. 374 * 375 * @details The intention of this macro is to allow a list of 376 * @ref GPUCHILDPRESENT to essentially state "this child should be 377 * present with this concrete class type". This allows for different 378 * @ref GPUCHILDPRESENT lists to request different classes with 379 * different behavior via sub-classes, for the same basic @ref OBJGPU 380 * child. 381 * 382 * @param[in] _childClassName 383 * Name of the base class at which @ref OBJGPU points 384 * @param[in] _instances 385 * Number of instances of the child that may be present; see 386 * @ref GPUCHILDPRESENT::instances 387 * @param[in] _concreteClassName 388 * Name of the sub-class of _childClassName that should actually be 389 * instantiated 390 * 391 * @return An entry suitable for a list of @ref GPUCHILDPRESENT for the given 392 * child of @ref OBJGPU with the given concrete class type. 
393 */ 394 #define GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, _instances, _concreteClassName) \ 395 { \ 396 .classId = classId(_childClassName), \ 397 .instances = (_instances), \ 398 .pClassInfo = classInfo(_concreteClassName) \ 399 } 400 401 // GPU Child Order Flags 402 #define GCO_LIST_INIT NVBIT(0) // entry is used for init ordering (DO NOT USE) 403 #define GCO_LIST_LOAD NVBIT(1) // entry is used for load and postload ordering (DO NOT USE) 404 #define GCO_LIST_UNLOAD NVBIT(2) // entry is used for unload and preunload ordering (DO NOT USE) 405 #define GCO_LIST_DESTROY NVBIT(3) // entry is used for destroy order (DO NOT USE) 406 #define GCO_LIST_ALL (GCO_LIST_INIT | GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY) 407 // ^ entry is used for all list types (RECOMMENDED) 408 #define GCO_ALL (GCO_LIST_ALL) 409 410 411 typedef struct 412 { 413 NvU32 childTypeIdx; 414 NvU32 childInst; 415 NvU32 gpuChildPtrOffset; 416 } GPU_CHILD_ITER; 417 418 typedef GPU_CHILD_ITER ENGSTATE_ITER; 419 typedef GPU_CHILD_ITER PMU_CLIENT_IMPLEMENTER_ITER; 420 421 // 422 // Object 'get' macros for GPU relative object retrievals. 423 // 424 425 #define ENG_GET_GPU(p) objFindAncestorOfType(OBJGPU, (p)) 426 427 // GPU_GET_FIFO_UC is autogenerated, returns per Gpu pFifo. 428 #define GPU_GET_FIFO(p) GPU_GET_FIFO_UC(p) 429 430 // GPU_GET_KERNEL_FIFO_UC is autogenerated, returns per Gpu pKernelFifo. 431 #define GPU_GET_KERNEL_FIFO(p) gpuGetKernelFifoShared(p) 432 433 #define GPU_GET_HEAP(p) (RMCFG_MODULE_HEAP ? MEMORY_MANAGER_GET_HEAP(GPU_GET_MEMORY_MANAGER(p)) : NULL) 434 435 #define GPU_GET_HAL(p) (RMCFG_MODULE_HAL ? (p)->pHal : NULL) 436 437 #define GPU_GET_OS(p) (RMCFG_MODULE_OS ? 
(p)->pOS : NULL) // TBD: replace with SYS_GET_OS
#define GPU_QUICK_PATH_GET_OS(p) GPU_GET_OS(p) // TBD: remove

// Accessor for the GPU object's embedded register-access state.
#define GPU_GET_REGISTER_ACCESS(g) (&(g)->registerAccess)

// Returns the pRmApi that routes to the physical driver, either via RPC or local calls
#define GPU_GET_PHYSICAL_RMAPI(g) (&(g)->physicalRmApi)

//
// Defines and helpers for encoding and decoding PCI domain, bus and device.
//
// Ideally these would live in objbus.h (or somewhere else more appropriate) and
// not gpu/gpu.h, but keep them here for now while support for 32-bit domains is
// being added as part of bug 1904645.
//

// DRF macros for GPUBUSINFO::nvDomainBusDeviceFunc
// Packed NvU64 layout: domain in bits 63:32, bus in bits 15:8, device in
// bits 7:0 (bits 31:16 are unused here).
// NOTE(review): despite "Func" in the consumer field name, no PCI function
// field is defined in this encoding.
#define NVGPU_BUSDEVICE_DOMAIN 63:32
#define NVGPU_BUSDEVICE_BUS 15:8
#define NVGPU_BUSDEVICE_DEVICE 7:0

//! @brief Extract the 32-bit PCI domain (bits 63:32) from a packed DBD value.
static NV_INLINE NvU32 gpuDecodeDomain(NvU64 gpuDomainBusDevice)
{
    return (NvU32)DRF_VAL64(GPU, _BUSDEVICE, _DOMAIN, gpuDomainBusDevice);
}

//! @brief Extract the PCI bus number (bits 15:8) from a packed DBD value.
static NV_INLINE NvU8 gpuDecodeBus(NvU64 gpuDomainBusDevice)
{
    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _BUS, gpuDomainBusDevice);
}

//! @brief Extract the PCI device number (bits 7:0) from a packed DBD value.
static NV_INLINE NvU8 gpuDecodeDevice(NvU64 gpuDomainBusDevice)
{
    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _DEVICE, gpuDomainBusDevice);
}

//! @brief Pack domain, bus and device into the NvU64 layout described above.
static NV_INLINE NvU64 gpuEncodeDomainBusDevice(NvU32 domain, NvU8 bus, NvU8 device)
{
    return DRF_NUM64(GPU, _BUSDEVICE, _DOMAIN, domain) |
           DRF_NUM64(GPU, _BUSDEVICE, _BUS, bus) |
           DRF_NUM64(GPU, _BUSDEVICE, _DEVICE, device);
}

//! @brief Pack bus and device (with domain forced to 0) into a 32-bit value.
//! The truncating cast is safe because the bus and device fields occupy only
//! bits 15:0 of the 64-bit encoding.
static NV_INLINE NvU32 gpuEncodeBusDevice(NvU8 bus, NvU8 device)
{
    NvU64 busDevice = gpuEncodeDomainBusDevice(0, bus, device);

    // Bus and device are guaranteed to fit in the lower 32bits
    return (NvU32)busDevice;
}

//
// Generate a 32-bit id from domain, bus and device.
490 // 491 NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device); 492 493 // 494 // Generate a 32-bit id from a physical address 495 // 496 NvU32 gpuGenerate32BitIdFromPhysAddr(RmPhysAddr addr); 497 498 // 499 // Helpers for getting domain, bus and device of a GPU 500 // 501 // Ideally these would be inline functions, but NVOC doesn't support that today, 502 // tracked in bug 1905882 503 // 504 #define gpuGetDBDF(pGpu) ((pGpu)->busInfo.nvDomainBusDeviceFunc) 505 #define gpuGetDomain(pGpu) gpuDecodeDomain((pGpu)->busInfo.nvDomainBusDeviceFunc) 506 #define gpuGetBus(pGpu) gpuDecodeBus((pGpu)->busInfo.nvDomainBusDeviceFunc) 507 #define gpuGetDevice(pGpu) gpuDecodeDevice((pGpu)->busInfo.nvDomainBusDeviceFunc) 508 509 #undef NVGPU_BUSDEVICE_DOMAIN 510 #undef NVGPU_BUSDEVICE_BUS 511 #undef NVGPU_BUSDEVICE_DEVICE 512 513 // 514 // MaskRevision constants. 515 // 516 #define GPU_NO_MASK_REVISION 0x00 517 #define GPU_MASK_REVISION_A1 0xA1 518 #define GPU_MASK_REVISION_A2 0xA2 519 #define GPU_MASK_REVISION_A3 0xA3 520 #define GPU_MASK_REVISION_A4 0xA4 521 #define GPU_MASK_REVISION_A5 0xA5 522 #define GPU_MASK_REVISION_A6 0xA6 523 #define GPU_MASK_REVISION_B1 0xB1 524 #define GPU_MASK_REVISION_B2 0xB2 525 #define GPU_MASK_REVISION_C1 0xC1 526 #define GPU_MASK_REVISION_D1 0xD1 527 528 #define GPU_GET_MASKREVISION(pGpu) (((gpuGetChipMajRev(pGpu))<<4)|(gpuGetChipMinRev(pGpu))) 529 530 // 531 // Revision constants. 
532 // 533 #define GPU_NO_REVISION 0xFF 534 #define GPU_REVISION_0 0x00 535 #define GPU_REVISION_1 0x01 536 #define GPU_REVISION_2 0x02 537 #define GPU_REVISION_3 0x03 538 #define GPU_REVISION_4 0x04 539 #define GPU_REVISION_5 0x05 540 #define GPU_REVISION_6 0x06 541 #define GPU_REVISION_7 0x07 542 #define GPU_REVISION_8 0x08 543 #define GPU_REVISION_9 0x09 544 #define GPU_REVISION_A 0x0A 545 #define GPU_REVISION_B 0x0B 546 #define GPU_REVISION_C 0x0C 547 #define GPU_REVISION_D 0x0D 548 #define GPU_REVISION_E 0x0E 549 #define GPU_REVISION_F 0x0F 550 551 // 552 // One extra nibble should be added to the architecture version read from the 553 // PMC boot register to represent the architecture number in RM. 554 // 555 #define GPU_ARCH_SHIFT 0x4 556 557 // Registry key for inst mem modification defines 558 #define INSTMEM_TAG_MASK (0xf0000000) 559 #define INSTMEM_TAG(a) ((INSTMEM_TAG_MASK & (a)) >> 28) 560 561 562 typedef struct 563 { 564 565 NvU32 PCIDeviceID; 566 NvU32 Manufacturer; 567 NvU32 PCISubDeviceID; 568 NvU32 PCIRevisionID; 569 NvU32 Subrevision; 570 571 } GPUIDINFO; 572 573 574 typedef struct 575 { 576 NvU32 impl; 577 NvU32 arch; 578 NvU32 majorRev; 579 NvU32 minorRev; 580 NvU32 minorExtRev; 581 } PMCBOOT0; 582 583 typedef struct 584 { 585 NvU32 impl; 586 NvU32 arch; 587 NvU32 majorRev; 588 NvU32 minorRev; 589 NvU32 minorExtRev; 590 } PMCBOOT42; 591 592 // 593 // Random collection of bus-related configuration state. 
//
// Bus/PCI configuration state for one GPU.
typedef struct
{
    // Physical aperture addresses; the field names suggest register, FB,
    // instance-memory and IO apertures — populated outside this header,
    // TODO confirm exact semantics against the code that fills them in.
    RmPhysAddr gpuPhysAddr;
    RmPhysAddr gpuPhysFbAddr;
    RmPhysAddr gpuPhysInstAddr;
    RmPhysAddr gpuPhysIoAddr;
    NvU32 iovaspaceId;
    NvU32 IntLine;
    NvU32 IsrHooked;
    NvU64 nvDomainBusDeviceFunc; // packed PCI domain/bus/device; see the
                                 // NVGPU_BUSDEVICE_* DRF fields and
                                 // gpuEncodeDomainBusDevice()/gpuGetDBDF()
    OOR_ARCH oorArch;            // host CPU architecture (OOR_ARCH_DEF enum)
} GPUBUSINFO;

// Per-GPU class database: the classes this GPU exposes plus an optional
// suppression list (NOTE(review): semantics inferred from names — confirm
// against the class-DB code).
typedef struct
{
    PCLASSDESCRIPTOR pClasses;
    NvU32 *pSuppressClasses;
    NvU32 numClasses;
    NvBool bSuppressRead;
} GPUCLASSDB, *PGPUCLASSDB;

// Class-descriptor table plus per-phase engine descriptor lists
// (init / destroy / load / unload) used for engine ordering.
typedef struct
{
    const CLASSDESCRIPTOR *pClassDescriptors;
    NvU32 numClassDescriptors;

    PENGDESCRIPTOR pEngineInitDescriptors;
    PENGDESCRIPTOR pEngineDestroyDescriptors;
    PENGDESCRIPTOR pEngineLoadDescriptors;
    PENGDESCRIPTOR pEngineUnloadDescriptors;
    NvU32 numEngineDescriptors; // length shared by the four lists above — TODO confirm
} GPU_ENGINE_ORDER, *PGPU_ENGINE_ORDER;

//
// PCI Express Support
//
// A PCI address (domain/bus/device/function) plus an opaque OS handle.
typedef struct NBADDR
{
    NvU32 domain;
    NvU8 bus;
    NvU8 device;
    NvU8 func;
    NvU8 valid;   // nonzero once this address has been populated — TODO confirm
    void *handle; // opaque OS/platform handle for the device
} NBADDR;

typedef struct
{
    NBADDR addr;
    void *vAddr;              // virtual address of the port, if it has been mapped . Not used starting with Win10 BuildXXXXX
    NvU32 PCIECapPtr;         // offset of the PCIE capptr in the NB
                              // Capability register set in enhanced configuration space
                              //
    NvU32 PCIEErrorCapPtr;    // offset of the Advanced Error Reporting Capability register set
    NvU32 PCIEVCCapPtr;       // offset of the Virtual Channel (VC) Capability register set
    NvU32 PCIEL1SsCapPtr;     // Offset of the L1 Substates Capabilities
    NvU16 DeviceID, VendorID; // device and vendor ID for port
} PORTDATA;

typedef struct // GPU specific data for core logic object, stored in GPU object
{
    PORTDATA upstreamPort;        // the upstream port info for the GPU
                                  // If there is a switch this is equal to boardDownstreamPort
                                  // If there is no switch this is equal to rootPort
    PORTDATA rootPort;            // The root port of the PCI-E root complex
    PORTDATA boardUpstreamPort;   // If there is no BR03 this is equal to rootPort.
    PORTDATA boardDownstreamPort; // If there is no BR03 these data are not set.
} GPUCLDATA;

// For SLI Support Using Peer Model
typedef struct
{
    OBJGPU *pGpu; // Mapping from the local pinset number (i.e. array index) to peer GPU
    NvU32 pinset; // Mapping from the local pinset number (i.e. array index) to peer pinset number
} _GPU_SLI_PEER;


//
// Flags for gpuStateLoad() and gpuStateUnload() routines. Flags *must* be used
// symmetrically across an Unload/Load pair.
//
#define GPU_STATE_FLAGS_PRESERVING     NVBIT(0) // GPU state is preserved
#define GPU_STATE_FLAGS_VGA_TRANSITION NVBIT(1) // To be used with GPU_STATE_FLAGS_PRESERVING.
#define GPU_STATE_FLAGS_PM_TRANSITION  NVBIT(2) // To be used with GPU_STATE_FLAGS_PRESERVING.
#define GPU_STATE_FLAGS_PM_SUSPEND     NVBIT(3)
#define GPU_STATE_FLAGS_PM_HIBERNATE   NVBIT(4)
#define GPU_STATE_FLAGS_GC6_TRANSITION NVBIT(5) // To be used with GPU_STATE_FLAGS_PRESERVING.
682 #define GPU_STATE_DEFAULT 0 // Default flags for destructive state loads 683 // and unloads 684 685 struct OBJHWBC; 686 typedef struct hwbc_list 687 { 688 struct OBJHWBC *pHWBC; 689 struct hwbc_list *pNext; 690 } HWBC_LIST; 691 692 /*! 693 * GFID allocation state 694 */ 695 typedef enum 696 { 697 GFID_FREE = 0, 698 GFID_ALLOCATED = 1, 699 GFID_INVALIDATED = 2, 700 } GFID_ALLOC_STATUS; 701 702 typedef struct SRIOV_P2P_INFO 703 { 704 NvU32 gfid; 705 NvBool bAllowP2pAccess; 706 NvU32 accessRefCount; 707 NvU32 destRefCount; 708 } SRIOV_P2P_INFO, *PSRIOV_P2P_INFO; 709 710 typedef struct 711 { 712 NvU32 peerGpuId; 713 NvU32 peerGpuInstance; 714 NvU32 p2pCaps; 715 NvU32 p2pOptimalReadCEs; 716 NvU32 p2pOptimalWriteCEs; 717 NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; 718 NvU32 busPeerId; 719 } GPU_P2P_PEER_GPU_CAPS; 720 721 // 722 // typedef of private struct used in OBJGPU's data field 723 // 724 725 typedef struct 726 { 727 NvBool isInitialized; 728 NvU8 uuid[RM_SHA1_GID_SIZE]; 729 } _GPU_UUID; 730 731 typedef struct 732 { 733 NvBool bValid; 734 NvU8 id; 735 } _GPU_PCIE_PEER_CLIQUE; 736 737 typedef struct 738 { 739 NvU32 platformId; // used to identify soc 740 NvU32 implementationId; // soc-specific 741 NvU32 revisionId; // soc-revision 742 PMCBOOT0 pmcBoot0; 743 PMCBOOT42 pmcBoot42; 744 NvU8 subRevision; // sub-revision (NV_FUSE_OPT_SUBREVISION on GPU) 745 } _GPU_CHIP_INFO; 746 747 748 // Engine Database 749 typedef struct 750 { 751 NvU32 size; 752 RM_ENGINE_TYPE *pType; 753 NvBool bValid; 754 } _GPU_ENGINE_DB; 755 756 #define MAX_NUM_BARS (8) 757 // SRIOV state 758 typedef struct 759 { 760 /*! 761 * Total number of VFs available in this GPU 762 */ 763 NvU32 totalVFs; 764 765 /*! 766 * First VF Offset 767 */ 768 NvU32 firstVFOffset; 769 770 /*! 771 * Max GFID possible 772 */ 773 NvU32 maxGfid; 774 775 /*! 776 * Physical offset of Virtual BAR0 register. 
Stores the offset if the GPU is 777 * a physical function, else 0 778 */ 779 NvU32 virtualRegPhysOffset; 780 781 /*! 782 * Allocated GFIDs. Will be used to ensure plugins doesn't use same GFID for multiple VFs 783 */ 784 NvU8 *pAllocatedGfids; 785 786 /*! 787 * The sizes of the BAR regions on the VF 788 */ 789 NvU64 vfBarSize[MAX_NUM_BARS]; 790 791 /*! 792 * First PF's BAR addresses 793 */ 794 NvU64 firstVFBarAddress[MAX_NUM_BARS]; 795 796 /*! 797 * If the VF BARs are 64-bit addressable 798 */ 799 NvBool b64bitVFBar0; 800 NvBool b64bitVFBar1; 801 NvBool b64bitVFBar2; 802 803 /*! 804 * GFID used for P2P access 805 */ 806 PSRIOV_P2P_INFO pP2PInfo; 807 NvBool bP2PAllocated; 808 NvU32 maxP2pGfid; 809 NvU32 p2pFabricPartitionId; 810 } _GPU_SRIOV_STATE; 811 812 // Max # of instances for GPU children 813 #define GPU_MAX_CES 10 814 #define GPU_MAX_GRS 8 815 #define GPU_MAX_FIFOS 1 816 #define GPU_MAX_MSENCS NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS 817 #define GPU_MAX_NVDECS NV2080_CTRL_CMD_INTERNAL_MAX_BSPS 818 #define GPU_MAX_NVJPGS 8 819 #define GPU_MAX_HSHUBS 5 820 #define GPU_MAX_OFAS 1 821 822 // 823 // Macro defines for OBJGPU fields -- Macro defines inside NVOC class block is 824 // gone after NVOC preprocessing stage. For macros used outside gpu/gpu.h should 825 // not be defined inside the class block. 826 // 827 828 // 829 // Maximum number of Falcon objects that can be allocated on one GPU. 830 // This is purely a software limit and can be raised freely as more are added. 
831 // 832 #define GPU_MAX_FALCON_ENGINES \ 833 ENG_IOCTRL__SIZE_1 + \ 834 ENG_GPCCS__SIZE_1 + \ 835 ENG_FECS__SIZE_1 + \ 836 ENG_NVJPEG__SIZE_1 + \ 837 ENG_NVDEC__SIZE_1 + \ 838 ENG_MSENC__SIZE_1 + \ 839 32 840 841 #define GPU_MAX_VIDEO_ENGINES \ 842 (ENG_NVJPEG__SIZE_1 + \ 843 ENG_NVDEC__SIZE_1 + \ 844 ENG_MSENC__SIZE_1 + \ 845 ENG_OFA__SIZE_1) 846 847 // for OBJGPU::pRmCtrlDeferredCmd 848 #define MAX_DEFERRED_CMDS 2 849 850 // for OBJGPU::computeModeRefCount 851 #define NV_GPU_MODE_GRAPHICS_MODE 0x00000001 852 #define NV_GPU_MODE_COMPUTE_MODE 0x00000002 853 #define NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT 0x0000000a 854 #define NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT 0x0000000b 855 856 // 857 // Structure to hold information obtained from 858 // parsing the DEVICE_INFO2 table during init. 859 // 860 861 typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO DEVICE_INFO2_ENTRY; 862 863 864 //! Value of DEV_GROUP_ID used in gpuGetDeviceEntryByType for any group ID. 865 #define DEVICE_INFO2_ENTRY_GROUP_ID_ANY (-1) 866 867 #define NV_GPU_INTERNAL_DEVICE_HANDLE 0xABCD0080 868 #define NV_GPU_INTERNAL_SUBDEVICE_HANDLE 0xABCD2080 869 870 // 871 // NV GPU simulation mode defines 872 // Keep in sync with os.h SIM MODE defines until osGetSimulationMode is deprecated. 
873 // 874 #ifndef NV_SIM_MODE_DEFS 875 #define NV_SIM_MODE_DEFS 876 #define NV_SIM_MODE_HARDWARE 0U 877 #define NV_SIM_MODE_RTL 1U 878 #define NV_SIM_MODE_CMODEL 2U 879 #define NV_SIM_MODE_MODS_AMODEL 3U 880 #define NV_SIM_MODE_TEGRA_FPGA 4U 881 #define NV_SIM_MODE_INVALID (~0x0U) 882 #endif 883 884 #define GPU_IS_NVSWITCH_DETECTED(pGpu) \ 885 (pGpu->nvswitchSupport == NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_SUPPORTED) 886 887 888 // 889 // The actual GPU object definition 890 // 891 #ifdef NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED 892 #define PRIVATE_FIELD(x) x 893 #else 894 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) 895 #endif 896 struct OBJGPU { 897 const struct NVOC_RTTI *__nvoc_rtti; 898 struct Object __nvoc_base_Object; 899 struct RmHalspecOwner __nvoc_base_RmHalspecOwner; 900 struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; 901 struct Object *__nvoc_pbase_Object; 902 struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; 903 struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; 904 struct OBJGPU *__nvoc_pbase_OBJGPU; 905 NV_STATUS (*__gpuConstructDeviceInfoTable__)(struct OBJGPU *); 906 NV_STATUS (*__gpuWriteBusConfigReg__)(struct OBJGPU *, NvU32, NvU32); 907 NV_STATUS (*__gpuReadBusConfigReg__)(struct OBJGPU *, NvU32, NvU32 *); 908 NV_STATUS (*__gpuReadBusConfigRegEx__)(struct OBJGPU *, NvU32, NvU32 *, THREAD_STATE_NODE *); 909 NV_STATUS (*__gpuReadFunctionConfigReg__)(struct OBJGPU *, NvU32, NvU32, NvU32 *); 910 NV_STATUS (*__gpuWriteFunctionConfigReg__)(struct OBJGPU *, NvU32, NvU32, NvU32); 911 NV_STATUS (*__gpuWriteFunctionConfigRegEx__)(struct OBJGPU *, NvU32, NvU32, NvU32, THREAD_STATE_NODE *); 912 NV_STATUS (*__gpuReadVgpuConfigReg__)(struct OBJGPU *, NvU32, NvU32 *); 913 void (*__gpuGetIdInfo__)(struct OBJGPU *); 914 void (*__gpuHandleSanityCheckRegReadError__)(struct OBJGPU *, NvU32, NvU32); 915 void (*__gpuHandleSecFault__)(struct OBJGPU *); 916 const GPUCHILDPRESENT *(*__gpuGetChildrenPresent__)(struct OBJGPU *, NvU32 *); 917 const CLASSDESCRIPTOR 
*(*__gpuGetClassDescriptorList__)(struct OBJGPU *, NvU32 *); 918 NvU32 (*__gpuGetPhysAddrWidth__)(struct OBJGPU *, NV_ADDRESS_SPACE); 919 NvBool (*__gpuFuseSupportsDisplay__)(struct OBJGPU *); 920 NV_STATUS (*__gpuClearFbhubPoisonIntrForBug2924523__)(struct OBJGPU *); 921 void (*__gpuReadDeviceId__)(struct OBJGPU *, NvU32 *, NvU32 *); 922 NvU64 (*__gpuGetFlaVasSize__)(struct OBJGPU *, NvBool); 923 void (*__gpuDetermineSelfHostedMode__)(struct OBJGPU *); 924 void (*__gpuDetermineMIGSupport__)(struct OBJGPU *); 925 NvBool (*__gpuIsAtsSupportedWithSmcMemPartitioning__)(struct OBJGPU *); 926 NvBool (*__gpuIsSliCapableWithoutDisplay__)(struct OBJGPU *); 927 NvBool (*__gpuIsCCEnabledInHw__)(struct OBJGPU *); 928 NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *); 929 NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *); 930 NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED; 931 NvBool bVideoLinkDisabled; 932 GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel; 933 NvU32 moduleId; 934 NvU8 nvswitchSupport; 935 NvBool PDB_PROP_GPU_IN_STANDBY; 936 NvBool PDB_PROP_GPU_IN_HIBERNATE; 937 NvBool PDB_PROP_GPU_IN_PM_CODEPATH; 938 NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH; 939 NvBool PDB_PROP_GPU_STATE_INITIALIZED; 940 NvBool PDB_PROP_GPU_EMULATION; 941 NvBool PDB_PROP_GPU_PRIMARY_DEVICE; 942 NvBool PDB_PROP_GPU_HYBRID_MGPU; 943 NvBool PDB_PROP_GPU_ALTERNATE_TREE_ENABLED; 944 NvBool PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS; 945 NvBool PDB_PROP_GPU_3D_CONTROLLER; 946 NvBool PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM; 947 NvBool PDB_PROP_GPU_IS_CONNECTED; 948 NvBool PDB_PROP_GPU_BROKEN_FB; 949 NvBool PDB_PROP_GPU_IN_FULLCHIP_RESET; 950 NvBool PDB_PROP_GPU_IN_SECONDARY_BUS_RESET; 951 NvBool PDB_PROP_GPU_IN_GC6_RESET; 952 NvBool PDB_PROP_GPU_IS_GEMINI; 953 NvBool PDB_PROP_GPU_PERSISTENT_SW_STATE; 954 NvBool PDB_PROP_GPU_COHERENT_CPU_MAPPING; 955 NvBool PDB_PROP_GPU_IS_LOST; 956 NvBool PDB_PROP_GPU_IN_TIMEOUT_RECOVERY; 957 NvBool PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT; 958 NvBool 
PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY; 959 NvBool PDB_PROP_GPU_TEGRA_SOC_IGPU; 960 NvBool PDB_PROP_GPU_ATS_SUPPORTED; 961 NvBool PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING; 962 NvBool PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE; 963 NvBool PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE; 964 NvBool PDB_PROP_GPU_IS_UEFI; 965 NvBool PDB_PROP_GPU_ZERO_FB; 966 NvBool PDB_PROP_GPU_BAR1_BAR2_DISABLED; 967 NvBool PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE; 968 NvBool PDB_PROP_GPU_MIG_SUPPORTED; 969 NvBool PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED; 970 NvBool PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED; 971 NvBool PDB_PROP_GPU_IS_COT_ENABLED; 972 NvBool PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE; 973 NvBool PDB_PROP_GPU_SWRL_GRANULAR_LOCKING; 974 NvBool PDB_PROP_GPU_IN_SLI_LINK_CODEPATH; 975 NvBool PDB_PROP_GPU_IS_PLX_PRESENT; 976 NvBool PDB_PROP_GPU_IS_BR03_PRESENT; 977 NvBool PDB_PROP_GPU_IS_BR04_PRESENT; 978 NvBool PDB_PROP_GPU_BEHIND_BRIDGE; 979 NvBool PDB_PROP_GPU_BEHIND_BR03; 980 NvBool PDB_PROP_GPU_BEHIND_BR04; 981 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED; 982 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED; 983 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED; 984 NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY; 985 NvBool PDB_PROP_GPU_RM_UNLINKED_SLI; 986 NvBool PDB_PROP_GPU_SLI_LINK_ACTIVE; 987 NvBool PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST; 988 NvBool PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH; 989 NvBool PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL; 990 NvBool PDB_PROP_GPU_IS_MOBILE; 991 NvBool PDB_PROP_GPU_RTD3_GC6_SUPPORTED; 992 NvBool PDB_PROP_GPU_RTD3_GC6_ACTIVE; 993 NvBool PDB_PROP_GPU_FAST_GC6_ACTIVE; 994 NvBool PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED; 995 NvBool PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA; 996 NvBool PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED; 997 NvBool PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED; 998 NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERING; 999 NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERED; 1000 NvBool PDB_PROP_GPU_ACCOUNTING_ON; 1001 NvBool 
PDB_PROP_GPU_INACCESSIBLE; 1002 NvBool PDB_PROP_GPU_NVLINK_SYSMEM; 1003 NvBool PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK; 1004 NvBool PDB_PROP_GPU_C2C_SYSMEM; 1005 NvBool PDB_PROP_GPU_IN_TCC_MODE; 1006 NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE; 1007 NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K; 1008 NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT; 1009 NvBool PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT; 1010 NvBool PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS; 1011 NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU; 1012 NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA; 1013 NvBool PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED; 1014 NvBool PDB_PROP_GPU_NV_USERMODE_ENABLED; 1015 NvBool PDB_PROP_GPU_IN_FATAL_ERROR; 1016 NvBool PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE; 1017 NvBool PDB_PROP_GPU_VGA_ENABLED; 1018 NvBool PDB_PROP_GPU_IS_MXM_3X; 1019 NvBool PDB_PROP_GPU_GSYNC_III_ATTACHED; 1020 NvBool PDB_PROP_GPU_QSYNC_II_ATTACHED; 1021 NvBool PDB_PROP_GPU_CC_FEATURE_CAPABLE; 1022 NvBool PDB_PROP_GPU_APM_FEATURE_CAPABLE; 1023 NvBool PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX; 1024 NvBool PDB_PROP_GPU_SKIP_TABLE_CE_MAP; 1025 NvBool PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF; 1026 NvBool PDB_PROP_GPU_IS_SOC_SDM; 1027 NvBool PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL; 1028 OS_GPU_INFO *pOsGpuInfo; 1029 OS_RM_CAPS *pOsRmCaps; 1030 NvU32 halImpl; 1031 void *hPci; 1032 GpuEngineEventNotificationList *engineNonstallIntrEventNotifications[63]; 1033 NvBool bIsSOC; 1034 NvU32 gpuInstance; 1035 NvU32 gpuDisabled; 1036 NvU32 gpuId; 1037 NvU32 boardId; 1038 NvU32 deviceInstance; 1039 NvU32 subdeviceInstance; 1040 NvS32 numaNodeId; 1041 _GPU_UUID gpuUuid; 1042 NvU32 gpuPhysicalId; 1043 NvU32 gpuTerminatedLinkMask; 1044 NvBool gpuLinkTerminationEnabled; 1045 NvBool gspRmInitialized; 1046 _GPU_PCIE_PEER_CLIQUE pciePeerClique; 1047 NvU32 i2cPortForExtdev; 1048 GPUIDINFO idInfo; 1049 _GPU_CHIP_INFO chipInfo; 1050 GPUBUSINFO busInfo; 1051 const GPUCHILDPRESENT *pChildrenPresent; 1052 NvU32 
numChildrenPresent; 1053 GPU_ENGINE_ORDER engineOrder; 1054 GPUCLASSDB classDB; 1055 NvU32 chipId0; 1056 NvU32 chipId1; 1057 NvU32 pmcEnable; 1058 NvU32 pmcRmOwnsIntrMask; 1059 NvBool testIntr; 1060 NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *gspSupportedEngines; 1061 NvU32 numCEs; 1062 NvU32 ceFaultMethodBufferSize; 1063 NvBool isVirtual; 1064 NvBool isGspClient; 1065 NvU64 fbLength; 1066 NvU32 instLength; 1067 NvBool instSetViaAttachArg; 1068 NvU32 activeFBIOs; 1069 NvU64 gpuVbiosPostTime; 1070 NvU32 uefiScanoutSurfaceSizeInMB; 1071 RmPhysAddr dmaStartAddress; 1072 NvU32 gpuDeviceMapCount; 1073 DEVICE_MAPPING deviceMappings[60]; 1074 struct IoAperture *pIOApertures[12]; 1075 DEVICE_MAPPING *pDeviceMappingsByDeviceInstance[12]; 1076 void *gpuCfgAddr; 1077 TIMEOUT_DATA timeoutData; 1078 NvU32 computeModeRules; 1079 NvS32 computeModeRefCount; 1080 NvHandle hComputeModeReservation; 1081 NvBool bIsDebugModeEnabled; 1082 NvU32 masterFromSLIConfig; 1083 NvU32 sliStatus; 1084 PENG_INFO_LINK_NODE infoList; 1085 struct OBJOS *pOS; 1086 struct OBJHAL *pHal; 1087 struct KernelBif *pKernelBif; 1088 struct KernelMc *pKernelMc; 1089 struct SwIntr *pSwIntr; 1090 struct KernelMemorySystem *pKernelMemorySystem; 1091 struct MemoryManager *pMemoryManager; 1092 struct KernelDisplay *pKernelDisplay; 1093 struct OBJTMR *pTmr; 1094 struct KernelBus *pKernelBus; 1095 struct KernelGmmu *pKernelGmmu; 1096 struct KernelSec2 *pKernelSec2; 1097 struct KernelGsp *pKernelGsp; 1098 struct VirtMemAllocator *pDma; 1099 struct KernelMIGManager *pKernelMIGManager; 1100 struct KernelGraphicsManager *pKernelGraphicsManager; 1101 struct KernelGraphics *pKernelGraphics[8]; 1102 struct KernelPerf *pKernelPerf; 1103 struct KernelRc *pKernelRc; 1104 struct Intr *pIntr; 1105 struct KernelPmu *pKernelPmu; 1106 struct KernelCE *pKCe[10]; 1107 struct KernelFifo *pKernelFifo; 1108 struct OBJUVM *pUvm; 1109 struct NvDebugDump *pNvd; 1110 struct KernelNvlink *pKernelNvlink; 1111 struct OBJGPUMON *pGpuMon; 1112 struct 
OBJSWENG *pSwEng; 1113 struct KernelFsp *pKernelFsp; 1114 struct ConfidentialCompute *pConfCompute; 1115 struct KernelCcu *pKernelCcu; 1116 HWBC_LIST *pHWBCList; 1117 GPUCLDATA gpuClData; 1118 _GPU_ENGINE_DB engineDB; 1119 NvU32 engineDBSize; 1120 NvU32 instCacheOverride; 1121 NvS32 numOfMclkLockRequests; 1122 NvU32 netlistNum; 1123 RmCtrlDeferredCmd pRmCtrlDeferredCmd[2]; 1124 ACPI_DATA acpi; 1125 ACPI_METHOD_DATA acpiMethodData; 1126 NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS backLightMethodData; 1127 NvU32 activeFifoEventMthdNotifiers; 1128 struct Falcon *constructedFalcons[70]; 1129 NvU32 numConstructedFalcons; 1130 struct GenericKernelFalcon *genericKernelFalcons[70]; 1131 NvU32 numGenericKernelFalcons; 1132 struct KernelVideoEngine *kernelVideoEngines[20]; 1133 NvU32 numKernelVideoEngines; 1134 NvU8 *pUserRegisterAccessMap; 1135 NvU8 *pUnrestrictedRegisterAccessMap; 1136 NvU32 userRegisterAccessMapSize; 1137 struct PrereqTracker *pPrereqTracker; 1138 RegisterAccess registerAccess; 1139 NvBool bUseRegisterAccessMap; 1140 NvU32 *pRegopOffsetScratchBuffer; 1141 NvU32 *pRegopOffsetAddrScratchBuffer; 1142 NvU32 regopScratchBufferMaxOffsets; 1143 _GPU_SRIOV_STATE sriovState; 1144 NvU64 vmmuSegmentSize; 1145 NvHandle hDefaultClientShare; 1146 NvHandle hDefaultClientShareDevice; 1147 NvHandle hDefaultClientShareSubDevice; 1148 NvU32 externalKernelClientCount; 1149 DEVICE_INFO2_ENTRY *pDeviceInfoTable; 1150 NvU32 numDeviceInfoEntries; 1151 NvHandle hInternalClient; 1152 NvHandle hInternalDevice; 1153 NvHandle hInternalSubdevice; 1154 struct Subdevice *pCachedSubdevice; 1155 struct RsClient *pCachedRsClient; 1156 RM_API physicalRmApi; 1157 struct Subdevice **pSubdeviceBackReferences; 1158 NvU32 numSubdeviceBackReferences; 1159 NvU32 maxSubdeviceBackReferences; 1160 NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo; 1161 NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *boardInfo; 1162 GpuSharedDataMap userSharedData; 1163 NvBool bBar2MovedByVtd; 1164 NvBool 
bBar1Is64Bit; 1165 NvBool bSurpriseRemovalSupported; 1166 NvBool bTwoStageRcRecoveryEnabled; 1167 NvBool bReplayableTraceEnabled; 1168 NvBool bInD3Cold; 1169 NvBool bIsSimulation; 1170 NvBool bIsModsAmodel; 1171 NvBool bIsFmodel; 1172 NvBool bIsRtlsim; 1173 NvBool bIsPassthru; 1174 NvBool bIsVirtualWithSriov; 1175 NvU32 P2PPeerGpuCount; 1176 GPU_P2P_PEER_GPU_CAPS P2PPeerGpuCaps[32]; 1177 NvBool bIsSelfHosted; 1178 NvBool bStateLoading; 1179 NvBool bStateUnloading; 1180 NvBool bStateLoaded; 1181 NvBool bFullyConstructed; 1182 NvBool bBf3WarBug4040336Enabled; 1183 NvBool bUnifiedMemorySpaceEnabled; 1184 NvBool bSriovEnabled; 1185 NvBool bWarBug200577889SriovHeavyEnabled; 1186 NvBool bNonPowerOf2ChannelCountSupported; 1187 NvBool bCacheOnlyMode; 1188 NvBool bNeed4kPageIsolation; 1189 NvBool bSplitVasManagementServerClientRm; 1190 NvU32 instLocOverrides; 1191 NvU32 instLocOverrides2; 1192 NvU32 instLocOverrides3; 1193 NvU32 instLocOverrides4; 1194 NvBool bInstLoc47bitPaWar; 1195 NvU32 instVprOverrides; 1196 NvU32 optimizeUseCaseOverride; 1197 NvS16 fecsCtxswLogConsumerCount; 1198 NvS16 videoCtxswLogConsumerCount; 1199 EventBufferMap vgpuFecsTraceStagingBindings; 1200 FecsEventBufferBindMultiMap fecsEventBufferBindingsUid; 1201 TMR_EVENT *pFecsTimerEvent; 1202 struct OBJVASPACE *pFabricVAS; 1203 NvBool bPipelinedPteMemEnabled; 1204 NvBool bIsBarPteInSysmemSupported; 1205 NvBool bRegUsesGlobalSurfaceOverrides; 1206 NvBool bClientRmAllocatedCtxBuffer; 1207 NvBool bIterativeMmuWalker; 1208 NvBool bEccPageRetirementWithSliAllowed; 1209 NvBool bVidmemPreservationBrokenBug3172217; 1210 NvBool bInstanceMemoryAlwaysCached; 1211 NvBool bUseRpcSimEscapes; 1212 NvBool bRmProfilingPrivileged; 1213 NvBool bGeforceSmb; 1214 NvBool bIsGeforce; 1215 NvBool bIsQuadro; 1216 NvBool bIsVgx; 1217 NvBool bIsNvidiaNvs; 1218 NvBool bIsTitan; 1219 NvBool bIsTesla; 1220 NvBool bIsAC; 1221 BRANDING_TYPE brandingCache; 1222 NvBool bComputePolicyTimesliceSupported; 1223 NvBool 
bGlobalPoisonFuseEnabled; 1224 RmPhysAddr simAccessBufPhysAddr; 1225 NvU32 fabricProbeRegKeyOverride; 1226 NvU8 fabricProbeRetryDelay; 1227 NvU8 fabricProbeSlowdownThreshold; 1228 NvBool bVgpuGspPluginOffloadEnabled; 1229 NvBool bSriovCapable; 1230 NvBool bRecheckSliSupportAtResume; 1231 NvBool bGpuNvEncAv1Supported; 1232 _GPU_SLI_PEER peer[2]; 1233 NvBool bIsGspOwnedFaultBuffersEnabled; 1234 NvBool bVideoTraceLogSupported; 1235 _GPU_GC6_STATE gc6State; 1236 }; 1237 1238 #ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ 1239 #define __NVOC_CLASS_OBJGPU_TYPEDEF__ 1240 typedef struct OBJGPU OBJGPU; 1241 #endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ 1242 1243 #ifndef __nvoc_class_id_OBJGPU 1244 #define __nvoc_class_id_OBJGPU 0x7ef3cb 1245 #endif /* __nvoc_class_id_OBJGPU */ 1246 1247 extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; 1248 1249 #define __staticCast_OBJGPU(pThis) \ 1250 ((pThis)->__nvoc_pbase_OBJGPU) 1251 1252 #ifdef __nvoc_gpu_h_disabled 1253 #define __dynamicCast_OBJGPU(pThis) ((OBJGPU*)NULL) 1254 #else //__nvoc_gpu_h_disabled 1255 #define __dynamicCast_OBJGPU(pThis) \ 1256 ((OBJGPU*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPU))) 1257 #endif //__nvoc_gpu_h_disabled 1258 1259 #define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_CAST 1260 #define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_NAME PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL 1261 #define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_CAST 1262 #define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GC6_SUPPORTED 1263 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_CAST 1264 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU 1265 #define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_CAST 1266 #define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_NAME PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K 1267 #define PDB_PROP_GPU_INACCESSIBLE_BASE_CAST 1268 #define PDB_PROP_GPU_INACCESSIBLE_BASE_NAME PDB_PROP_GPU_INACCESSIBLE 1269 #define 
PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_CAST 1270 #define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH 1271 #define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_CAST 1272 #define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_NAME PDB_PROP_GPU_IN_FATAL_ERROR 1273 #define PDB_PROP_GPU_VGA_ENABLED_BASE_CAST 1274 #define PDB_PROP_GPU_VGA_ENABLED_BASE_NAME PDB_PROP_GPU_VGA_ENABLED 1275 #define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_CAST 1276 #define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_RESUME_CODEPATH 1277 #define PDB_PROP_GPU_IN_STANDBY_BASE_CAST 1278 #define PDB_PROP_GPU_IN_STANDBY_BASE_NAME PDB_PROP_GPU_IN_STANDBY 1279 #define PDB_PROP_GPU_IS_COT_ENABLED_BASE_CAST 1280 #define PDB_PROP_GPU_IS_COT_ENABLED_BASE_NAME PDB_PROP_GPU_IS_COT_ENABLED 1281 #define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_CAST 1282 #define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_NAME PDB_PROP_GPU_COHERENT_CPU_MAPPING 1283 #define PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED_BASE_CAST 1284 #define PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED_BASE_NAME PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED 1285 #define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_CAST 1286 #define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY 1287 #define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_CAST 1288 #define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED 1289 #define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_CAST 1290 #define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_NAME PDB_PROP_GPU_SLI_LINK_ACTIVE 1291 #define PDB_PROP_GPU_IN_TCC_MODE_BASE_CAST 1292 #define PDB_PROP_GPU_IN_TCC_MODE_BASE_NAME PDB_PROP_GPU_IN_TCC_MODE 1293 #define PDB_PROP_GPU_C2C_SYSMEM_BASE_CAST 1294 #define PDB_PROP_GPU_C2C_SYSMEM_BASE_NAME PDB_PROP_GPU_C2C_SYSMEM 1295 #define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_CAST 1296 #define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_NAME 
PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING 1297 #define PDB_PROP_GPU_IN_GC6_RESET_BASE_CAST 1298 #define PDB_PROP_GPU_IN_GC6_RESET_BASE_NAME PDB_PROP_GPU_IN_GC6_RESET 1299 #define PDB_PROP_GPU_HYBRID_MGPU_BASE_CAST 1300 #define PDB_PROP_GPU_HYBRID_MGPU_BASE_NAME PDB_PROP_GPU_HYBRID_MGPU 1301 #define PDB_PROP_GPU_3D_CONTROLLER_BASE_CAST 1302 #define PDB_PROP_GPU_3D_CONTROLLER_BASE_NAME PDB_PROP_GPU_3D_CONTROLLER 1303 #define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_CAST 1304 #define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED 1305 #define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_CAST 1306 #define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE 1307 #define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_CAST 1308 #define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_NAME PDB_PROP_GPU_SKIP_TABLE_CE_MAP 1309 #define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_CAST 1310 #define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_NAME PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED 1311 #define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_CAST 1312 #define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI 1313 #define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_CAST 1314 #define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_NAME PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL 1315 #define PDB_PROP_GPU_IS_UEFI_BASE_CAST 1316 #define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI 1317 #define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_CAST 1318 #define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_NAME PDB_PROP_GPU_IN_SECONDARY_BUS_RESET 1319 #define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_CAST 1320 #define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_NAME PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT 1321 #define PDB_PROP_GPU_IS_CONNECTED_BASE_CAST 1322 #define PDB_PROP_GPU_IS_CONNECTED_BASE_NAME PDB_PROP_GPU_IS_CONNECTED 1323 #define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_CAST 1324 #define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_NAME 
PDB_PROP_GPU_IS_PLX_PRESENT 1325 #define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_CAST 1326 #define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_NAME PDB_PROP_GPU_NVLINK_SYSMEM 1327 #define PDB_PROP_GPU_IS_MOBILE_BASE_CAST 1328 #define PDB_PROP_GPU_IS_MOBILE_BASE_NAME PDB_PROP_GPU_IS_MOBILE 1329 #define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_CAST 1330 #define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_RTD3_GC6_ACTIVE 1331 #define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_CAST 1332 #define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_CC_FEATURE_CAPABLE 1333 #define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_CAST 1334 #define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_ENABLED 1335 #define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_CAST 1336 #define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_NAME PDB_PROP_GPU_PERSISTENT_SW_STATE 1337 #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_CAST 1338 #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH 1339 #define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_CAST 1340 #define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_NAME PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT 1341 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST 1342 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED 1343 #define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST 1344 #define PDB_PROP_GPU_BEHIND_BR03_BASE_NAME PDB_PROP_GPU_BEHIND_BR03 1345 #define PDB_PROP_GPU_BEHIND_BR04_BASE_CAST 1346 #define PDB_PROP_GPU_BEHIND_BR04_BASE_NAME PDB_PROP_GPU_BEHIND_BR04 1347 #define PDB_PROP_GPU_MIG_SUPPORTED_BASE_CAST 1348 #define PDB_PROP_GPU_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTED 1349 #define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_CAST 1350 #define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_NAME PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE 1351 #define PDB_PROP_GPU_BAR1_BAR2_DISABLED_BASE_CAST 1352 #define PDB_PROP_GPU_BAR1_BAR2_DISABLED_BASE_NAME PDB_PROP_GPU_BAR1_BAR2_DISABLED 1353 #define 
PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_CAST 1354 #define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_NAME PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE 1355 #define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_CAST 1356 #define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_NAME PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE 1357 #define PDB_PROP_GPU_ACCOUNTING_ON_BASE_CAST 1358 #define PDB_PROP_GPU_ACCOUNTING_ON_BASE_NAME PDB_PROP_GPU_ACCOUNTING_ON 1359 #define PDB_PROP_GPU_IN_HIBERNATE_BASE_CAST 1360 #define PDB_PROP_GPU_IN_HIBERNATE_BASE_NAME PDB_PROP_GPU_IN_HIBERNATE 1361 #define PDB_PROP_GPU_BROKEN_FB_BASE_CAST 1362 #define PDB_PROP_GPU_BROKEN_FB_BASE_NAME PDB_PROP_GPU_BROKEN_FB 1363 #define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_CAST 1364 #define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERING 1365 #define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_CAST 1366 #define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_NAME PDB_PROP_GPU_IN_TIMEOUT_RECOVERY 1367 #define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_CAST 1368 #define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERED 1369 #define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_CAST 1370 #define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_FAST_GC6_ACTIVE 1371 #define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_CAST 1372 #define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_IN_FULLCHIP_RESET 1373 #define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_CAST 1374 #define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_NAME PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA 1375 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_CAST 1376 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA 1377 #define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_CAST 1378 #define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_SLI_LINK_CODEPATH 1379 #define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_CAST 1380 #define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME 
PDB_PROP_GPU_IS_BR03_PRESENT 1381 #define PDB_PROP_GPU_IS_GEMINI_BASE_CAST 1382 #define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI 1383 #define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_CAST 1384 #define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_NAME PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED 1385 #define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST 1386 #define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED 1387 #define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST 1388 #define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_NAME PDB_PROP_GPU_NV_USERMODE_ENABLED 1389 #define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_CAST 1390 #define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_NAME PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT 1391 #define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_CAST 1392 #define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS 1393 #define PDB_PROP_GPU_IS_MXM_3X_BASE_CAST 1394 #define PDB_PROP_GPU_IS_MXM_3X_BASE_NAME PDB_PROP_GPU_IS_MXM_3X 1395 #define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_CAST 1396 #define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_NAME PDB_PROP_GPU_GSYNC_III_ATTACHED 1397 #define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_CAST 1398 #define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_NAME PDB_PROP_GPU_QSYNC_II_ATTACHED 1399 #define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_CAST 1400 #define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR04_PRESENT 1401 #define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_CAST 1402 #define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_NAME PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF 1403 #define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_CAST 1404 #define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_NAME PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE 1405 #define PDB_PROP_GPU_IS_SOC_SDM_BASE_CAST 1406 #define PDB_PROP_GPU_IS_SOC_SDM_BASE_NAME PDB_PROP_GPU_IS_SOC_SDM 1407 #define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_CAST 1408 #define 
PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_NAME PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM 1409 #define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_CAST 1410 #define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_NAME PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED 1411 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_CAST 1412 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED 1413 #define PDB_PROP_GPU_ZERO_FB_BASE_CAST 1414 #define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB 1415 #define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST 1416 #define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING 1417 #define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_CAST 1418 #define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_NAME PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK 1419 #define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_CAST 1420 #define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_IGPU 1421 #define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_CAST 1422 #define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED 1423 #define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST 1424 #define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED 1425 #define PDB_PROP_GPU_EMULATION_BASE_CAST 1426 #define PDB_PROP_GPU_EMULATION_BASE_NAME PDB_PROP_GPU_EMULATION 1427 #define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_CAST 1428 #define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_APM_FEATURE_CAPABLE 1429 #define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_CAST 1430 #define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_NAME PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS 1431 #define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_CAST 1432 #define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_NAME PDB_PROP_GPU_PRIMARY_DEVICE 1433 #define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_CAST 1434 #define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_NAME 
PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE 1435 #define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_CAST 1436 #define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_NAME PDB_PROP_GPU_BEHIND_BRIDGE 1437 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_CAST 1438 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY 1439 #define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_CAST 1440 #define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_NAME PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST 1441 #define PDB_PROP_GPU_IS_LOST_BASE_CAST 1442 #define PDB_PROP_GPU_IS_LOST_BASE_NAME PDB_PROP_GPU_IS_LOST 1443 #define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_CAST 1444 #define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED 1445 #define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_CAST 1446 #define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_NAME PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX 1447 1448 NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU**, Dynamic*, NvU32, va_list); 1449 1450 NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32, 1451 NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, 1452 RM_RUNTIME_VARIANT RmVariantHal_rmVariant, 1453 TEGRA_CHIP_TYPE TegraChipHal_tegraType, 1454 NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance); 1455 #define __objCreate_OBJGPU(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, TegraChipHal_tegraType, DispIpHal_ipver, arg_gpuInstance) \ 1456 __nvoc_objCreate_OBJGPU((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, TegraChipHal_tegraType, DispIpHal_ipver, arg_gpuInstance) 1457 1458 #define gpuConstructDeviceInfoTable(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu) 1459 #define 
gpuConstructDeviceInfoTable_HAL(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu) 1460 #define gpuWriteBusConfigReg(pGpu, index, value) gpuWriteBusConfigReg_DISPATCH(pGpu, index, value) 1461 #define gpuWriteBusConfigReg_HAL(pGpu, index, value) gpuWriteBusConfigReg_DISPATCH(pGpu, index, value) 1462 #define gpuReadBusConfigReg(pGpu, index, data) gpuReadBusConfigReg_DISPATCH(pGpu, index, data) 1463 #define gpuReadBusConfigReg_HAL(pGpu, index, data) gpuReadBusConfigReg_DISPATCH(pGpu, index, data) 1464 #define gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_DISPATCH(pGpu, index, data, pThreadState) 1465 #define gpuReadBusConfigRegEx_HAL(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_DISPATCH(pGpu, index, data, pThreadState) 1466 #define gpuReadFunctionConfigReg(pGpu, function, reg, data) gpuReadFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1467 #define gpuReadFunctionConfigReg_HAL(pGpu, function, reg, data) gpuReadFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1468 #define gpuWriteFunctionConfigReg(pGpu, function, reg, data) gpuWriteFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1469 #define gpuWriteFunctionConfigReg_HAL(pGpu, function, reg, data) gpuWriteFunctionConfigReg_DISPATCH(pGpu, function, reg, data) 1470 #define gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_DISPATCH(pGpu, function, reg, data, pThreadState) 1471 #define gpuWriteFunctionConfigRegEx_HAL(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_DISPATCH(pGpu, function, reg, data, pThreadState) 1472 #define gpuReadVgpuConfigReg(pGpu, index, data) gpuReadVgpuConfigReg_DISPATCH(pGpu, index, data) 1473 #define gpuReadVgpuConfigReg_HAL(pGpu, index, data) gpuReadVgpuConfigReg_DISPATCH(pGpu, index, data) 1474 #define gpuGetIdInfo(pGpu) gpuGetIdInfo_DISPATCH(pGpu) 1475 #define gpuGetIdInfo_HAL(pGpu) gpuGetIdInfo_DISPATCH(pGpu) 1476 #define gpuHandleSanityCheckRegReadError(pGpu, addr, 
value) gpuHandleSanityCheckRegReadError_DISPATCH(pGpu, addr, value) 1477 #define gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value) gpuHandleSanityCheckRegReadError_DISPATCH(pGpu, addr, value) 1478 #define gpuHandleSecFault(pGpu) gpuHandleSecFault_DISPATCH(pGpu) 1479 #define gpuHandleSecFault_HAL(pGpu) gpuHandleSecFault_DISPATCH(pGpu) 1480 #define gpuGetChildrenPresent(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries) 1481 #define gpuGetChildrenPresent_HAL(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries) 1482 #define gpuGetClassDescriptorList(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0) 1483 #define gpuGetClassDescriptorList_HAL(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0) 1484 #define gpuGetPhysAddrWidth(pGpu, arg0) gpuGetPhysAddrWidth_DISPATCH(pGpu, arg0) 1485 #define gpuGetPhysAddrWidth_HAL(pGpu, arg0) gpuGetPhysAddrWidth_DISPATCH(pGpu, arg0) 1486 #define gpuFuseSupportsDisplay(pGpu) gpuFuseSupportsDisplay_DISPATCH(pGpu) 1487 #define gpuFuseSupportsDisplay_HAL(pGpu) gpuFuseSupportsDisplay_DISPATCH(pGpu) 1488 #define gpuClearFbhubPoisonIntrForBug2924523(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu) 1489 #define gpuClearFbhubPoisonIntrForBug2924523_HAL(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu) 1490 #define gpuReadDeviceId(pGpu, arg0, arg1) gpuReadDeviceId_DISPATCH(pGpu, arg0, arg1) 1491 #define gpuReadDeviceId_HAL(pGpu, arg0, arg1) gpuReadDeviceId_DISPATCH(pGpu, arg0, arg1) 1492 #define gpuGetFlaVasSize(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization) 1493 #define gpuGetFlaVasSize_HAL(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization) 1494 #define gpuDetermineSelfHostedMode(pGpu) gpuDetermineSelfHostedMode_DISPATCH(pGpu) 1495 #define gpuDetermineSelfHostedMode_HAL(pGpu) gpuDetermineSelfHostedMode_DISPATCH(pGpu) 1496 #define gpuDetermineMIGSupport(pGpu) gpuDetermineMIGSupport_DISPATCH(pGpu) 
// ---------------------------------------------------------------------------
// Remaining HAL dispatch wrappers. Each gpuXxx()/gpuXxx_HAL() pair expands to
// the same gpuXxx_DISPATCH() call; the _DISPATCH macros (defined earlier in
// this header) route through the __gpuXxx__ function pointers carried in
// OBJGPU. NOTE(review): this is NVOC code-generator output — edit the source
// class definition, not this file, when changing dispatch behavior.
// ---------------------------------------------------------------------------
#define gpuDetermineMIGSupport_HAL(pGpu) gpuDetermineMIGSupport_DISPATCH(pGpu)
#define gpuIsAtsSupportedWithSmcMemPartitioning(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(pGpu)
#define gpuIsAtsSupportedWithSmcMemPartitioning_HAL(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(pGpu)
#define gpuIsSliCapableWithoutDisplay(pGpu) gpuIsSliCapableWithoutDisplay_DISPATCH(pGpu)
#define gpuIsSliCapableWithoutDisplay_HAL(pGpu) gpuIsSliCapableWithoutDisplay_DISPATCH(pGpu)
#define gpuIsCCEnabledInHw(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
#define gpuIsCCEnabledInHw_HAL(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
#define gpuIsDevModeEnabledInHw(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsDevModeEnabledInHw_HAL(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)

// ---------------------------------------------------------------------------
// Non-vtable entry points. Pattern for each function below:
//   - an inline stub (suffix like _56cd7a / _46f6a7 / _b3696a — generated
//     names; NOTE(review): the suffix appears derived from the stub body, so
//     do not rename by hand) and/or a gpuXxx_IMPL() prototype implemented in
//     a .c file;
//   - when __nvoc_gpu_h_disabled is defined, the public name becomes a stub
//     that fires NV_ASSERT_FAILED_PRECOMP and returns NV_ERR_NOT_SUPPORTED;
//     otherwise it is #define'd to the selected implementation;
//   - a gpuXxx_HAL() alias that forwards to the public name.
// ---------------------------------------------------------------------------

// Construct of the physical-RM side: no-op in this build, always NV_OK.
static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuConstructPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuConstructPhysical(pGpu) gpuConstructPhysical_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuConstructPhysical_HAL(pGpu) gpuConstructPhysical(pGpu)

// Destruct of the physical-RM side: no-op in this build.
static inline void gpuDestructPhysical_b3696a(struct OBJGPU *pGpu) {
    return;
}

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestructPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestructPhysical(pGpu) gpuDestructPhysical_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestructPhysical_HAL(pGpu) gpuDestructPhysical(pGpu)

// GPU state-machine hooks implemented out-of-line (see gpu.c for the _IMPL).
NV_STATUS gpuStatePreInit_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStatePreInit(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStatePreInit(pGpu) gpuStatePreInit_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuStatePreInit_HAL(pGpu) gpuStatePreInit(pGpu)

NV_STATUS gpuStateLoad_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateLoad(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateLoad(pGpu, arg0) gpuStateLoad_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuStateLoad_HAL(pGpu, arg0) gpuStateLoad(pGpu, arg0)

NV_STATUS gpuStateDestroy_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateDestroy(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateDestroy(pGpu) gpuStateDestroy_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuStateDestroy_HAL(pGpu) gpuStateDestroy(pGpu)

// Devinit/register override hooks: unsupported in this build — the stubs
// return NV_ERR_NOT_SUPPORTED unconditionally.
static inline NV_STATUS gpuApplyOverrides_46f6a7(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuApplyOverrides(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuApplyOverrides(pGpu, arg0, arg1) gpuApplyOverrides_46f6a7(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuApplyOverrides_HAL(pGpu, arg0, arg1) gpuApplyOverrides(pGpu, arg0, arg1)

// Registry-driven devinit overrides: no-op here, reports success.
static inline NV_STATUS gpuInitDevinitOverridesFromRegistry_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitDevinitOverridesFromRegistry(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitDevinitOverridesFromRegistry(pGpu) gpuInitDevinitOverridesFromRegistry_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitDevinitOverridesFromRegistry_HAL(pGpu) gpuInitDevinitOverridesFromRegistry(pGpu)

static inline NV_STATUS gpuApplyDevinitReg032Override_46f6a7(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuApplyDevinitReg032Override(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuApplyDevinitReg032Override(pGpu, arg0, arg1) gpuApplyDevinitReg032Override_46f6a7(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuApplyDevinitReg032Override_HAL(pGpu, arg0, arg1) gpuApplyDevinitReg032Override(pGpu, arg0, arg1)

// PCI ID / GPU ID sanity checks: stubbed out here. gpuCheckPCIIDMismatch
// reports NV_OK (no mismatch handling); gpuCheckGpuIDMismatch reports
// NV_FALSE via the generated constant-false expression.
static inline NV_STATUS gpuCheckPCIIDMismatch_56cd7a(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) {
    return NV_OK;
}

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCheckPCIIDMismatch(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckPCIIDMismatch(pGpu, arg0) gpuCheckPCIIDMismatch_56cd7a(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckPCIIDMismatch_HAL(pGpu, arg0) gpuCheckPCIIDMismatch(pGpu, arg0)

static inline NvBool gpuCheckGpuIDMismatch_491d52(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    return ((NvBool)(0 != 0)); // generated spelling of NV_FALSE
}

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckGpuIDMismatch(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckGpuIDMismatch(pGpu, arg0, arg1) gpuCheckGpuIDMismatch_491d52(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckGpuIDMismatch_HAL(pGpu, arg0, arg1) gpuCheckGpuIDMismatch(pGpu, arg0, arg1)

// Power-management unload hooks. Both an inline NV_OK stub and an out-of-line
// _IMPL prototype are emitted; this build's public name binds to the NV_OK
// stub (see the #define below). NOTE(review): presumably the _IMPL is
// selected by other build variants of this generated header — confirm
// against the NVOC generator output for the physical-RM variant.
static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerManagementEnterPreUnloadPhysical(pGpu) gpuPowerManagementEnterPreUnloadPhysical_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerManagementEnterPreUnloadPhysical_HAL(pGpu) gpuPowerManagementEnterPreUnloadPhysical(pGpu)

static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 newLevel) {
    return NV_OK;
}

NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_IMPL(struct OBJGPU *pGpu, NvU32 newLevel);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical(struct OBJGPU *pGpu, NvU32 newLevel) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return
NV_ERR_NOT_SUPPORTED; 1690 } 1691 #else //__nvoc_gpu_h_disabled 1692 #define gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical_56cd7a(pGpu, newLevel) 1693 #endif //__nvoc_gpu_h_disabled 1694 1695 #define gpuPowerManagementEnterPostUnloadPhysical_HAL(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel) 1696 1697 static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) { 1698 return NV_OK; 1699 } 1700 1701 NV_STATUS gpuPowerManagementResumePreLoadPhysical_IMPL(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags); 1702 1703 1704 #ifdef __nvoc_gpu_h_disabled 1705 static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) { 1706 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1707 return NV_ERR_NOT_SUPPORTED; 1708 } 1709 #else //__nvoc_gpu_h_disabled 1710 #define gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical_56cd7a(pGpu, oldLevel, flags) 1711 #endif //__nvoc_gpu_h_disabled 1712 1713 #define gpuPowerManagementResumePreLoadPhysical_HAL(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags) 1714 1715 static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical_56cd7a(struct OBJGPU *pGpu) { 1716 return NV_OK; 1717 } 1718 1719 NV_STATUS gpuPowerManagementResumePostLoadPhysical_IMPL(struct OBJGPU *pGpu); 1720 1721 1722 #ifdef __nvoc_gpu_h_disabled 1723 static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical(struct OBJGPU *pGpu) { 1724 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1725 return NV_ERR_NOT_SUPPORTED; 1726 } 1727 #else //__nvoc_gpu_h_disabled 1728 #define gpuPowerManagementResumePostLoadPhysical(pGpu) gpuPowerManagementResumePostLoadPhysical_56cd7a(pGpu) 1729 #endif //__nvoc_gpu_h_disabled 1730 1731 #define gpuPowerManagementResumePostLoadPhysical_HAL(pGpu) 
gpuPowerManagementResumePostLoadPhysical(pGpu) 1732 1733 static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx_46f6a7(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) { 1734 return NV_ERR_NOT_SUPPORTED; 1735 } 1736 1737 1738 #ifdef __nvoc_gpu_h_disabled 1739 static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) { 1740 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1741 return NV_ERR_NOT_SUPPORTED; 1742 } 1743 #else //__nvoc_gpu_h_disabled 1744 #define gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx_46f6a7(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) 1745 #endif //__nvoc_gpu_h_disabled 1746 1747 #define gpuInitializeMemDescFromPromotedCtx_HAL(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) 1748 1749 NV_STATUS gpuGetNameString_KERNEL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1); 1750 1751 NV_STATUS gpuGetNameString_IMPL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1); 1752 1753 1754 #ifdef __nvoc_gpu_h_disabled 1755 static inline NV_STATUS gpuGetNameString(struct OBJGPU *pGpu, NvU32 arg0, void *arg1) { 1756 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1757 return NV_ERR_NOT_SUPPORTED; 1758 } 1759 #else //__nvoc_gpu_h_disabled 1760 #define gpuGetNameString(pGpu, arg0, arg1) gpuGetNameString_KERNEL(pGpu, arg0, arg1) 1761 #endif //__nvoc_gpu_h_disabled 1762 1763 #define gpuGetNameString_HAL(pGpu, arg0, arg1) gpuGetNameString(pGpu, arg0, arg1) 1764 1765 NV_STATUS gpuGetShortNameString_KERNEL(struct OBJGPU *pGpu, NvU8 *arg0); 1766 1767 NV_STATUS 
gpuGetShortNameString_IMPL(struct OBJGPU *pGpu, NvU8 *arg0); 1768 1769 1770 #ifdef __nvoc_gpu_h_disabled 1771 static inline NV_STATUS gpuGetShortNameString(struct OBJGPU *pGpu, NvU8 *arg0) { 1772 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1773 return NV_ERR_NOT_SUPPORTED; 1774 } 1775 #else //__nvoc_gpu_h_disabled 1776 #define gpuGetShortNameString(pGpu, arg0) gpuGetShortNameString_KERNEL(pGpu, arg0) 1777 #endif //__nvoc_gpu_h_disabled 1778 1779 #define gpuGetShortNameString_HAL(pGpu, arg0) gpuGetShortNameString(pGpu, arg0) 1780 1781 void gpuInitBranding_FWCLIENT(struct OBJGPU *pGpu); 1782 1783 1784 #ifdef __nvoc_gpu_h_disabled 1785 static inline void gpuInitBranding(struct OBJGPU *pGpu) { 1786 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1787 } 1788 #else //__nvoc_gpu_h_disabled 1789 #define gpuInitBranding(pGpu) gpuInitBranding_FWCLIENT(pGpu) 1790 #endif //__nvoc_gpu_h_disabled 1791 1792 #define gpuInitBranding_HAL(pGpu) gpuInitBranding(pGpu) 1793 1794 BRANDING_TYPE gpuDetectBranding_FWCLIENT(struct OBJGPU *pGpu); 1795 1796 1797 #ifdef __nvoc_gpu_h_disabled 1798 static inline BRANDING_TYPE gpuDetectBranding(struct OBJGPU *pGpu) { 1799 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1800 BRANDING_TYPE ret; 1801 portMemSet(&ret, 0, sizeof(BRANDING_TYPE)); 1802 return ret; 1803 } 1804 #else //__nvoc_gpu_h_disabled 1805 #define gpuDetectBranding(pGpu) gpuDetectBranding_FWCLIENT(pGpu) 1806 #endif //__nvoc_gpu_h_disabled 1807 1808 #define gpuDetectBranding_HAL(pGpu) gpuDetectBranding(pGpu) 1809 1810 COMPUTE_BRANDING_TYPE gpuDetectComputeBranding_FWCLIENT(struct OBJGPU *pGpu); 1811 1812 1813 #ifdef __nvoc_gpu_h_disabled 1814 static inline COMPUTE_BRANDING_TYPE gpuDetectComputeBranding(struct OBJGPU *pGpu) { 1815 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1816 COMPUTE_BRANDING_TYPE ret; 1817 portMemSet(&ret, 0, sizeof(COMPUTE_BRANDING_TYPE)); 1818 return ret; 1819 } 1820 #else //__nvoc_gpu_h_disabled 1821 #define gpuDetectComputeBranding(pGpu) 
gpuDetectComputeBranding_FWCLIENT(pGpu) 1822 #endif //__nvoc_gpu_h_disabled 1823 1824 #define gpuDetectComputeBranding_HAL(pGpu) gpuDetectComputeBranding(pGpu) 1825 1826 BRANDING_TYPE gpuDetectVgxBranding_FWCLIENT(struct OBJGPU *pGpu); 1827 1828 1829 #ifdef __nvoc_gpu_h_disabled 1830 static inline BRANDING_TYPE gpuDetectVgxBranding(struct OBJGPU *pGpu) { 1831 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1832 BRANDING_TYPE ret; 1833 portMemSet(&ret, 0, sizeof(BRANDING_TYPE)); 1834 return ret; 1835 } 1836 #else //__nvoc_gpu_h_disabled 1837 #define gpuDetectVgxBranding(pGpu) gpuDetectVgxBranding_FWCLIENT(pGpu) 1838 #endif //__nvoc_gpu_h_disabled 1839 1840 #define gpuDetectVgxBranding_HAL(pGpu) gpuDetectVgxBranding(pGpu) 1841 1842 void gpuInitProperties_FWCLIENT(struct OBJGPU *pGpu); 1843 1844 1845 #ifdef __nvoc_gpu_h_disabled 1846 static inline void gpuInitProperties(struct OBJGPU *pGpu) { 1847 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1848 } 1849 #else //__nvoc_gpu_h_disabled 1850 #define gpuInitProperties(pGpu) gpuInitProperties_FWCLIENT(pGpu) 1851 #endif //__nvoc_gpu_h_disabled 1852 1853 #define gpuInitProperties_HAL(pGpu) gpuInitProperties(pGpu) 1854 1855 static inline void gpuSetThreadBcState_b3696a(struct OBJGPU *pGpu, NvBool arg0) { 1856 return; 1857 } 1858 1859 1860 #ifdef __nvoc_gpu_h_disabled 1861 static inline void gpuSetThreadBcState(struct OBJGPU *pGpu, NvBool arg0) { 1862 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1863 } 1864 #else //__nvoc_gpu_h_disabled 1865 #define gpuSetThreadBcState(pGpu, arg0) gpuSetThreadBcState_b3696a(pGpu, arg0) 1866 #endif //__nvoc_gpu_h_disabled 1867 1868 #define gpuSetThreadBcState_HAL(pGpu, arg0) gpuSetThreadBcState(pGpu, arg0) 1869 1870 static inline void gpuDeterminePersistantIllumSettings_b3696a(struct OBJGPU *pGpu) { 1871 return; 1872 } 1873 1874 1875 #ifdef __nvoc_gpu_h_disabled 1876 static inline void gpuDeterminePersistantIllumSettings(struct OBJGPU *pGpu) { 1877 NV_ASSERT_FAILED_PRECOMP("OBJGPU 
was disabled!"); 1878 } 1879 #else //__nvoc_gpu_h_disabled 1880 #define gpuDeterminePersistantIllumSettings(pGpu) gpuDeterminePersistantIllumSettings_b3696a(pGpu) 1881 #endif //__nvoc_gpu_h_disabled 1882 1883 #define gpuDeterminePersistantIllumSettings_HAL(pGpu) gpuDeterminePersistantIllumSettings(pGpu) 1884 1885 static inline NV_STATUS gpuInitSliIllumination_46f6a7(struct OBJGPU *pGpu) { 1886 return NV_ERR_NOT_SUPPORTED; 1887 } 1888 1889 1890 #ifdef __nvoc_gpu_h_disabled 1891 static inline NV_STATUS gpuInitSliIllumination(struct OBJGPU *pGpu) { 1892 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1893 return NV_ERR_NOT_SUPPORTED; 1894 } 1895 #else //__nvoc_gpu_h_disabled 1896 #define gpuInitSliIllumination(pGpu) gpuInitSliIllumination_46f6a7(pGpu) 1897 #endif //__nvoc_gpu_h_disabled 1898 1899 #define gpuInitSliIllumination_HAL(pGpu) gpuInitSliIllumination(pGpu) 1900 1901 NV_STATUS gpuBuildGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); 1902 1903 1904 #ifdef __nvoc_gpu_h_disabled 1905 static inline NV_STATUS gpuBuildGenericKernelFalconList(struct OBJGPU *pGpu) { 1906 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1907 return NV_ERR_NOT_SUPPORTED; 1908 } 1909 #else //__nvoc_gpu_h_disabled 1910 #define gpuBuildGenericKernelFalconList(pGpu) gpuBuildGenericKernelFalconList_IMPL(pGpu) 1911 #endif //__nvoc_gpu_h_disabled 1912 1913 #define gpuBuildGenericKernelFalconList_HAL(pGpu) gpuBuildGenericKernelFalconList(pGpu) 1914 1915 void gpuDestroyGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); 1916 1917 1918 #ifdef __nvoc_gpu_h_disabled 1919 static inline void gpuDestroyGenericKernelFalconList(struct OBJGPU *pGpu) { 1920 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1921 } 1922 #else //__nvoc_gpu_h_disabled 1923 #define gpuDestroyGenericKernelFalconList(pGpu) gpuDestroyGenericKernelFalconList_IMPL(pGpu) 1924 #endif //__nvoc_gpu_h_disabled 1925 1926 #define gpuDestroyGenericKernelFalconList_HAL(pGpu) gpuDestroyGenericKernelFalconList(pGpu) 1927 1928 NV_STATUS 
gpuBuildKernelVideoEngineList_IMPL(struct OBJGPU *pGpu); 1929 1930 1931 #ifdef __nvoc_gpu_h_disabled 1932 static inline NV_STATUS gpuBuildKernelVideoEngineList(struct OBJGPU *pGpu) { 1933 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1934 return NV_ERR_NOT_SUPPORTED; 1935 } 1936 #else //__nvoc_gpu_h_disabled 1937 #define gpuBuildKernelVideoEngineList(pGpu) gpuBuildKernelVideoEngineList_IMPL(pGpu) 1938 #endif //__nvoc_gpu_h_disabled 1939 1940 #define gpuBuildKernelVideoEngineList_HAL(pGpu) gpuBuildKernelVideoEngineList(pGpu) 1941 1942 NV_STATUS gpuInitVideoLogging_IMPL(struct OBJGPU *pGpu); 1943 1944 1945 #ifdef __nvoc_gpu_h_disabled 1946 static inline NV_STATUS gpuInitVideoLogging(struct OBJGPU *pGpu) { 1947 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1948 return NV_ERR_NOT_SUPPORTED; 1949 } 1950 #else //__nvoc_gpu_h_disabled 1951 #define gpuInitVideoLogging(pGpu) gpuInitVideoLogging_IMPL(pGpu) 1952 #endif //__nvoc_gpu_h_disabled 1953 1954 #define gpuInitVideoLogging_HAL(pGpu) gpuInitVideoLogging(pGpu) 1955 1956 void gpuFreeVideoLogging_IMPL(struct OBJGPU *pGpu); 1957 1958 1959 #ifdef __nvoc_gpu_h_disabled 1960 static inline void gpuFreeVideoLogging(struct OBJGPU *pGpu) { 1961 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1962 } 1963 #else //__nvoc_gpu_h_disabled 1964 #define gpuFreeVideoLogging(pGpu) gpuFreeVideoLogging_IMPL(pGpu) 1965 #endif //__nvoc_gpu_h_disabled 1966 1967 #define gpuFreeVideoLogging_HAL(pGpu) gpuFreeVideoLogging(pGpu) 1968 1969 void gpuDestroyKernelVideoEngineList_IMPL(struct OBJGPU *pGpu); 1970 1971 1972 #ifdef __nvoc_gpu_h_disabled 1973 static inline void gpuDestroyKernelVideoEngineList(struct OBJGPU *pGpu) { 1974 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 1975 } 1976 #else //__nvoc_gpu_h_disabled 1977 #define gpuDestroyKernelVideoEngineList(pGpu) gpuDestroyKernelVideoEngineList_IMPL(pGpu) 1978 #endif //__nvoc_gpu_h_disabled 1979 1980 #define gpuDestroyKernelVideoEngineList_HAL(pGpu) gpuDestroyKernelVideoEngineList(pGpu) 

// Lookup / interrupt-service registration for generic kernel falcons, plus
// device-reset bookkeeping.  Same NVOC pattern as above: per-implementation
// entry, __nvoc_gpu_h_disabled fallback, binding macro, _HAL alias.
struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);


#ifdef __nvoc_gpu_h_disabled
static inline struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetGenericKernelFalconForEngine(pGpu, arg0) gpuGetGenericKernelFalconForEngine_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetGenericKernelFalconForEngine_HAL(pGpu, arg0) gpuGetGenericKernelFalconForEngine(pGpu, arg0)

void gpuRegisterGenericKernelFalconIntrService_IMPL(struct OBJGPU *pGpu, void *pRecords);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuRegisterGenericKernelFalconIntrService(struct OBJGPU *pGpu, void *pRecords) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService_IMPL(pGpu, pRecords)
#endif //__nvoc_gpu_h_disabled

#define gpuRegisterGenericKernelFalconIntrService_HAL(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords)

static inline void gpuGetHwDefaults_b3696a(struct OBJGPU *pGpu) {
    return;
}


#ifdef __nvoc_gpu_h_disabled
static inline void gpuGetHwDefaults(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuGetHwDefaults(pGpu) gpuGetHwDefaults_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetHwDefaults_HAL(pGpu) gpuGetHwDefaults(pGpu)

RmPhysAddr gpuGetDmaEndAddress_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline RmPhysAddr gpuGetDmaEndAddress(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    RmPhysAddr ret;
    portMemSet(&ret, 0, sizeof(RmPhysAddr));
    return ret;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDmaEndAddress(pGpu) gpuGetDmaEndAddress_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetDmaEndAddress_HAL(pGpu) gpuGetDmaEndAddress(pGpu)

// Reset / drain-and-reset marking — all bound to NV_ERR_NOT_SUPPORTED stubs
// in this configuration (_395e98 / _82f166 / _244f65 generated stubs).
static inline NV_STATUS gpuSetStateResetRequired_395e98(struct OBJGPU *pGpu, NvU32 exceptType) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetStateResetRequired(struct OBJGPU *pGpu, NvU32 exceptType) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetStateResetRequired(pGpu, exceptType) gpuSetStateResetRequired_395e98(pGpu, exceptType)
#endif //__nvoc_gpu_h_disabled

#define gpuSetStateResetRequired_HAL(pGpu, exceptType) gpuSetStateResetRequired(pGpu, exceptType)

static inline NV_STATUS gpuMarkDeviceForReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuMarkDeviceForReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuMarkDeviceForReset(pGpu) gpuMarkDeviceForReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuMarkDeviceForReset_HAL(pGpu) gpuMarkDeviceForReset(pGpu)

static inline NV_STATUS gpuUnmarkDeviceForReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuUnmarkDeviceForReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuUnmarkDeviceForReset(pGpu) gpuUnmarkDeviceForReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuUnmarkDeviceForReset_HAL(pGpu) gpuUnmarkDeviceForReset(pGpu)

// Generated stub: reports "no reset required" and NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuIsDeviceMarkedForReset_82f166(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
    *pbResetRequired = ((NvBool)(0 != 0));
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    ;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuIsDeviceMarkedForReset(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsDeviceMarkedForReset(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset_82f166(pGpu, pbResetRequired)
#endif //__nvoc_gpu_h_disabled

#define gpuIsDeviceMarkedForReset_HAL(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset(pGpu, pbResetRequired)

static inline NV_STATUS gpuMarkDeviceForDrainAndReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuMarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuMarkDeviceForDrainAndReset(pGpu) gpuMarkDeviceForDrainAndReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuMarkDeviceForDrainAndReset_HAL(pGpu) gpuMarkDeviceForDrainAndReset(pGpu)

static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset_395e98(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuUnmarkDeviceForDrainAndReset(pGpu) gpuUnmarkDeviceForDrainAndReset_395e98(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuUnmarkDeviceForDrainAndReset_HAL(pGpu) gpuUnmarkDeviceForDrainAndReset(pGpu)

// Generated stub: reports "drain not recommended" and NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset_244f65(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
    *pbDrainRecommended = ((NvBool)(0 != 0));
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    ;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset_244f65(pGpu, pbDrainRecommended)
#endif //__nvoc_gpu_h_disabled

#define gpuIsDeviceMarkedForDrainAndReset_HAL(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended)

static inline NvU32 gpuGetSliFingerPinsetMask_4a4dee(struct OBJGPU *pGpu) {
    return 0;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetSliFingerPinsetMask(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSliFingerPinsetMask(pGpu) gpuGetSliFingerPinsetMask_4a4dee(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSliFingerPinsetMask_HAL(pGpu) gpuGetSliFingerPinsetMask(pGpu)

static inline NV_STATUS gpuPrivSecInitRegistryOverrides_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPrivSecInitRegistryOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPrivSecInitRegistryOverrides(pGpu) gpuPrivSecInitRegistryOverrides_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPrivSecInitRegistryOverrides_HAL(pGpu) gpuPrivSecInitRegistryOverrides(pGpu)

static inline void gpuDestroyOverrides_b3696a(struct OBJGPU *pGpu) {
    return;
}


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyOverrides(pGpu) gpuDestroyOverrides_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestroyOverrides_HAL(pGpu) gpuDestroyOverrides(pGpu)

// Power control and GPU identification.
NV_STATUS gpuPowerOff_KERNEL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPowerOff(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPowerOff(pGpu) gpuPowerOff_KERNEL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPowerOff_HAL(pGpu) gpuPowerOff(pGpu)

NV_STATUS gpuSetPower_GM107(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetPower(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetPower(pGpu, arg1, arg2, arg3) gpuSetPower_GM107(pGpu, arg1, arg2, arg3)
#endif //__nvoc_gpu_h_disabled

#define gpuSetPower_HAL(pGpu, arg1, arg2, arg3) gpuSetPower(pGpu, arg1, arg2, arg3)

// gpuUpdateIdInfo: a GK104 implementation exists, but this build binds the
// plain name to the empty _b3696a stub.
static inline void gpuUpdateIdInfo_b3696a(struct OBJGPU *pGpu) {
    return;
}

void gpuUpdateIdInfo_GK104(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuUpdateIdInfo(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuUpdateIdInfo(pGpu) gpuUpdateIdInfo_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuUpdateIdInfo_HAL(pGpu) gpuUpdateIdInfo(pGpu)

static inline NvU32 gpuGetDeviceIDList_4a4dee(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) {
    return 0;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetDeviceIDList(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDeviceIDList(pGpu, arg0) gpuGetDeviceIDList_4a4dee(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetDeviceIDList_HAL(pGpu, arg0) gpuGetDeviceIDList(pGpu, arg0)

// GID (UUID) generation — bound to the _FWCLIENT path here.
NV_STATUS gpuGenGidData_FWCLIENT(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);

NV_STATUS gpuGenGidData_GK104(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGenGidData(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_FWCLIENT(pGpu, pGidData, gidSize, gidFlags)
#endif //__nvoc_gpu_h_disabled

#define gpuGenGidData_HAL(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData(pGpu, pGidData, gidSize, gidFlags)

// Chip sub-revision — per-chip implementations; _FWCLIENT bound below.
NvU8 gpuGetChipSubRev_FWCLIENT(struct OBJGPU *pGpu);

NvU8 gpuGetChipSubRev_GK104(struct OBJGPU *pGpu);

NvU8 gpuGetChipSubRev_GA100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU8 gpuGetChipSubRev(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetChipSubRev(pGpu) gpuGetChipSubRev_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetChipSubRev_HAL(pGpu) gpuGetChipSubRev(pGpu)

// Emulation revision and universal-validation hooks.
NvU32 gpuGetEmulationRev1_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuGetEmulationRev1_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetEmulationRev1(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetEmulationRev1(pGpu) gpuGetEmulationRev1_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetEmulationRev1_HAL(pGpu) gpuGetEmulationRev1(pGpu)

// A GM107 implementation exists, but this build binds to the NV_OK stub.
static inline NV_STATUS gpuPerformUniversalValidation_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPerformUniversalValidation_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPerformUniversalValidation(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPerformUniversalValidation(pGpu) gpuPerformUniversalValidation_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPerformUniversalValidation_HAL(pGpu) gpuPerformUniversalValidation(pGpu)

// Register access helpers (virtual-register offsets, base offsets, sanity
// checks on register reads).
NvU32 gpuGetVirtRegPhysOffset_TU102(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetVirtRegPhysOffset(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetVirtRegPhysOffset(pGpu) gpuGetVirtRegPhysOffset_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetVirtRegPhysOffset_HAL(pGpu) gpuGetVirtRegPhysOffset(pGpu)

NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);

NV_STATUS gpuGetRegBaseOffset_TU102(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetRegBaseOffset(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetRegBaseOffset(pGpu, arg0, arg1) gpuGetRegBaseOffset_FWCLIENT(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuGetRegBaseOffset_HAL(pGpu, arg0, arg1) gpuGetRegBaseOffset(pGpu, arg0, arg1)

// Chip implementations exist (GK104/GA100) but this build binds the empty stub.
static inline void gpuGetSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
    return;
}

void gpuGetSanityCheckRegReadError_GK104(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString);

void gpuGetSanityCheckRegReadError_GA100(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuGetSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError_b3696a(pGpu, value, pErrorString)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSanityCheckRegReadError_HAL(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError(pGpu, value, pErrorString)

// TU102/GH100 implementations exist; this build binds the NV_OK stub.
static inline NV_STATUS gpuSanityCheckVirtRegAccess_56cd7a(struct OBJGPU *pGpu, NvU32 arg0) {
    return NV_OK;
}

NV_STATUS gpuSanityCheckVirtRegAccess_TU102(struct OBJGPU *pGpu, NvU32 arg0);

NV_STATUS gpuSanityCheckVirtRegAccess_GH100(struct OBJGPU *pGpu, NvU32 arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSanityCheckVirtRegAccess(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSanityCheckVirtRegAccess(pGpu, arg0) gpuSanityCheckVirtRegAccess_56cd7a(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuSanityCheckVirtRegAccess_HAL(pGpu, arg0) gpuSanityCheckVirtRegAccess(pGpu, arg0)

// Registry / instance-location overrides and child-engine ordering.
NV_STATUS gpuInitRegistryOverrides_KERNEL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitRegistryOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitRegistryOverrides(pGpu) gpuInitRegistryOverrides_KERNEL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitRegistryOverrides_HAL(pGpu) gpuInitRegistryOverrides(pGpu)

NV_STATUS gpuInitInstLocOverrides_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitInstLocOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitInstLocOverrides(pGpu) gpuInitInstLocOverrides_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitInstLocOverrides_HAL(pGpu) gpuInitInstLocOverrides(pGpu)

const GPUCHILDORDER *gpuGetChildrenOrder_GM200(struct OBJGPU *pGpu, NvU32 *pNumEntries);


#ifdef __nvoc_gpu_h_disabled
static inline const GPUCHILDORDER *gpuGetChildrenOrder(struct OBJGPU *pGpu, NvU32 *pNumEntries) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetChildrenOrder(pGpu, pNumEntries) gpuGetChildrenOrder_GM200(pGpu, pNumEntries)
#endif //__nvoc_gpu_h_disabled

#define gpuGetChildrenOrder_HAL(pGpu, pNumEntries) gpuGetChildrenOrder(pGpu, pNumEntries)

// SR-IOV setup/teardown — _FWCLIENT bound; TU102 implementations also declared.
NV_STATUS gpuInitSriov_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuInitSriov_TU102(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitSriov(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitSriov(pGpu) gpuInitSriov_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitSriov_HAL(pGpu) gpuInitSriov(pGpu)

NV_STATUS gpuDeinitSriov_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuDeinitSriov_TU102(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeinitSriov(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeinitSriov(pGpu) gpuDeinitSriov_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDeinitSriov_HAL(pGpu) gpuDeinitSriov(pGpu)

static inline NV_STATUS gpuCreateDefaultClientShare_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCreateDefaultClientShare(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCreateDefaultClientShare(pGpu) gpuCreateDefaultClientShare_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCreateDefaultClientShare_HAL(pGpu) gpuCreateDefaultClientShare(pGpu)

static inline void gpuDestroyDefaultClientShare_b3696a(struct OBJGPU *pGpu) {
    return;
}


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyDefaultClientShare(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyDefaultClientShare(pGpu) gpuDestroyDefaultClientShare_b3696a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDestroyDefaultClientShare_HAL(pGpu) gpuDestroyDefaultClientShare(pGpu)

// Returns the vMMU segment size cached on the OBJGPU (pGpu->vmmuSegmentSize).
static inline NvU64 gpuGetVmmuSegmentSize_72c522(struct OBJGPU *pGpu) {
    return pGpu->vmmuSegmentSize;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU64 gpuGetVmmuSegmentSize(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetVmmuSegmentSize(pGpu) gpuGetVmmuSegmentSize_72c522(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetVmmuSegmentSize_HAL(pGpu) gpuGetVmmuSegmentSize(pGpu)

void gpuGetTerminatedLinkMask_GA100(struct OBJGPU *pGpu, NvU32 arg0);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuGetTerminatedLinkMask(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuGetTerminatedLinkMask(pGpu, arg0) gpuGetTerminatedLinkMask_GA100(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetTerminatedLinkMask_HAL(pGpu, arg0) gpuGetTerminatedLinkMask(pGpu, arg0)

NV_STATUS gpuJtVersionSanityCheck_TU102(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuJtVersionSanityCheck(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuJtVersionSanityCheck(pGpu) gpuJtVersionSanityCheck_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuJtVersionSanityCheck_HAL(pGpu) gpuJtVersionSanityCheck(pGpu)

// GC6 power-off completion: the _cbe027 stub unconditionally reports NV_TRUE
// (a GV100 implementation is also declared).
static inline NvBool gpuCompletedGC6PowerOff_cbe027(struct OBJGPU *pGpu) {
    return ((NvBool)(0 == 0));
}

NvBool gpuCompletedGC6PowerOff_GV100(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCompletedGC6PowerOff(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCompletedGC6PowerOff(pGpu) gpuCompletedGC6PowerOff_cbe027(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCompletedGC6PowerOff_HAL(pGpu) gpuCompletedGC6PowerOff(pGpu)

// Generated constant variant: always returns NV_FALSE.
static inline NvBool gpuIsACPIPatchRequiredForBug2473619_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsACPIPatchRequiredForBug2473619(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsACPIPatchRequiredForBug2473619(pGpu) gpuIsACPIPatchRequiredForBug2473619_491d52(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsACPIPatchRequiredForBug2473619_HAL(pGpu) gpuIsACPIPatchRequiredForBug2473619(pGpu)

NvU32 gpuGetActiveFBIOs_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuGetActiveFBIOs_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetActiveFBIOs(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetActiveFBIOs(pGpu) gpuGetActiveFBIOs_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetActiveFBIOs_HAL(pGpu) gpuGetActiveFBIOs(pGpu)

// Reads the cached debug-mode flag from the OBJGPU.
static inline NvBool gpuIsDebuggerActive_8031b9(struct OBJGPU *pGpu) {
    return pGpu->bIsDebugModeEnabled;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsDebuggerActive(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsDebuggerActive(pGpu) gpuIsDebuggerActive_8031b9(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsDebuggerActive_HAL(pGpu) gpuIsDebuggerActive(pGpu)

NV_STATUS gpuExecGrCtxRegops_GK104(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuExecGrCtxRegops(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops_GK104(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw)
#endif //__nvoc_gpu_h_disabled

#define gpuExecGrCtxRegops_HAL(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw)

NV_STATUS gpuExtdevConstruct_GK104(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuExtdevConstruct(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuExtdevConstruct(pGpu) gpuExtdevConstruct_GK104(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuExtdevConstruct_HAL(pGpu) gpuExtdevConstruct(pGpu)

NvU32 gpuReadBAR1Size_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuReadBAR1Size_TU102(struct OBJGPU *pGpu);

NvU32 gpuReadBAR1Size_GH100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuReadBAR1Size(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuReadBAR1Size(pGpu) gpuReadBAR1Size_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuReadBAR1Size_HAL(pGpu) gpuReadBAR1Size(pGpu)

NvBool gpuCheckPageRetirementSupport_GSPCLIENT(struct OBJGPU *pGpu);

NvBool gpuCheckPageRetirementSupport_GV100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckPageRetirementSupport(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckPageRetirementSupport(pGpu) gpuCheckPageRetirementSupport_GSPCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckPageRetirementSupport_HAL(pGpu) gpuCheckPageRetirementSupport(pGpu)

NvBool gpuIsInternalSku_FWCLIENT(struct OBJGPU *pGpu);

NvBool gpuIsInternalSku_GP100(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsInternalSku(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsInternalSku(pGpu) gpuIsInternalSku_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsInternalSku_HAL(pGpu) gpuIsInternalSku(pGpu)

NV_STATUS gpuGetSriovCaps_TU102(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetSriovCaps(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSriovCaps(pGpu, arg0) gpuGetSriovCaps_TU102(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSriovCaps_HAL(pGpu, arg0) gpuGetSriovCaps(pGpu, arg0)

// Generated constant variant: always returns NV_FALSE.
static inline NvBool gpuCheckIsP2PAllocated_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

NvBool gpuCheckIsP2PAllocated_GA100(struct OBJGPU *pGpu);

// Generated "should never be called" variant: asserts and returns NV_FALSE
// through NV_ASSERT_OR_RETURN_PRECOMP.
static inline NvBool gpuCheckIsP2PAllocated_108313(struct OBJGPU *pGpu) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0)));
}


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckIsP2PAllocated(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckIsP2PAllocated(pGpu) gpuCheckIsP2PAllocated_491d52(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckIsP2PAllocated_HAL(pGpu) gpuCheckIsP2PAllocated(pGpu)

// Generated no-op variant.
static inline void gpuDecodeDeviceInfoTableGroupId_b3696a(struct OBJGPU *pGpu, DEVICE_INFO2_ENTRY *pEntry, NvU32 *pDeviceAccum) {
    return;
}


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDecodeDeviceInfoTableGroupId(struct OBJGPU *pGpu, DEVICE_INFO2_ENTRY *pEntry, NvU32 *pDeviceAccum) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDecodeDeviceInfoTableGroupId(pGpu, pEntry, pDeviceAccum) gpuDecodeDeviceInfoTableGroupId_b3696a(pGpu, pEntry, pDeviceAccum)
#endif //__nvoc_gpu_h_disabled

#define gpuDecodeDeviceInfoTableGroupId_HAL(pGpu, pEntry, pDeviceAccum) gpuDecodeDeviceInfoTableGroupId(pGpu, pEntry, pDeviceAccum)

// Generated constant variant: always returns NV_OK.
static inline NV_STATUS gpuGc6EntryPstateCheck_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGc6EntryPstateCheck(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGc6EntryPstateCheck(pGpu) gpuGc6EntryPstateCheck_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGc6EntryPstateCheck_HAL(pGpu) gpuGc6EntryPstateCheck(pGpu)

static inline NV_STATUS gpuWaitGC6Ready_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuWaitGC6Ready_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuWaitGC6Ready(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuWaitGC6Ready(pGpu) gpuWaitGC6Ready_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuWaitGC6Ready_HAL(pGpu) gpuWaitGC6Ready(pGpu)

static inline NV_STATUS gpuPrePowerOff_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuPrePowerOff_GM107(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPrePowerOff(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPrePowerOff(pGpu) gpuPrePowerOff_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuPrePowerOff_HAL(pGpu) gpuPrePowerOff(pGpu)

NV_STATUS gpuVerifyExistence_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuVerifyExistence(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuVerifyExistence(pGpu) gpuVerifyExistence_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuVerifyExistence_HAL(pGpu) gpuVerifyExistence(pGpu)

static inline void gpuResetVFRegisters_b3696a(struct OBJGPU *pGpu, NvU32 gfid) {
    return;
}

void gpuResetVFRegisters_TU102(struct OBJGPU *pGpu, NvU32 gfid);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuResetVFRegisters(struct OBJGPU *pGpu, NvU32 gfid) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuResetVFRegisters(pGpu, gfid) gpuResetVFRegisters_b3696a(pGpu, gfid)
#endif //__nvoc_gpu_h_disabled

#define gpuResetVFRegisters_HAL(pGpu, gfid) gpuResetVFRegisters(pGpu, gfid)

// Generated constant variant: always returns 1.
static inline NvU32 gpuGetSliLinkDetectionHalFlag_539ab4(struct OBJGPU *pGpu) {
    return 1;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetSliLinkDetectionHalFlag(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSliLinkDetectionHalFlag(pGpu) gpuGetSliLinkDetectionHalFlag_539ab4(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetSliLinkDetectionHalFlag_HAL(pGpu) gpuGetSliLinkDetectionHalFlag(pGpu)

void gpuDetectSliLinkFromGpus_GK104(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDetectSliLinkFromGpus(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectSliLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectSliLinkFromGpus_GK104(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectSliLinkFromGpus_HAL(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectSliLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)

// Generated constant variant: always returns 2.
static inline NvU32 gpuGetNvlinkLinkDetectionHalFlag_adde13(struct OBJGPU *pGpu) {
    return 2;
}


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetNvlinkLinkDetectionHalFlag(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetNvlinkLinkDetectionHalFlag(pGpu) gpuGetNvlinkLinkDetectionHalFlag_adde13(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuGetNvlinkLinkDetectionHalFlag_HAL(pGpu) gpuGetNvlinkLinkDetectionHalFlag(pGpu)

void gpuDetectNvlinkLinkFromGpus_GP100(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount);


#ifdef __nvoc_gpu_h_disabled
static inline void gpuDetectNvlinkLinkFromGpus(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDetectNvlinkLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectNvlinkLinkFromGpus_GP100(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
#endif //__nvoc_gpu_h_disabled

#define gpuDetectNvlinkLinkFromGpus_HAL(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectNvlinkLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)

// Per-chip variants of the indexed constants lookup.
NvU32 gpuGetLitterValues_FWCLIENT(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_TU102(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_GA100(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_GA102(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_AD102(struct OBJGPU *pGpu, NvU32 index);

NvU32 gpuGetLitterValues_GH100(struct OBJGPU *pGpu, NvU32 index);


#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetLitterValues(struct OBJGPU *pGpu, NvU32 index) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetLitterValues(pGpu, index) gpuGetLitterValues_FWCLIENT(pGpu, index)
#endif //__nvoc_gpu_h_disabled

#define gpuGetLitterValues_HAL(pGpu, index) gpuGetLitterValues(pGpu, index)

NvBool gpuIsGlobalPoisonFuseEnabled_FWCLIENT(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsGlobalPoisonFuseEnabled(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsGlobalPoisonFuseEnabled(pGpu) gpuIsGlobalPoisonFuseEnabled_FWCLIENT(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuIsGlobalPoisonFuseEnabled_HAL(pGpu) gpuIsGlobalPoisonFuseEnabled(pGpu)

NV_STATUS gpuInitOptimusSettings_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitOptimusSettings(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitOptimusSettings(pGpu) gpuInitOptimusSettings_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuInitOptimusSettings_HAL(pGpu) gpuInitOptimusSettings(pGpu)

NV_STATUS gpuDeinitOptimusSettings_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeinitOptimusSettings(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeinitOptimusSettings(pGpu) gpuDeinitOptimusSettings_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuDeinitOptimusSettings_HAL(pGpu) gpuDeinitOptimusSettings(pGpu)

// Generated constant variant: always returns NV_OK.
static inline NV_STATUS gpuSetCacheOnlyModeOverrides_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetCacheOnlyModeOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetCacheOnlyModeOverrides(pGpu) gpuSetCacheOnlyModeOverrides_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuSetCacheOnlyModeOverrides_HAL(pGpu) gpuSetCacheOnlyModeOverrides(pGpu)

NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(struct OBJGPU *arg0, NvU32 *arg1);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetCeFaultMethodBufferSize(struct OBJGPU *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetCeFaultMethodBufferSize(arg0, arg1) gpuGetCeFaultMethodBufferSize_KERNEL(arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuGetCeFaultMethodBufferSize_HAL(arg0, arg1) gpuGetCeFaultMethodBufferSize(arg0, arg1)

// Generated constant variant: always returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuSetVFBarSizes_46f6a7(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS gpuSetVFBarSizes_GA102(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetVFBarSizes(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetVFBarSizes(pGpu, arg0) gpuSetVFBarSizes_46f6a7(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuSetVFBarSizes_HAL(pGpu, arg0) gpuSetVFBarSizes(pGpu, arg0)

// Generated "should never be called" variant: asserts and returns NULL
// through NV_ASSERT_OR_RETURN_PRECOMP.
static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId_80f438(struct OBJGPU *pGpu, NvU32 peerGpuId) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
}


#ifdef __nvoc_gpu_h_disabled
static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId(struct OBJGPU *pGpu, NvU32 peerGpuId) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId_80f438(pGpu, peerGpuId)
#endif //__nvoc_gpu_h_disabled

#define gpuFindP2PPeerGpuCapsByGpuId_HAL(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId)

// Generated constant variant: always returns NV_OK.
static inline NV_STATUS gpuLoadFailurePathTest_56cd7a(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuLoadFailurePathTest(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest_56cd7a(pGpu, engStage, engDescIdx, bStopTest)
#endif //__nvoc_gpu_h_disabled

#define gpuLoadFailurePathTest_HAL(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest)

// Device-info table construction variants, selected at runtime through the
// __gpuConstructDeviceInfoTable__ pointer (dispatch helper follows).
NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu);

static inline NV_STATUS gpuConstructDeviceInfoTable_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuConstructDeviceInfoTable_GA100(struct OBJGPU *pGpu);

// _DISPATCH helpers call through the per-object NVOC function pointer
// (pGpu->__gpuXxx__); the concrete variant bound to the pointer is chosen
// elsewhere by the NVOC runtime.
static inline NV_STATUS gpuConstructDeviceInfoTable_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuConstructDeviceInfoTable__(pGpu);
}

NV_STATUS gpuWriteBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

NV_STATUS gpuWriteBusConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

static inline NV_STATUS gpuWriteBusConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
    return pGpu->__gpuWriteBusConfigReg__(pGpu, index, value);
}

NV_STATUS gpuReadBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

NV_STATUS gpuReadBusConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

static inline NV_STATUS gpuReadBusConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return pGpu->__gpuReadBusConfigReg__(pGpu, index, data);
}

NV_STATUS gpuReadBusConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState);

// Generated "not supported" variant: asserts and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS gpuReadBusConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuReadBusConfigRegEx_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
    return pGpu->__gpuReadBusConfigRegEx__(pGpu, index, data, pThreadState);
}

NV_STATUS gpuReadFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data);

static inline NV_STATUS gpuReadFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuReadFunctionConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
    return pGpu->__gpuReadFunctionConfigReg__(pGpu, function, reg, data);
}

3083 NV_STATUS gpuWriteFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data); 3084 3085 static inline NV_STATUS gpuWriteFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) { 3086 NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); 3087 } 3088 3089 static inline NV_STATUS gpuWriteFunctionConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) { 3090 return pGpu->__gpuWriteFunctionConfigReg__(pGpu, function, reg, data); 3091 } 3092 3093 NV_STATUS gpuWriteFunctionConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState); 3094 3095 static inline NV_STATUS gpuWriteFunctionConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) { 3096 NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); 3097 } 3098 3099 static inline NV_STATUS gpuWriteFunctionConfigRegEx_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) { 3100 return pGpu->__gpuWriteFunctionConfigRegEx__(pGpu, function, reg, data, pThreadState); 3101 } 3102 3103 NV_STATUS gpuReadVgpuConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 *data); 3104 3105 static inline NV_STATUS gpuReadVgpuConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) { 3106 return NV_ERR_NOT_SUPPORTED; 3107 } 3108 3109 static inline NV_STATUS gpuReadVgpuConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) { 3110 return pGpu->__gpuReadVgpuConfigReg__(pGpu, index, data); 3111 } 3112 3113 void gpuGetIdInfo_GM107(struct OBJGPU *pGpu); 3114 3115 void gpuGetIdInfo_GH100(struct OBJGPU *pGpu); 3116 3117 static inline void gpuGetIdInfo_DISPATCH(struct OBJGPU *pGpu) { 3118 pGpu->__gpuGetIdInfo__(pGpu); 3119 } 3120 3121 void gpuHandleSanityCheckRegReadError_GM107(struct OBJGPU *pGpu, NvU32 addr, NvU32 value); 3122 3123 void gpuHandleSanityCheckRegReadError_GH100(struct OBJGPU *pGpu, NvU32 
addr, NvU32 value); 3124 3125 static inline void gpuHandleSanityCheckRegReadError_DISPATCH(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) { 3126 pGpu->__gpuHandleSanityCheckRegReadError__(pGpu, addr, value); 3127 } 3128 3129 void gpuHandleSecFault_GH100(struct OBJGPU *pGpu); 3130 3131 static inline void gpuHandleSecFault_b3696a(struct OBJGPU *pGpu) { 3132 return; 3133 } 3134 3135 static inline void gpuHandleSecFault_DISPATCH(struct OBJGPU *pGpu) { 3136 pGpu->__gpuHandleSecFault__(pGpu); 3137 } 3138 3139 const GPUCHILDPRESENT *gpuGetChildrenPresent_TU102(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3140 3141 const GPUCHILDPRESENT *gpuGetChildrenPresent_TU104(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3142 3143 const GPUCHILDPRESENT *gpuGetChildrenPresent_TU106(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3144 3145 const GPUCHILDPRESENT *gpuGetChildrenPresent_GA100(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3146 3147 const GPUCHILDPRESENT *gpuGetChildrenPresent_GA102(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3148 3149 const GPUCHILDPRESENT *gpuGetChildrenPresent_AD102(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3150 3151 const GPUCHILDPRESENT *gpuGetChildrenPresent_GH100(struct OBJGPU *pGpu, NvU32 *pNumEntries); 3152 3153 static inline const GPUCHILDPRESENT *gpuGetChildrenPresent_DISPATCH(struct OBJGPU *pGpu, NvU32 *pNumEntries) { 3154 return pGpu->__gpuGetChildrenPresent__(pGpu, pNumEntries); 3155 } 3156 3157 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU102(struct OBJGPU *pGpu, NvU32 *arg0); 3158 3159 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU104(struct OBJGPU *pGpu, NvU32 *arg0); 3160 3161 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU106(struct OBJGPU *pGpu, NvU32 *arg0); 3162 3163 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU117(struct OBJGPU *pGpu, NvU32 *arg0); 3164 3165 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA100(struct OBJGPU *pGpu, NvU32 *arg0); 3166 3167 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA102(struct OBJGPU *pGpu, 
NvU32 *arg0); 3168 3169 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_AD102(struct OBJGPU *pGpu, NvU32 *arg0); 3170 3171 const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GH100(struct OBJGPU *pGpu, NvU32 *arg0); 3172 3173 static inline const CLASSDESCRIPTOR *gpuGetClassDescriptorList_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg0) { 3174 return pGpu->__gpuGetClassDescriptorList__(pGpu, arg0); 3175 } 3176 3177 NvU32 gpuGetPhysAddrWidth_TU102(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0); 3178 3179 NvU32 gpuGetPhysAddrWidth_GH100(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0); 3180 3181 static inline NvU32 gpuGetPhysAddrWidth_DISPATCH(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0) { 3182 return pGpu->__gpuGetPhysAddrWidth__(pGpu, arg0); 3183 } 3184 3185 NvBool gpuFuseSupportsDisplay_GM107(struct OBJGPU *pGpu); 3186 3187 NvBool gpuFuseSupportsDisplay_GA100(struct OBJGPU *pGpu); 3188 3189 static inline NvBool gpuFuseSupportsDisplay_491d52(struct OBJGPU *pGpu) { 3190 return ((NvBool)(0 != 0)); 3191 } 3192 3193 static inline NvBool gpuFuseSupportsDisplay_DISPATCH(struct OBJGPU *pGpu) { 3194 return pGpu->__gpuFuseSupportsDisplay__(pGpu); 3195 } 3196 3197 NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_GA100(struct OBJGPU *pGpu); 3198 3199 static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_56cd7a(struct OBJGPU *pGpu) { 3200 return NV_OK; 3201 } 3202 3203 static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(struct OBJGPU *pGpu) { 3204 return pGpu->__gpuClearFbhubPoisonIntrForBug2924523__(pGpu); 3205 } 3206 3207 void gpuReadDeviceId_GM107(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1); 3208 3209 void gpuReadDeviceId_GH100(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1); 3210 3211 static inline void gpuReadDeviceId_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { 3212 pGpu->__gpuReadDeviceId__(pGpu, arg0, arg1); 3213 } 3214 3215 NvU64 gpuGetFlaVasSize_GA100(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization); 3216 3217 NvU64 
gpuGetFlaVasSize_GH100(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization); 3218 3219 static inline NvU64 gpuGetFlaVasSize_474d46(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) { 3220 NV_ASSERT_OR_RETURN_PRECOMP(0, 0); 3221 } 3222 3223 static inline NvU64 gpuGetFlaVasSize_DISPATCH(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) { 3224 return pGpu->__gpuGetFlaVasSize__(pGpu, bNvswitchVirtualization); 3225 } 3226 3227 void gpuDetermineSelfHostedMode_PHYSICAL_GH100(struct OBJGPU *pGpu); 3228 3229 static inline void gpuDetermineSelfHostedMode_b3696a(struct OBJGPU *pGpu) { 3230 return; 3231 } 3232 3233 void gpuDetermineSelfHostedMode_KERNEL_GH100(struct OBJGPU *pGpu); 3234 3235 static inline void gpuDetermineSelfHostedMode_DISPATCH(struct OBJGPU *pGpu) { 3236 pGpu->__gpuDetermineSelfHostedMode__(pGpu); 3237 } 3238 3239 void gpuDetermineMIGSupport_GH100(struct OBJGPU *pGpu); 3240 3241 static inline void gpuDetermineMIGSupport_b3696a(struct OBJGPU *pGpu) { 3242 return; 3243 } 3244 3245 static inline void gpuDetermineMIGSupport_DISPATCH(struct OBJGPU *pGpu) { 3246 pGpu->__gpuDetermineMIGSupport__(pGpu); 3247 } 3248 3249 NvBool gpuIsAtsSupportedWithSmcMemPartitioning_GH100(struct OBJGPU *pGpu); 3250 3251 static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_491d52(struct OBJGPU *pGpu) { 3252 return ((NvBool)(0 != 0)); 3253 } 3254 3255 static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(struct OBJGPU *pGpu) { 3256 return pGpu->__gpuIsAtsSupportedWithSmcMemPartitioning__(pGpu); 3257 } 3258 3259 static inline NvBool gpuIsSliCapableWithoutDisplay_cbe027(struct OBJGPU *pGpu) { 3260 return ((NvBool)(0 == 0)); 3261 } 3262 3263 static inline NvBool gpuIsSliCapableWithoutDisplay_491d52(struct OBJGPU *pGpu) { 3264 return ((NvBool)(0 != 0)); 3265 } 3266 3267 static inline NvBool gpuIsSliCapableWithoutDisplay_DISPATCH(struct OBJGPU *pGpu) { 3268 return pGpu->__gpuIsSliCapableWithoutDisplay__(pGpu); 3269 } 3270 3271 NvBool 
gpuIsCCEnabledInHw_GH100(struct OBJGPU *pGpu);

/* Confidential-Computing enablement as read from HW; default stub: false. */
static inline NvBool gpuIsCCEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsCCEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsCCEnabledInHw__(pGpu);
}

/* Devtools/dev mode enablement as read from HW; default stub: false. */
NvBool gpuIsDevModeEnabledInHw_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsDevModeEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsDevModeEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsDevModeEnabledInHw__(pGpu);
}

/* Context-buffer allocation in PMA; GA100 implementation, default false. */
NvBool gpuIsCtxBufAllocInPmaSupported_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuIsCtxBufAllocInPmaSupported_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsCtxBufAllocInPmaSupported_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsCtxBufAllocInPmaSupported__(pGpu);
}

/* Accessors for the per-phase engine descriptor lists in pGpu->engineOrder. */
static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineInitDescriptors;
}

static inline PENGDESCRIPTOR gpuGetLoadEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineLoadDescriptors;
}

static inline PENGDESCRIPTOR gpuGetUnloadEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineUnloadDescriptors;
}

static inline PENGDESCRIPTOR gpuGetDestroyEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineDestroyDescriptors;
}

static inline NvU32 gpuGetNumEngDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.numEngineDescriptors;
}

/*
 * Returns 2 while any compute-mode reference is held, otherwise 1.
 * NOTE(review): presumably the graphics(1)/compute(2) mode values of the
 * NV2080 GPU-mode control - confirm against the mode enum before relying
 * on the bare constants.
 */
static inline NvU32 gpuGetMode(struct OBJGPU *pGpu) {
    return pGpu->computeModeRefCount > 0 ? 2 : 1;
}

/* Cached ACPI _DSM function selectors (see pGpu->acpi). */
static inline ACPI_DSM_FUNCTION gpuGetDispStatusHotplugFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.dispStatusHotplugFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetDispStatusConfigFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.dispStatusConfigFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetPerfPostPowerStateFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.perfPostPowerStateFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetStereo3dStateActiveFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.stereo3dStateActiveFunc;
}

/* Cached chip id (pGpu->chipId0); name implies the NV_PMC_BOOT_0 value. */
static inline NvU32 gpuGetPmcBoot0(struct OBJGPU *pGpu) {
    return pGpu->chipId0;
}

/* Always NULL here; real lookup lives in gpuGetKernelFifoShared_IMPL. */
static inline struct OBJFIFO *gpuGetFifoShared(struct OBJGPU *pGpu) {
    return ((void *)0);
}

/* Fresh zero-initialized iterator for walking GPU child engstates. */
static inline ENGSTATE_ITER gpuGetEngstateIter(struct OBJGPU *pGpu) {
    GPU_CHILD_ITER it = { 0 };
    return it;
}

static inline RmPhysAddr gpuGetDmaStartAddress(struct OBJGPU *pGpu) {
    return pGpu->dmaStartAddress;
}

/* No event-handle state to free in this configuration; trivially succeeds. */
static inline NV_STATUS gpuFreeEventHandle(struct OBJGPU *pGpu) {
    return NV_OK;
}

/* Chip identification fields cached from NV_PMC_BOOT_42 decode. */
static inline NvU32 gpuGetChipMajRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.majorRev;
}

static inline NvU32 gpuGetChipMinRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.minorRev;
}

static inline NvU32 gpuGetChipImpl(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.implementationId;
}

/* NOTE(review): "Arch" getter returns chipInfo.platformId - verify intent. */
static inline NvU32 gpuGetChipArch(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.platformId;
}

static inline NvU32 gpuGetChipMinExtRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.minorExtRev;
}

static inline NvBool gpuIsVideoLinkDisabled(struct OBJGPU *pGpu) {
    return pGpu->bVideoLinkDisabled;
}

static inline const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS
*gpuGetChipInfo(struct OBJGPU *pGpu) {
    return pGpu->pChipInfo;
}

/*
 * Trivial getters for OBJGPU boolean state/property flags.  Each returns
 * the like-named field directly; no locking is performed here.
 */
static inline NvBool gpuIsBar2MovedByVtd(struct OBJGPU *pGpu) {
    return pGpu->bBar2MovedByVtd;
}

static inline NvBool gpuIsBar1Size64Bit(struct OBJGPU *pGpu) {
    return pGpu->bBar1Is64Bit;
}

static inline NvBool gpuIsSurpriseRemovalSupported(struct OBJGPU *pGpu) {
    return pGpu->bSurpriseRemovalSupported;
}

static inline NvBool gpuIsReplayableTraceEnabled(struct OBJGPU *pGpu) {
    return pGpu->bReplayableTraceEnabled;
}

/* State-machine phase flags (loading / unloading / loaded / constructed). */
static inline NvBool gpuIsStateLoading(struct OBJGPU *pGpu) {
    return pGpu->bStateLoading;
}

static inline NvBool gpuIsStateUnloading(struct OBJGPU *pGpu) {
    return pGpu->bStateUnloading;
}

static inline NvBool gpuIsStateLoaded(struct OBJGPU *pGpu) {
    return pGpu->bStateLoaded;
}

static inline NvBool gpuIsFullyConstructed(struct OBJGPU *pGpu) {
    return pGpu->bFullyConstructed;
}

static inline NvBool gpuIsUnifiedMemorySpaceEnabled(struct OBJGPU *pGpu) {
    return pGpu->bUnifiedMemorySpaceEnabled;
}

/* Note: public name omits the "Bf3" prefix of the backing field. */
static inline NvBool gpuIsWarBug4040336Enabled(struct OBJGPU *pGpu) {
    return pGpu->bBf3WarBug4040336Enabled;
}

static inline NvBool gpuIsSriovEnabled(struct OBJGPU *pGpu) {
    return pGpu->bSriovEnabled;
}

static inline NvBool gpuIsCacheOnlyModeEnabled(struct OBJGPU *pGpu) {
    return pGpu->bCacheOnlyMode;
}

static inline NvBool gpuIsSplitVasManagementServerClientRmEnabled(struct OBJGPU *pGpu) {
    return pGpu->bSplitVasManagementServerClientRm;
}

static inline NvBool gpuIsWarBug200577889SriovHeavyEnabled(struct OBJGPU *pGpu) {
    return pGpu->bWarBug200577889SriovHeavyEnabled;
}

static inline NvBool gpuIsPipelinedPteMemEnabled(struct OBJGPU *pGpu) {
    return pGpu->bPipelinedPteMemEnabled;
}

static inline NvBool gpuIsBarPteInSysmemSupported(struct OBJGPU *pGpu) {
    return pGpu->bIsBarPteInSysmemSupported;
}

static inline NvBool gpuIsRegUsesGlobalSurfaceOverridesEnabled(struct OBJGPU *pGpu) {
    return pGpu->bRegUsesGlobalSurfaceOverrides;
}

static inline NvBool gpuIsTwoStageRcRecoveryEnabled(struct OBJGPU *pGpu) {
    return pGpu->bTwoStageRcRecoveryEnabled;
}

static inline NvBool gpuIsInD3Cold(struct OBJGPU *pGpu) {
    return pGpu->bInD3Cold;
}

static inline NvBool gpuIsClientRmAllocatedCtxBufferEnabled(struct OBJGPU *pGpu) {
    return pGpu->bClientRmAllocatedCtxBuffer;
}

static inline NvBool gpuIsIterativeMmuWalkerEnabled(struct OBJGPU *pGpu) {
    return pGpu->bIterativeMmuWalker;
}

static inline NvBool gpuIsEccPageRetirementWithSliAllowed(struct OBJGPU *pGpu) {
    return pGpu->bEccPageRetirementWithSliAllowed;
}

static inline NvBool gpuIsVidmemPreservationBrokenBug3172217(struct OBJGPU *pGpu) {
    return pGpu->bVidmemPreservationBrokenBug3172217;
}

static inline NvBool gpuIsInstanceMemoryAlwaysCached(struct OBJGPU *pGpu) {
    return pGpu->bInstanceMemoryAlwaysCached;
}

static inline NvBool gpuIsRmProfilingPrivileged(struct OBJGPU *pGpu) {
    return pGpu->bRmProfilingPrivileged;
}

/* SKU/branding flags. */
static inline NvBool gpuIsGeforceSmb(struct OBJGPU *pGpu) {
    return pGpu->bGeforceSmb;
}

static inline NvBool gpuIsGeforceBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsGeforce;
}

static inline NvBool gpuIsQuadroBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsQuadro;
}

static inline NvBool gpuIsVgxBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsVgx;
}

static inline NvBool gpuIsACBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsAC;
}

static inline NvBool gpuIsNvidiaNvsBranded(struct OBJGPU *pGpu) {
    return
pGpu->bIsNvidiaNvs;
}

/* Remaining trivial branding/capability flag getters. */
static inline NvBool gpuIsTitanBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsTitan;
}

static inline NvBool gpuIsTeslaBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsTesla;
}

static inline NvBool gpuIsComputePolicyTimesliceSupported(struct OBJGPU *pGpu) {
    return pGpu->bComputePolicyTimesliceSupported;
}

static inline NvBool gpuIsSriovCapable(struct OBJGPU *pGpu) {
    return pGpu->bSriovCapable;
}

static inline NvBool gpuIsNonPowerOf2ChannelCountSupported(struct OBJGPU *pGpu) {
    return pGpu->bNonPowerOf2ChannelCountSupported;
}

static inline NvBool gpuIsSelfHosted(struct OBJGPU *pGpu) {
    return pGpu->bIsSelfHosted;
}

static inline NvBool gpuIsGspOwnedFaultBuffersEnabled(struct OBJGPU *pGpu) {
    return pGpu->bIsGspOwnedFaultBuffersEnabled;
}

/*
 * Public wrappers for the gpu*_IMPL routines below.  When OBJGPU is compiled
 * out (__nvoc_gpu_h_disabled) each wrapper becomes an inline stub that
 * asserts and fails with NV_ERR_NOT_SUPPORTED; otherwise it is a macro
 * forwarding directly to the _IMPL definition.
 */
NV_STATUS gpuConstruct_IMPL(struct OBJGPU *arg_pGpu, NvU32 arg_gpuInstance);

#define __nvoc_gpuConstruct(arg_pGpu, arg_gpuInstance) gpuConstruct_IMPL(arg_pGpu, arg_gpuInstance)
NV_STATUS gpuBindHalLegacy_IMPL(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBindHalLegacy(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBindHalLegacy(pGpu, chipId0, chipId1, socChipId0) gpuBindHalLegacy_IMPL(pGpu, chipId0, chipId1, socChipId0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuPostConstruct_IMPL(struct OBJGPU *pGpu, GPUATTACHARG *arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPostConstruct(struct OBJGPU *pGpu, GPUATTACHARG *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPostConstruct(pGpu, arg0) gpuPostConstruct_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuCreateObject_IMPL(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCreateObject(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCreateObject(pGpu, arg0, arg1) gpuCreateObject_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

void gpuDestruct_IMPL(struct OBJGPU *pGpu);

#define __nvoc_gpuDestruct(pGpu) gpuDestruct_IMPL(pGpu)
NV_STATUS gpuStateInit_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateInit(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateInit(pGpu) gpuStateInit_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuStateUnload_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateUnload(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateUnload(pGpu, arg0) gpuStateUnload_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuInitDispIpHal_IMPL(struct OBJGPU *pGpu, NvU32 ipver);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitDispIpHal(struct OBJGPU *pGpu, NvU32 ipver) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitDispIpHal(pGpu, ipver) gpuInitDispIpHal_IMPL(pGpu, ipver)
#endif //__nvoc_gpu_h_disabled
/*
 * NVOC-generated _IMPL wrappers: with __nvoc_gpu_h_disabled each public name
 * is an assert-and-fail inline stub; otherwise a macro forwarding to _IMPL.
 */
void gpuServiceInterruptsAllGpus_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuServiceInterruptsAllGpus(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuServiceInterruptsAllGpus(pGpu) gpuServiceInterruptsAllGpus_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsImplementation_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsImplementation(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsImplementation(pGpu, arg0, arg1, arg2) gpuIsImplementation_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsImplementationOrBetter_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsImplementationOrBetter(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsImplementationOrBetter(pGpu, arg0, arg1, arg2) gpuIsImplementationOrBetter_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsGpuFullPower_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsGpuFullPower(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsGpuFullPower(pGpu) gpuIsGpuFullPower_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsGpuFullPowerForPmResume_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsGpuFullPowerForPmResume(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsGpuFullPowerForPmResume(pGpu) gpuIsGpuFullPowerForPmResume_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetDeviceEntryByType_IMPL(struct OBJGPU *pGpu, NvU32 deviceTypeEnum, NvS32 groupId, NvU32 instanceId, const DEVICE_INFO2_ENTRY **ppDeviceEntry);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetDeviceEntryByType(struct OBJGPU *pGpu, NvU32 deviceTypeEnum, NvS32 groupId, NvU32 instanceId, const DEVICE_INFO2_ENTRY **ppDeviceEntry) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDeviceEntryByType(pGpu, deviceTypeEnum, groupId, instanceId, ppDeviceEntry) gpuGetDeviceEntryByType_IMPL(pGpu, deviceTypeEnum, groupId, instanceId, ppDeviceEntry)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuBuildClassDB_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBuildClassDB(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBuildClassDB(pGpu) gpuBuildClassDB_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDestroyClassDB_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDestroyClassDB(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyClassDB(pGpu) gpuDestroyClassDB_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDeleteEngineFromClassDB_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline
NV_STATUS gpuDeleteEngineFromClassDB(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteEngineFromClassDB(pGpu, arg0) gpuDeleteEngineFromClassDB_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

/* Class-DB mutation/query wrappers (same disabled-stub pattern throughout). */
NV_STATUS gpuDeleteEngineOnPreInit_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteEngineOnPreInit(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteEngineOnPreInit(pGpu, arg0) gpuDeleteEngineOnPreInit_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuAddClassToClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddClassToClassDBByEngTag(pGpu, arg0) gpuAddClassToClassDBByEngTag_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuAddClassToClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuAddClassToClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddClassToClassDBByClassId(pGpu, arg0) gpuAddClassToClassDBByClassId_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuAddClassToClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuAddClassToClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddClassToClassDBByEngTagClassId(pGpu, arg0, arg1) gpuAddClassToClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDeleteClassFromClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteClassFromClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteClassFromClassDBByClassId(pGpu, arg0) gpuDeleteClassFromClassDBByClassId_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDeleteClassFromClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteClassFromClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteClassFromClassDBByEngTag(pGpu, arg0) gpuDeleteClassFromClassDBByEngTag_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDeleteClassFromClassDBByEngTagClassId(pGpu, arg0, arg1) gpuDeleteClassFromClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsClassSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsClassSupported(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsClassSupported(pGpu, arg0) gpuIsClassSupported_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetClassByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetClassByClassId(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetClassByClassId(pGpu, arg0, arg1) gpuGetClassByClassId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetClassByEngineAndClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetClassByEngineAndClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetClassByEngineAndClassId(pGpu, arg0, arg1, arg2) gpuGetClassByEngineAndClassId_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetClassList(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetClassList(pGpu, arg0, arg1, arg2) gpuGetClassList_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

/* Engine-table lifecycle wrappers (same disabled-stub pattern throughout). */
NV_STATUS gpuConstructEngineTable_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuConstructEngineTable(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuConstructEngineTable(pGpu) gpuConstructEngineTable_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

void gpuDestroyEngineTable_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyEngineTable(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyEngineTable(pGpu) gpuDestroyEngineTable_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuUpdateEngineTable_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuUpdateEngineTable(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuUpdateEngineTable(pGpu) gpuUpdateEngineTable_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NvBool gpuCheckEngineTable_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckEngineTable(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckEngineTable(pGpu, arg0) gpuCheckEngineTable_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

/* Translation between ENGDESCRIPTOR and client-visible RM_ENGINE_TYPE. */
NV_STATUS gpuXlateEngDescToClientEngineId_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, RM_ENGINE_TYPE *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuXlateEngDescToClientEngineId(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, RM_ENGINE_TYPE *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuXlateEngDescToClientEngineId(pGpu, arg0, arg1) gpuXlateEngDescToClientEngineId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuXlateClientEngineIdToEngDesc_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, ENGDESCRIPTOR *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuXlateClientEngineIdToEngDesc(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, ENGDESCRIPTOR *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuXlateClientEngineIdToEngDesc(pGpu, arg0, arg1) gpuXlateClientEngineIdToEngDesc_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetFlcnFromClientEngineId_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, struct Falcon **arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetFlcnFromClientEngineId(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, struct Falcon **arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetFlcnFromClientEngineId(pGpu, arg0, arg1) gpuGetFlcnFromClientEngineId_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsEngDescSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsEngDescSupported(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsEngDescSupported(pGpu, arg0) gpuIsEngDescSupported_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

/* PCI config-cycle accessors. */
NV_STATUS gpuReadBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuReadBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuReadBusConfigCycle(pGpu, index, pData) gpuReadBusConfigCycle_IMPL(pGpu, index, pData)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuWriteBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuWriteBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuWriteBusConfigCycle(pGpu, index, value) gpuWriteBusConfigCycle_IMPL(pGpu, index, value)
#endif //__nvoc_gpu_h_disabled

/*
 * RM <-> NV2080 engine-type conversions.  These take no OBJGPU, so they are
 * unconditionally forwarded (no __nvoc_gpu_h_disabled stub variant).
 */
RM_ENGINE_TYPE gpuGetRmEngineType_IMPL(NvU32 index);

#define gpuGetRmEngineType(index) gpuGetRmEngineType_IMPL(index)
void gpuGetRmEngineTypeList_IMPL(NvU32 *pNv2080EngineList, NvU32 engineCount, RM_ENGINE_TYPE *pRmEngineList);

#define gpuGetRmEngineTypeList(pNv2080EngineList, engineCount, pRmEngineList) gpuGetRmEngineTypeList_IMPL(pNv2080EngineList, engineCount, pRmEngineList)
NvU32 gpuGetNv2080EngineType_IMPL(RM_ENGINE_TYPE index);

#define gpuGetNv2080EngineType(index) gpuGetNv2080EngineType_IMPL(index)
void gpuGetNv2080EngineTypeList_IMPL(RM_ENGINE_TYPE *pRmEngineList, NvU32 engineCount, NvU32 *pNv2080EngineList);

#define gpuGetNv2080EngineTypeList(pRmEngineList, engineCount, pNv2080EngineList) gpuGetNv2080EngineTypeList_IMPL(pRmEngineList, engineCount, pNv2080EngineList)
NV_STATUS gpuGetRmEngineTypeCapMask_IMPL(NvU32 *NV2080EngineTypeCap, NvU32 capSize, NvU32 *RmEngineTypeCap);

#define gpuGetRmEngineTypeCapMask(NV2080EngineTypeCap, capSize, RmEngineTypeCap) gpuGetRmEngineTypeCapMask_IMPL(NV2080EngineTypeCap, capSize, RmEngineTypeCap)
NvU32 gpuGetGpuMask_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvU32 gpuGetGpuMask(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return 0;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetGpuMask(pGpu) gpuGetGpuMask_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

void gpuChangeComputeModeRefCount_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuChangeComputeModeRefCount(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuChangeComputeModeRefCount(pGpu, arg0) gpuChangeComputeModeRefCount_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuEnterShutdown_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuEnterShutdown(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuEnterShutdown(pGpu) gpuEnterShutdown_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuSanityCheck_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSanityCheck(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSanityCheck(pGpu, arg0, arg1) gpuSanityCheck_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

DEVICE_MAPPING *gpuGetDeviceMapping_IMPL(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline DEVICE_MAPPING *gpuGetDeviceMapping(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDeviceMapping(pGpu, arg0, arg1) gpuGetDeviceMapping_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

/* Device-mapping / GID / falcon bookkeeping wrappers (disabled-stub pattern). */
DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetDeviceMappingFromDeviceID(pGpu, arg0, arg1) gpuGetDeviceMappingFromDeviceID_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

/* Returns the GPU GID/UUID string; *ppGidString ownership per _IMPL contract. */
NV_STATUS gpuGetGidInfo_IMPL(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetGidInfo(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetGidInfo(pGpu, ppGidString, pGidStrlen, gidFlags) gpuGetGidInfo_IMPL(pGpu, ppGidString, pGidStrlen, gidFlags)
#endif //__nvoc_gpu_h_disabled

void gpuSetDisconnectedProperties_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuSetDisconnectedProperties(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuSetDisconnectedProperties(pGpu) gpuSetDisconnectedProperties_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuAddConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuAddConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuAddConstructedFalcon(pGpu, arg0) gpuAddConstructedFalcon_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuRemoveConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuRemoveConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuRemoveConstructedFalcon(pGpu, arg0) gpuRemoveConstructedFalcon_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetConstructedFalcon_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetConstructedFalcon(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetConstructedFalcon(pGpu, arg0, arg1) gpuGetConstructedFalcon_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsVideoTraceLogSupported_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsVideoTraceLogSupported(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsVideoTraceLogSupported(pGpu) gpuIsVideoTraceLogSupported_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuGetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return
NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetSparseTextureComputeMode(pGpu, arg0, arg1, arg2) gpuGetSparseTextureComputeMode_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuSetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetSparseTextureComputeMode(pGpu, arg0) gpuSetSparseTextureComputeMode_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

/* Engstate lookup/iteration wrappers (disabled-stub pattern throughout). */
struct OBJENGSTATE *gpuGetEngstate_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);

#ifdef __nvoc_gpu_h_disabled
static inline struct OBJENGSTATE *gpuGetEngstate(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetEngstate(pGpu, arg0) gpuGetEngstate_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

struct OBJENGSTATE *gpuGetEngstateNoShare_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);

#ifdef __nvoc_gpu_h_disabled
static inline struct OBJENGSTATE *gpuGetEngstateNoShare(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetEngstateNoShare(pGpu, arg0) gpuGetEngstateNoShare_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

struct KernelFifo *gpuGetKernelFifoShared_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline struct KernelFifo *gpuGetKernelFifoShared(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetKernelFifoShared(pGpu) gpuGetKernelFifoShared_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NvBool gpuGetNextEngstate_IMPL(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuGetNextEngstate(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetNextEngstate(pGpu, pIt, ppEngState) gpuGetNextEngstate_IMPL(pGpu, pIt, ppEngState)
#endif //__nvoc_gpu_h_disabled

struct OBJHOSTENG *gpuGetHosteng_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);

#ifdef __nvoc_gpu_h_disabled
static inline struct OBJHOSTENG *gpuGetHosteng(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetHosteng(pGpu, arg0) gpuGetHosteng_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

/* User register access-map wrappers. */
NV_STATUS gpuConstructUserRegisterAccessMap_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuConstructUserRegisterAccessMap(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuConstructUserRegisterAccessMap(pGpu) gpuConstructUserRegisterAccessMap_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuInitRegisterAccessMap_IMPL(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitRegisterAccessMap(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define
gpuInitRegisterAccessMap(pGpu, arg0, arg1, arg2, arg3) gpuInitRegisterAccessMap_IMPL(pGpu, arg0, arg1, arg2, arg3) 4184 #endif //__nvoc_gpu_h_disabled 4185 4186 NV_STATUS gpuSetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow); 4187 4188 #ifdef __nvoc_gpu_h_disabled 4189 static inline NV_STATUS gpuSetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) { 4190 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4191 return NV_ERR_NOT_SUPPORTED; 4192 } 4193 #else //__nvoc_gpu_h_disabled 4194 #define gpuSetUserRegisterAccessPermissions(pGpu, offset, size, bAllow) gpuSetUserRegisterAccessPermissions_IMPL(pGpu, offset, size, bAllow) 4195 #endif //__nvoc_gpu_h_disabled 4196 4197 NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk_IMPL(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow); 4198 4199 #ifdef __nvoc_gpu_h_disabled 4200 static inline NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow) { 4201 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4202 return NV_ERR_NOT_SUPPORTED; 4203 } 4204 #else //__nvoc_gpu_h_disabled 4205 #define gpuSetUserRegisterAccessPermissionsInBulk(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) gpuSetUserRegisterAccessPermissionsInBulk_IMPL(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) 4206 #endif //__nvoc_gpu_h_disabled 4207 4208 NvBool gpuGetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset); 4209 4210 #ifdef __nvoc_gpu_h_disabled 4211 static inline NvBool gpuGetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset) { 4212 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4213 return NV_FALSE; 4214 } 4215 #else //__nvoc_gpu_h_disabled 4216 #define gpuGetUserRegisterAccessPermissions(pGpu, offset) gpuGetUserRegisterAccessPermissions_IMPL(pGpu, offset) 4217 #endif //__nvoc_gpu_h_disabled 
4218 4219 void gpuDumpCallbackRegister_IMPL(struct OBJGPU *pGpu); 4220 4221 #ifdef __nvoc_gpu_h_disabled 4222 static inline void gpuDumpCallbackRegister(struct OBJGPU *pGpu) { 4223 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4224 } 4225 #else //__nvoc_gpu_h_disabled 4226 #define gpuDumpCallbackRegister(pGpu) gpuDumpCallbackRegister_IMPL(pGpu) 4227 #endif //__nvoc_gpu_h_disabled 4228 4229 NV_STATUS gpuGetGfidState_IMPL(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState); 4230 4231 #ifdef __nvoc_gpu_h_disabled 4232 static inline NV_STATUS gpuGetGfidState(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState) { 4233 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4234 return NV_ERR_NOT_SUPPORTED; 4235 } 4236 #else //__nvoc_gpu_h_disabled 4237 #define gpuGetGfidState(pGpu, gfid, pState) gpuGetGfidState_IMPL(pGpu, gfid, pState) 4238 #endif //__nvoc_gpu_h_disabled 4239 4240 void gpuSetGfidUsage_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); 4241 4242 #ifdef __nvoc_gpu_h_disabled 4243 static inline void gpuSetGfidUsage(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { 4244 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4245 } 4246 #else //__nvoc_gpu_h_disabled 4247 #define gpuSetGfidUsage(pGpu, gfid, bInUse) gpuSetGfidUsage_IMPL(pGpu, gfid, bInUse) 4248 #endif //__nvoc_gpu_h_disabled 4249 4250 void gpuSetGfidInvalidated_IMPL(struct OBJGPU *pGpu, NvU32 gfid); 4251 4252 #ifdef __nvoc_gpu_h_disabled 4253 static inline void gpuSetGfidInvalidated(struct OBJGPU *pGpu, NvU32 gfid) { 4254 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4255 } 4256 #else //__nvoc_gpu_h_disabled 4257 #define gpuSetGfidInvalidated(pGpu, gfid) gpuSetGfidInvalidated_IMPL(pGpu, gfid) 4258 #endif //__nvoc_gpu_h_disabled 4259 4260 NV_STATUS gpuSetExternalKernelClientCount_IMPL(struct OBJGPU *pGpu, NvBool bIncr); 4261 4262 #ifdef __nvoc_gpu_h_disabled 4263 static inline NV_STATUS gpuSetExternalKernelClientCount(struct OBJGPU *pGpu, NvBool bIncr) { 4264 
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4265 return NV_ERR_NOT_SUPPORTED; 4266 } 4267 #else //__nvoc_gpu_h_disabled 4268 #define gpuSetExternalKernelClientCount(pGpu, bIncr) gpuSetExternalKernelClientCount_IMPL(pGpu, bIncr) 4269 #endif //__nvoc_gpu_h_disabled 4270 4271 NvBool gpuIsInUse_IMPL(struct OBJGPU *pGpu); 4272 4273 #ifdef __nvoc_gpu_h_disabled 4274 static inline NvBool gpuIsInUse(struct OBJGPU *pGpu) { 4275 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4276 return NV_FALSE; 4277 } 4278 #else //__nvoc_gpu_h_disabled 4279 #define gpuIsInUse(pGpu) gpuIsInUse_IMPL(pGpu) 4280 #endif //__nvoc_gpu_h_disabled 4281 4282 NvU32 gpuGetUserClientCount_IMPL(struct OBJGPU *pGpu); 4283 4284 #ifdef __nvoc_gpu_h_disabled 4285 static inline NvU32 gpuGetUserClientCount(struct OBJGPU *pGpu) { 4286 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4287 return 0; 4288 } 4289 #else //__nvoc_gpu_h_disabled 4290 #define gpuGetUserClientCount(pGpu) gpuGetUserClientCount_IMPL(pGpu) 4291 #endif //__nvoc_gpu_h_disabled 4292 4293 NvU32 gpuGetExternalClientCount_IMPL(struct OBJGPU *pGpu); 4294 4295 #ifdef __nvoc_gpu_h_disabled 4296 static inline NvU32 gpuGetExternalClientCount(struct OBJGPU *pGpu) { 4297 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4298 return 0; 4299 } 4300 #else //__nvoc_gpu_h_disabled 4301 #define gpuGetExternalClientCount(pGpu) gpuGetExternalClientCount_IMPL(pGpu) 4302 #endif //__nvoc_gpu_h_disabled 4303 4304 void gpuNotifySubDeviceEvent_IMPL(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); 4305 4306 #ifdef __nvoc_gpu_h_disabled 4307 static inline void gpuNotifySubDeviceEvent(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { 4308 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4309 } 4310 #else //__nvoc_gpu_h_disabled 4311 #define gpuNotifySubDeviceEvent(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, 
info16) gpuNotifySubDeviceEvent_IMPL(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) 4312 #endif //__nvoc_gpu_h_disabled 4313 4314 NV_STATUS gpuRegisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); 4315 4316 #ifdef __nvoc_gpu_h_disabled 4317 static inline NV_STATUS gpuRegisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { 4318 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4319 return NV_ERR_NOT_SUPPORTED; 4320 } 4321 #else //__nvoc_gpu_h_disabled 4322 #define gpuRegisterSubdevice(pGpu, pSubdevice) gpuRegisterSubdevice_IMPL(pGpu, pSubdevice) 4323 #endif //__nvoc_gpu_h_disabled 4324 4325 void gpuUnregisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); 4326 4327 #ifdef __nvoc_gpu_h_disabled 4328 static inline void gpuUnregisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { 4329 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4330 } 4331 #else //__nvoc_gpu_h_disabled 4332 #define gpuUnregisterSubdevice(pGpu, pSubdevice) gpuUnregisterSubdevice_IMPL(pGpu, pSubdevice) 4333 #endif //__nvoc_gpu_h_disabled 4334 4335 void gpuGspPluginTriggeredEvent_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex); 4336 4337 #ifdef __nvoc_gpu_h_disabled 4338 static inline void gpuGspPluginTriggeredEvent(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex) { 4339 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4340 } 4341 #else //__nvoc_gpu_h_disabled 4342 #define gpuGspPluginTriggeredEvent(pGpu, gfid, notifyIndex) gpuGspPluginTriggeredEvent_IMPL(pGpu, gfid, notifyIndex) 4343 #endif //__nvoc_gpu_h_disabled 4344 4345 NV_STATUS gpuGetProcWithObject_IMPL(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef); 4346 4347 #ifdef __nvoc_gpu_h_disabled 4348 static inline NV_STATUS gpuGetProcWithObject(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef) { 
4349 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4350 return NV_ERR_NOT_SUPPORTED; 4351 } 4352 #else //__nvoc_gpu_h_disabled 4353 #define gpuGetProcWithObject(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) gpuGetProcWithObject_IMPL(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) 4354 #endif //__nvoc_gpu_h_disabled 4355 4356 NV_STATUS gpuFindClientInfoWithPidIterator_IMPL(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo); 4357 4358 #ifdef __nvoc_gpu_h_disabled 4359 static inline NV_STATUS gpuFindClientInfoWithPidIterator(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo) { 4360 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4361 return NV_ERR_NOT_SUPPORTED; 4362 } 4363 #else //__nvoc_gpu_h_disabled 4364 #define gpuFindClientInfoWithPidIterator(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) gpuFindClientInfoWithPidIterator_IMPL(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) 4365 #endif //__nvoc_gpu_h_disabled 4366 4367 NvBool gpuIsCCFeatureEnabled_IMPL(struct OBJGPU *pGpu); 4368 4369 #ifdef __nvoc_gpu_h_disabled 4370 static inline NvBool gpuIsCCFeatureEnabled(struct OBJGPU *pGpu) { 4371 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4372 return NV_FALSE; 4373 } 4374 #else //__nvoc_gpu_h_disabled 4375 #define gpuIsCCFeatureEnabled(pGpu) gpuIsCCFeatureEnabled_IMPL(pGpu) 4376 #endif //__nvoc_gpu_h_disabled 4377 4378 NvBool gpuIsApmFeatureEnabled_IMPL(struct OBJGPU *pGpu); 4379 4380 #ifdef __nvoc_gpu_h_disabled 4381 static inline NvBool gpuIsApmFeatureEnabled(struct OBJGPU *pGpu) { 4382 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4383 return NV_FALSE; 4384 } 4385 #else 
//__nvoc_gpu_h_disabled 4386 #define gpuIsApmFeatureEnabled(pGpu) gpuIsApmFeatureEnabled_IMPL(pGpu) 4387 #endif //__nvoc_gpu_h_disabled 4388 4389 NvBool gpuIsCCorApmFeatureEnabled_IMPL(struct OBJGPU *pGpu); 4390 4391 #ifdef __nvoc_gpu_h_disabled 4392 static inline NvBool gpuIsCCorApmFeatureEnabled(struct OBJGPU *pGpu) { 4393 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4394 return NV_FALSE; 4395 } 4396 #else //__nvoc_gpu_h_disabled 4397 #define gpuIsCCorApmFeatureEnabled(pGpu) gpuIsCCorApmFeatureEnabled_IMPL(pGpu) 4398 #endif //__nvoc_gpu_h_disabled 4399 4400 NvBool gpuIsCCDevToolsModeEnabled_IMPL(struct OBJGPU *pGpu); 4401 4402 #ifdef __nvoc_gpu_h_disabled 4403 static inline NvBool gpuIsCCDevToolsModeEnabled(struct OBJGPU *pGpu) { 4404 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4405 return NV_FALSE; 4406 } 4407 #else //__nvoc_gpu_h_disabled 4408 #define gpuIsCCDevToolsModeEnabled(pGpu) gpuIsCCDevToolsModeEnabled_IMPL(pGpu) 4409 #endif //__nvoc_gpu_h_disabled 4410 4411 NvBool gpuIsOnTheBus_IMPL(struct OBJGPU *pGpu); 4412 4413 #ifdef __nvoc_gpu_h_disabled 4414 static inline NvBool gpuIsOnTheBus(struct OBJGPU *pGpu) { 4415 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4416 return NV_FALSE; 4417 } 4418 #else //__nvoc_gpu_h_disabled 4419 #define gpuIsOnTheBus(pGpu) gpuIsOnTheBus_IMPL(pGpu) 4420 #endif //__nvoc_gpu_h_disabled 4421 4422 NV_STATUS gpuEnterStandby_IMPL(struct OBJGPU *pGpu); 4423 4424 #ifdef __nvoc_gpu_h_disabled 4425 static inline NV_STATUS gpuEnterStandby(struct OBJGPU *pGpu) { 4426 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4427 return NV_ERR_NOT_SUPPORTED; 4428 } 4429 #else //__nvoc_gpu_h_disabled 4430 #define gpuEnterStandby(pGpu) gpuEnterStandby_IMPL(pGpu) 4431 #endif //__nvoc_gpu_h_disabled 4432 4433 NV_STATUS gpuEnterHibernate_IMPL(struct OBJGPU *pGpu); 4434 4435 #ifdef __nvoc_gpu_h_disabled 4436 static inline NV_STATUS gpuEnterHibernate(struct OBJGPU *pGpu) { 4437 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4438 return 
NV_ERR_NOT_SUPPORTED; 4439 } 4440 #else //__nvoc_gpu_h_disabled 4441 #define gpuEnterHibernate(pGpu) gpuEnterHibernate_IMPL(pGpu) 4442 #endif //__nvoc_gpu_h_disabled 4443 4444 NV_STATUS gpuResumeFromStandby_IMPL(struct OBJGPU *pGpu); 4445 4446 #ifdef __nvoc_gpu_h_disabled 4447 static inline NV_STATUS gpuResumeFromStandby(struct OBJGPU *pGpu) { 4448 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4449 return NV_ERR_NOT_SUPPORTED; 4450 } 4451 #else //__nvoc_gpu_h_disabled 4452 #define gpuResumeFromStandby(pGpu) gpuResumeFromStandby_IMPL(pGpu) 4453 #endif //__nvoc_gpu_h_disabled 4454 4455 NV_STATUS gpuResumeFromHibernate_IMPL(struct OBJGPU *pGpu); 4456 4457 #ifdef __nvoc_gpu_h_disabled 4458 static inline NV_STATUS gpuResumeFromHibernate(struct OBJGPU *pGpu) { 4459 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4460 return NV_ERR_NOT_SUPPORTED; 4461 } 4462 #else //__nvoc_gpu_h_disabled 4463 #define gpuResumeFromHibernate(pGpu) gpuResumeFromHibernate_IMPL(pGpu) 4464 #endif //__nvoc_gpu_h_disabled 4465 4466 NvBool gpuCheckSysmemAccess_IMPL(struct OBJGPU *pGpu); 4467 4468 #ifdef __nvoc_gpu_h_disabled 4469 static inline NvBool gpuCheckSysmemAccess(struct OBJGPU *pGpu) { 4470 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4471 return NV_FALSE; 4472 } 4473 #else //__nvoc_gpu_h_disabled 4474 #define gpuCheckSysmemAccess(pGpu) gpuCheckSysmemAccess_IMPL(pGpu) 4475 #endif //__nvoc_gpu_h_disabled 4476 4477 void gpuInitChipInfo_IMPL(struct OBJGPU *pGpu); 4478 4479 #ifdef __nvoc_gpu_h_disabled 4480 static inline void gpuInitChipInfo(struct OBJGPU *pGpu) { 4481 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4482 } 4483 #else //__nvoc_gpu_h_disabled 4484 #define gpuInitChipInfo(pGpu) gpuInitChipInfo_IMPL(pGpu) 4485 #endif //__nvoc_gpu_h_disabled 4486 4487 NV_STATUS gpuSanityCheckRegRead_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue); 4488 4489 #ifdef __nvoc_gpu_h_disabled 4490 static inline NV_STATUS gpuSanityCheckRegRead(struct OBJGPU *pGpu, NvU32 addr, 
NvU32 size, void *pValue) { 4491 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4492 return NV_ERR_NOT_SUPPORTED; 4493 } 4494 #else //__nvoc_gpu_h_disabled 4495 #define gpuSanityCheckRegRead(pGpu, addr, size, pValue) gpuSanityCheckRegRead_IMPL(pGpu, addr, size, pValue) 4496 #endif //__nvoc_gpu_h_disabled 4497 4498 NV_STATUS gpuSanityCheckRegisterAccess_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal); 4499 4500 #ifdef __nvoc_gpu_h_disabled 4501 static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal) { 4502 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4503 return NV_ERR_NOT_SUPPORTED; 4504 } 4505 #else //__nvoc_gpu_h_disabled 4506 #define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal) 4507 #endif //__nvoc_gpu_h_disabled 4508 4509 void gpuUpdateUserSharedData_IMPL(struct OBJGPU *pGpu); 4510 4511 #ifdef __nvoc_gpu_h_disabled 4512 static inline void gpuUpdateUserSharedData(struct OBJGPU *pGpu) { 4513 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4514 } 4515 #else //__nvoc_gpu_h_disabled 4516 #define gpuUpdateUserSharedData(pGpu) gpuUpdateUserSharedData_IMPL(pGpu) 4517 #endif //__nvoc_gpu_h_disabled 4518 4519 NV_STATUS gpuValidateRegOffset_IMPL(struct OBJGPU *pGpu, NvU32 arg0); 4520 4521 #ifdef __nvoc_gpu_h_disabled 4522 static inline NV_STATUS gpuValidateRegOffset(struct OBJGPU *pGpu, NvU32 arg0) { 4523 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4524 return NV_ERR_NOT_SUPPORTED; 4525 } 4526 #else //__nvoc_gpu_h_disabled 4527 #define gpuValidateRegOffset(pGpu, arg0) gpuValidateRegOffset_IMPL(pGpu, arg0) 4528 #endif //__nvoc_gpu_h_disabled 4529 4530 NV_STATUS gpuSetGC6SBIOSCapabilities_IMPL(struct OBJGPU *pGpu); 4531 4532 #ifdef __nvoc_gpu_h_disabled 4533 static inline NV_STATUS gpuSetGC6SBIOSCapabilities(struct OBJGPU *pGpu) { 4534 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4535 return NV_ERR_NOT_SUPPORTED; 4536 } 4537 #else 
//__nvoc_gpu_h_disabled 4538 #define gpuSetGC6SBIOSCapabilities(pGpu) gpuSetGC6SBIOSCapabilities_IMPL(pGpu) 4539 #endif //__nvoc_gpu_h_disabled 4540 4541 NV_STATUS gpuGc6Entry_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GC6_ENTRY_PARAMS *arg0); 4542 4543 #ifdef __nvoc_gpu_h_disabled 4544 static inline NV_STATUS gpuGc6Entry(struct OBJGPU *pGpu, NV2080_CTRL_GC6_ENTRY_PARAMS *arg0) { 4545 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4546 return NV_ERR_NOT_SUPPORTED; 4547 } 4548 #else //__nvoc_gpu_h_disabled 4549 #define gpuGc6Entry(pGpu, arg0) gpuGc6Entry_IMPL(pGpu, arg0) 4550 #endif //__nvoc_gpu_h_disabled 4551 4552 NV_STATUS gpuGc6EntryGpuPowerOff_IMPL(struct OBJGPU *pGpu); 4553 4554 #ifdef __nvoc_gpu_h_disabled 4555 static inline NV_STATUS gpuGc6EntryGpuPowerOff(struct OBJGPU *pGpu) { 4556 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4557 return NV_ERR_NOT_SUPPORTED; 4558 } 4559 #else //__nvoc_gpu_h_disabled 4560 #define gpuGc6EntryGpuPowerOff(pGpu) gpuGc6EntryGpuPowerOff_IMPL(pGpu) 4561 #endif //__nvoc_gpu_h_disabled 4562 4563 NV_STATUS gpuGc6Exit_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PARAMS *arg0); 4564 4565 #ifdef __nvoc_gpu_h_disabled 4566 static inline NV_STATUS gpuGc6Exit(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PARAMS *arg0) { 4567 NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); 4568 return NV_ERR_NOT_SUPPORTED; 4569 } 4570 #else //__nvoc_gpu_h_disabled 4571 #define gpuGc6Exit(pGpu, arg0) gpuGc6Exit_IMPL(pGpu, arg0) 4572 #endif //__nvoc_gpu_h_disabled 4573 4574 #undef PRIVATE_FIELD 4575 4576 4577 // Look up pGpu associated with a pResourceRef 4578 NV_STATUS gpuGetByRef (RsResourceRef *pContextRef, NvBool *pbBroadcast, struct OBJGPU **ppGpu); 4579 4580 // Look up pGpu associated with a hResource 4581 NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *pbBroadcast, struct OBJGPU **ppGpu); 4582 4583 #define GPU_GFID_PF (0) 4584 #define IS_GFID_PF(gfid) (((NvU32)(gfid)) == GPU_GFID_PF) 4585 #define IS_GFID_VF(gfid) 
(((NvU32)(gfid)) != GPU_GFID_PF) 4586 // Invalid P2P GFID 4587 #define INVALID_P2P_GFID (0xFFFFFFFF) 4588 #define INVALID_FABRIC_PARTITION_ID (0xFFFFFFFF) 4589 4590 // 4591 // Generates GPU child accessor macros (i.e.: GPU_GET_{ENG}) 4592 // 4593 #define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4594 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return pGpu->gpuField; } \ 4595 ct_assert(numInstances == 1); 4596 4597 #define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4598 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return index < numInstances ? pGpu->gpuField[index] : NULL; } 4599 4600 #include "gpu/gpu_child_list.h" 4601 4602 static NV_FORCEINLINE struct Graphics *GPU_GET_GR(struct OBJGPU *pGpu) { return NULL; } 4603 4604 // Temporary stubs 4605 #if RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 4606 #define GPU_CHILD_LIST_DISABLED_ONLY 4607 #define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4608 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return NULL; } 4609 4610 #define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ 4611 static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return NULL; } 4612 4613 #include "gpu/gpu_child_list.h" 4614 #endif // RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 4615 4616 4617 // 4618 // Inline functions 4619 // 4620 4621 // 4622 // This function returns subdevice mask for a GPU. 4623 // For non SLI, subdeviceInstance is 0, so this 4624 // function will always return 1. 
4625 // 4626 4627 static NV_INLINE NvU32 4628 gpuGetSubdeviceMask 4629 ( 4630 struct OBJGPU *pGpu 4631 ) 4632 { 4633 return 1 << pGpu->subdeviceInstance; 4634 } 4635 4636 static NV_INLINE NvU32 4637 gpuGetInstance 4638 ( 4639 struct OBJGPU *pGpu 4640 ) 4641 { 4642 return pGpu->gpuInstance; 4643 } 4644 4645 static NV_INLINE NvU32 4646 gpuGetDeviceInstance 4647 ( 4648 struct OBJGPU *pGpu 4649 ) 4650 { 4651 return pGpu->deviceInstance; 4652 } 4653 4654 NV_INLINE 4655 static NvU32 gpuGetNumCEs(struct OBJGPU *pGpu) 4656 { 4657 return pGpu->numCEs; 4658 } 4659 4660 // 4661 // Per GPU mode flags macros. In general these macros should not be 4662 // used and all code paths should be the same on all environments. 4663 // However occasionally a tweak is needed to work around a limitation 4664 // or improve speed on non-hardware. Is_RTLSIM normally is handled 4665 // in the IS_SIMULATION case and should almost never be used. 4666 // 4667 // IS_EMULATION actual emulation hardware 4668 // IS_SIMULATION fmodel or RTL simulation 4669 // IS_MODS_AMODEL amodel under mods for trace player 4670 // IS_LIVE_AMODEL amodel under windows for 3D drivers (removed) 4671 // IS_RTLSIM RTL simulation 4672 // IS_SILICON Real hardware 4673 // IS_VIRTUAL RM is running within a guest VM 4674 // IS_GSP_CLIENT RM is a GSP/DCE client with GPU support offloaded to GSP/DCE 4675 // 4676 4677 #define IS_EMULATION(pGpu) ((pGpu)->getProperty((pGpu), PDB_PROP_GPU_EMULATION)) 4678 #define IS_SIMULATION(pGpu) (pGpu->bIsSimulation) 4679 #define IS_MODS_AMODEL(pGpu) (pGpu->bIsModsAmodel) 4680 #define IS_FMODEL(pGpu) (pGpu->bIsFmodel) 4681 #define IS_RTLSIM(pGpu) (pGpu->bIsRtlsim) 4682 #define IS_SILICON(pGpu) (!(IS_EMULATION(pGpu) || IS_SIMULATION(pGpu))) 4683 #define IS_PASSTHRU(pGpu) ((pGpu)->bIsPassthru) 4684 #define IS_GSP_CLIENT(pGpu) ((RMCFG_FEATURE_GSP_CLIENT_RM || RMCFG_FEATURE_DCE_CLIENT_RM) && (pGpu)->isGspClient) 4685 #define IS_VIRTUAL(pGpu) NV_FALSE 4686 #define IS_VIRTUAL_WITH_SRIOV(pGpu) NV_FALSE 
4687 #define IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu) NV_FALSE 4688 #define IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) NV_FALSE 4689 #define IS_VIRTUAL_WITHOUT_SRIOV(pGpu) NV_FALSE 4690 #define IS_SRIOV_HEAVY(pGpu) NV_FALSE 4691 #define IS_SRIOV_HEAVY_GUEST(pGpu) NV_FALSE 4692 #define IS_SRIOV_FULL_GUEST(pGpu) NV_FALSE 4693 #define IS_SRIOV_HEAVY_HOST(pGpu) NV_FALSE 4694 #define IS_SRIOV_FULL_HOST(pGpu) ((hypervisorIsVgxHyper()) && gpuIsSriovEnabled(pGpu) && !IS_SRIOV_HEAVY(pGpu)) 4695 #define IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) ((pGpu)->bVgpuGspPluginOffloadEnabled) 4696 #define IS_SRIOV_WITH_VGPU_GSP_ENABLED(pGpu) (gpuIsSriovEnabled(pGpu) && IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu)) 4697 #define IS_SRIOV_WITH_VGPU_GSP_DISABLED(pGpu) (gpuIsSriovEnabled(pGpu) && !IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu)) 4698 4699 extern GPU_CHILD_ITER gpuGetPossibleEngDescriptorIter(void); 4700 extern NvBool gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc); 4701 4702 NV_STATUS gpuCtrlExecRegOps(struct OBJGPU *, struct Graphics *, NvHandle, NvHandle, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool); 4703 NV_STATUS gpuValidateRegOps(struct OBJGPU *, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool, NvBool); 4704 4705 // GPU Sanity Check Flags 4706 #define GPU_SANITY_CHECK_FLAGS_BOOT_0 NVBIT(0) 4707 #define GPU_SANITY_CHECK_FLAGS_OFF_BY_N NVBIT(1) 4708 #define GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH NVBIT(2) 4709 #define GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED NVBIT(3) 4710 #define GPU_SANITY_CHECK_FLAGS_FB NVBIT(4) 4711 4712 #define GPU_SANITY_CHECK_FLAGS_NONE 0x0 4713 #define GPU_SANITY_CHECK_FLAGS_ALL 0xffffffff 4714 4715 // 4716 // Macro for checking if GPU is in reset. 
4717 // 4718 #define API_GPU_IN_RESET_SANITY_CHECK(pGpu) \ 4719 ((NULL == pGpu) || \ 4720 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) || \ 4721 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || \ 4722 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) || \ 4723 pGpu->getProperty(pGpu, PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING)) 4724 4725 // 4726 // Marco for checking if GPU is still connected. 4727 // 4728 #define API_GPU_ATTACHED_SANITY_CHECK(pGpu) \ 4729 ((NULL != pGpu) && \ 4730 pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \ 4731 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET)) 4732 4733 // 4734 // Macro for checking if GPU has Full Sanity 4735 // 4736 #define FULL_GPU_SANITY_CHECK(pGpu) \ 4737 ((NULL != pGpu) && \ 4738 gpuIsGpuFullPower(pGpu) && \ 4739 pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \ 4740 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \ 4741 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \ 4742 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \ 4743 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) && \ 4744 gpuCheckSysmemAccess(pGpu)) 4745 4746 // 4747 // Macro for checking if GPU has Full Sanity 4748 // 4749 #define FULL_GPU_SANITY_FOR_PM_RESUME(pGpu) \ 4750 ((NULL != pGpu) && \ 4751 gpuIsGpuFullPowerForPmResume(pGpu) && \ 4752 pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \ 4753 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \ 4754 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \ 4755 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \ 4756 !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)) 4757 4758 // 4759 // Macro for checking if GPU is in the recovery path 4760 // 4761 #define API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu) \ 4762 ((NULL == pGpu) || \ 4763 pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY)) 4764 4765 // 4766 // Identifiers for gpuGetRegBaseOffset HAL interface. 
4767 // 4768 #define NV_REG_BASE_GR (0x00000001) 4769 #define NV_REG_BASE_TIMER (0x00000002) 4770 #define NV_REG_BASE_MASTER (0x00000003) 4771 #define NV_REG_BASE_USERMODE (0x00000004) 4772 #define NV_REG_BASE_LAST NV_REG_BASE_USERMODE 4773 ct_assert(NV_REG_BASE_LAST < NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX); 4774 4775 #define GPU_READ_PRI_ERROR_MASK 0xFFF00000 4776 #define GPU_READ_PRI_ERROR_CODE 0xBAD00000 4777 4778 // 4779 // Define for invalid register value. GPU could have fallen off the bus or 4780 // the GPU could be in reset. 4781 // 4782 #define GPU_REG_VALUE_INVALID 0xFFFFFFFF 4783 4784 // 4785 // Hal InfoBlock access interface 4786 // 4787 #define gpuGetInfoBlock(pGpu, pListHead, dataId) getInfoPtr(pListHead, dataId) 4788 #define gpuAddInfoBlock(pGpu, ppListHead, dataId, size) addInfoPtr(ppListHead, dataId, size) 4789 #define gpuDeleteInfoBlock(pGpu, ppListHead, dataId) deleteInfoPtr(ppListHead, dataId); 4790 #define gpuTestInfoBlock(pGpu, pListHead, dataId) testInfoPtr(pListHead, dataId); 4791 4792 typedef struct _vgpu_static_info VGPU_STATIC_INFO; 4793 typedef struct GspStaticConfigInfo_t GspStaticConfigInfo; 4794 4795 // Static info getters 4796 VGPU_STATIC_INFO *gpuGetStaticInfo(struct OBJGPU *pGpu); 4797 #define GPU_GET_STATIC_INFO(pGpu) gpuGetStaticInfo(pGpu) 4798 GspStaticConfigInfo *gpuGetGspStaticInfo(struct OBJGPU *pGpu); 4799 #define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu) 4800 4801 NV_STATUS gpuSimEscapeWrite(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value); 4802 NV_STATUS gpuSimEscapeWriteBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); 4803 NV_STATUS gpuSimEscapeRead(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value); 4804 NV_STATUS gpuSimEscapeReadBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); 4805 4806 #endif // _OBJGPU_H_ 4807 4808 #ifdef __cplusplus 4809 } // extern "C" 4810 #endif 4811 4812 
#endif // _G_GPU_NVOC_H_ 4813