1 #ifndef _G_GPU_NVOC_H_
2 #define _G_GPU_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 #include "g_gpu_nvoc.h"
32 
33 #ifndef _OBJGPU_H_
34 #define _OBJGPU_H_
35 
36 /*!
37  * @file
38  * @brief Resource Manager Defines and Structures: Defines and structures used for the GPU Object.
39  */
40 
41 /*!
42  *
43  * Forward declaration of SEQSCRIPT. It lives here because it is used by many
44  * clients and we don't want objseq.h to have to be included everywhere.
45  * See NVCR 12827752.
46  *
47  */
48 typedef struct _SEQSCRIPT    SEQSCRIPT, *PSEQSCRIPT;
49 
50 typedef struct GPUATTACHARG GPUATTACHARG;
51 
52 /*
53  * WARNING -- Avoid including headers in gpu.h
54  *   A change in gpu.h or in any header it includes triggers recompilation of most RM
55  *   files in an incremental build.  We should keep the list of included headers as short
56  *   as possible.
57  *   In particular, a GPU child module's object header should not be included here.
58  *   A child module generally includes the header of its parent, so a child module header
59  *   included by the parent module affects all of the sibling modules.
60  */
61 #include "ctrl/ctrl0000/ctrl0000system.h"
62 #include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS (from hal)
63 #include "ctrl/ctrl2080/ctrl2080bios.h"
64 #include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_MAX_BSPS/NVENCS
65 #include "ctrl/ctrl2080/ctrl2080ecc.h"
66 #include "ctrl/ctrl2080/ctrl2080nvd.h"
67 #include "ctrl/ctrl0073/ctrl0073system.h"
68 #include "class/cl2080.h"
69 #include "class/cl90cd.h"
70 
71 #include "nvlimits.h"
72 #include "utils/nv_enum.h"
73 
74 #include "gpu/gpu_timeout.h"
75 #include "gpu/gpu_access.h"
76 #include "gpu/gpu_shared_data_map.h"
77 #include "gpu/kern_gpu_power.h"
78 
79 #include "platform/acpi_common.h"
80 #include "gpu/gpu_acpi_data.h"
81 
82 #include "core/core.h"
83 #include "core/hal.h"
84 #include "core/system.h"
85 #include "diagnostics/traceable.h"
86 #include "gpu/error_cont.h"
87 #include "gpu/gpu_halspec.h"
88 #include "gpu/gpu_resource_desc.h"
89 #include "gpu/gpu_uuid.h"
90 #include "gpu/mem_mgr/mem_desc.h"
91 #include "kernel/gpu/gpu_engine_type.h"
92 #include "nvoc/utility.h"
93 #include "prereq_tracker/prereq_tracker.h"
94 
95 #include "kernel/disp/nvfbc_session.h"
96 #include "kernel/gpu/nvenc/nvencsession.h"
97 
98 #include "rmapi/control.h"
99 #include "rmapi/event.h"
100 #include "rmapi/rmapi.h"
101 
102 #include "gpuvideo/videoeventlist.h"
103 
104 #include "gpu/gpu_fabric_probe.h"
105 
106 #include "nv_arch.h"
107 
108 #include "g_rmconfig_util.h"      // prototypes for rmconfig utility functions, eg: rmcfg_IsGK104()
109 
110 // TODO - the forward declaration of OS_GPU_INFO should be simplified
111 typedef struct nv_state_t OS_GPU_INFO;
112 
113 struct OBJGMMU;
114 
115 #ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__
116 #define __NVOC_CLASS_OBJGMMU_TYPEDEF__
117 typedef struct OBJGMMU OBJGMMU;
118 #endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */
119 
120 #ifndef __nvoc_class_id_OBJGMMU
121 #define __nvoc_class_id_OBJGMMU 0xd7a41d
122 #endif /* __nvoc_class_id_OBJGMMU */
123 
124 
125 struct OBJGRIDDISPLAYLESS;
126 
127 #ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__
128 #define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__
129 typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS;
130 #endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */
131 
132 #ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS
133 #define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a
134 #endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */
135 
136 
137 struct OBJHOSTENG;
138 
139 #ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__
140 #define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__
141 typedef struct OBJHOSTENG OBJHOSTENG;
142 #endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */
143 
144 #ifndef __nvoc_class_id_OBJHOSTENG
145 #define __nvoc_class_id_OBJHOSTENG 0xb356e7
146 #endif /* __nvoc_class_id_OBJHOSTENG */
147 
148 
149 struct OBJPMU_CLIENT_IMPLEMENTER;
150 
151 #ifndef __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__
152 #define __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__
153 typedef struct OBJPMU_CLIENT_IMPLEMENTER OBJPMU_CLIENT_IMPLEMENTER;
154 #endif /* __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ */
155 
156 #ifndef __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER
157 #define __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER 0x88cace
158 #endif /* __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER */
159 
160 
161 struct OBJINTRABLE;
162 
163 #ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__
164 #define __NVOC_CLASS_OBJINTRABLE_TYPEDEF__
165 typedef struct OBJINTRABLE OBJINTRABLE;
166 #endif /* __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */
167 
168 #ifndef __nvoc_class_id_OBJINTRABLE
169 #define __nvoc_class_id_OBJINTRABLE 0x31ccb7
170 #endif /* __nvoc_class_id_OBJINTRABLE */
171 
172 
173 struct OBJVBIOS;
174 
175 #ifndef __NVOC_CLASS_OBJVBIOS_TYPEDEF__
176 #define __NVOC_CLASS_OBJVBIOS_TYPEDEF__
177 typedef struct OBJVBIOS OBJVBIOS;
178 #endif /* __NVOC_CLASS_OBJVBIOS_TYPEDEF__ */
179 
180 #ifndef __nvoc_class_id_OBJVBIOS
181 #define __nvoc_class_id_OBJVBIOS 0x5dc772
182 #endif /* __nvoc_class_id_OBJVBIOS */
183 
184 
185 struct NvDebugDump;
186 
187 #ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__
188 #define __NVOC_CLASS_NvDebugDump_TYPEDEF__
189 typedef struct NvDebugDump NvDebugDump;
190 #endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */
191 
192 #ifndef __nvoc_class_id_NvDebugDump
193 #define __nvoc_class_id_NvDebugDump 0x7e80a2
194 #endif /* __nvoc_class_id_NvDebugDump */
195 
196 
197 struct GpuMutexMgr;
198 
199 #ifndef __NVOC_CLASS_GpuMutexMgr_TYPEDEF__
200 #define __NVOC_CLASS_GpuMutexMgr_TYPEDEF__
201 typedef struct GpuMutexMgr GpuMutexMgr;
202 #endif /* __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ */
203 
204 #ifndef __nvoc_class_id_GpuMutexMgr
205 #define __nvoc_class_id_GpuMutexMgr 0x9d93b2
206 #endif /* __nvoc_class_id_GpuMutexMgr */
207 
208 
209 struct KernelFalcon;
210 
211 #ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__
212 #define __NVOC_CLASS_KernelFalcon_TYPEDEF__
213 typedef struct KernelFalcon KernelFalcon;
214 #endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */
215 
216 #ifndef __nvoc_class_id_KernelFalcon
217 #define __nvoc_class_id_KernelFalcon 0xb6b1af
218 #endif /* __nvoc_class_id_KernelFalcon */
219 
220 
221 struct KernelVideoEngine;
222 
223 #ifndef __NVOC_CLASS_KernelVideoEngine_TYPEDEF__
224 #define __NVOC_CLASS_KernelVideoEngine_TYPEDEF__
225 typedef struct KernelVideoEngine KernelVideoEngine;
226 #endif /* __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ */
227 
228 #ifndef __nvoc_class_id_KernelVideoEngine
229 #define __nvoc_class_id_KernelVideoEngine 0x9e2f3e
230 #endif /* __nvoc_class_id_KernelVideoEngine */
231 
232 
233 struct KernelChannel;
234 
235 #ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__
236 #define __NVOC_CLASS_KernelChannel_TYPEDEF__
237 typedef struct KernelChannel KernelChannel;
238 #endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */
239 
240 #ifndef __nvoc_class_id_KernelChannel
241 #define __nvoc_class_id_KernelChannel 0x5d8d70
242 #endif /* __nvoc_class_id_KernelChannel */
243 
244 
245 struct GenericKernelFalcon;
246 
247 #ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__
248 #define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__
249 typedef struct GenericKernelFalcon GenericKernelFalcon;
250 #endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */
251 
252 #ifndef __nvoc_class_id_GenericKernelFalcon
253 #define __nvoc_class_id_GenericKernelFalcon 0xabcf08
254 #endif /* __nvoc_class_id_GenericKernelFalcon */
255 
256 
257 
258 struct Subdevice;
259 
260 #ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
261 #define __NVOC_CLASS_Subdevice_TYPEDEF__
262 typedef struct Subdevice Subdevice;
263 #endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */
264 
265 #ifndef __nvoc_class_id_Subdevice
266 #define __nvoc_class_id_Subdevice 0x4b01b3
267 #endif /* __nvoc_class_id_Subdevice */
268 
269 
270 struct Device;
271 
272 #ifndef __NVOC_CLASS_Device_TYPEDEF__
273 #define __NVOC_CLASS_Device_TYPEDEF__
274 typedef struct Device Device;
275 #endif /* __NVOC_CLASS_Device_TYPEDEF__ */
276 
277 #ifndef __nvoc_class_id_Device
278 #define __nvoc_class_id_Device 0xe0ac20
279 #endif /* __nvoc_class_id_Device */
280 
281 
282 struct RsClient;
283 
284 #ifndef __NVOC_CLASS_RsClient_TYPEDEF__
285 #define __NVOC_CLASS_RsClient_TYPEDEF__
286 typedef struct RsClient RsClient;
287 #endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */
288 
289 #ifndef __nvoc_class_id_RsClient
290 #define __nvoc_class_id_RsClient 0x8f87e5
291 #endif /* __nvoc_class_id_RsClient */
292 
293 
294 struct Memory;
295 
296 #ifndef __NVOC_CLASS_Memory_TYPEDEF__
297 #define __NVOC_CLASS_Memory_TYPEDEF__
298 typedef struct Memory Memory;
299 #endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
300 
301 #ifndef __nvoc_class_id_Memory
302 #define __nvoc_class_id_Memory 0x4789f2
303 #endif /* __nvoc_class_id_Memory */
304 
305 
306 
307 #ifndef PARTITIONID_INVALID
308 #define PARTITIONID_INVALID 0xFFFFFFFF
309 #endif
310 typedef struct MIG_INSTANCE_REF MIG_INSTANCE_REF;
311 typedef struct NV2080_CTRL_GPU_REG_OP NV2080_CTRL_GPU_REG_OP;
312 
313 typedef enum
314 {
315     BRANDING_TYPE_NONE,
316     BRANDING_TYPE_QUADRO_GENERIC,
317     BRANDING_TYPE_QUADRO_AD,
318     BRANDING_TYPE_NVS_NVIDIA, // "NVIDIA NVS"
319     BRANDING_TYPE_VGX,
320 } BRANDING_TYPE;
321 
322 typedef enum
323 {
324     COMPUTE_BRANDING_TYPE_NONE,
325     COMPUTE_BRANDING_TYPE_TESLA,
326 } COMPUTE_BRANDING_TYPE;
327 
328 #define OOR_ARCH_DEF(x)                            \
329     NV_ENUM_ENTRY(x, OOR_ARCH_X86_64,  0x00000000) \
330     NV_ENUM_ENTRY(x, OOR_ARCH_PPC64LE, 0x00000001) \
331     NV_ENUM_ENTRY(x, OOR_ARCH_ARM,     0x00000002) \
332     NV_ENUM_ENTRY(x, OOR_ARCH_AARCH64, 0x00000003) \
333     NV_ENUM_ENTRY(x, OOR_ARCH_NONE,    0x00000004)
334 
335 NV_ENUM_DEF(OOR_ARCH, OOR_ARCH_DEF)
336 
337 typedef struct
338 {
339     NvU32 classId;
340     NvU32 flags;
341 } GPUCHILDORDER;
342 
343 typedef struct
344 {
345     NvU32 classId;
346     NvU32 instances;
347 
348     /*!
349      * Pointer to the @ref NVOC_CLASS_INFO for the concrete class to instantiate
350      * for this child.
351      */
352     const NVOC_CLASS_INFO *pClassInfo;
353 } GPUCHILDPRESENT;
354 
355 /*!
356  * @brief   Generates an entry for a list of @ref GPUCHILDPRESENT objects for a
357  *          class of the given name
358  *
359  * @param[in]   _childClassName
360  *  Name of the class for the entry
361  * @param[in]   _instances
362  *  Number of instances of the child that may be present; see
363  *  @ref GPUCHILDPRESENT::instances
364  *
365  * @return  An entry suitable for a list of @ref GPUCHILDPRESENT for the given
366  *          child of @ref OBJGPU
367  */
368 #define GPU_CHILD_PRESENT(_childClassName, _instances) \
369     GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, (_instances), _childClassName)
370 
371 /*!
372  * @brief   Generates an entry for a list of @ref GPUCHILDPRESENT objects that
373  *          allows the @ref OBJGPU child to instantiate a sub-class of the base
374  *          @ref OBJGPU child class.
375  *
376  * @details The intention of this macro is to allow a list of
377  *          @ref GPUCHILDPRESENT to essentially state "this child should be
378  *          present with this concrete class type". This allows for different
379  *          @ref GPUCHILDPRESENT lists to request different classes with
380  *          different behavior via sub-classes, for the same basic @ref OBJGPU
381  *          child.
382  *
383  * @param[in]   _childClassName
384  *  Name of the base class at which @ref OBJGPU points
385  * @param[in]   _instances
386  *  Number of instances of the child that may be present; see
387  *  @ref GPUCHILDPRESENT::instances
388  * @param[in]   _concreteClassName
389  *   Name of the sub-class of _childClassName that should actually be
390  *   instantiated
391  *
392  * @return  An entry suitable for a list of @ref GPUCHILDPRESENT for the given
393  *          child of @ref OBJGPU with the given concrete class type.
394  */
395 #define GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, _instances, _concreteClassName) \
396     { \
397         .classId = classId(_childClassName), \
398         .instances = (_instances), \
399         .pClassInfo = classInfo(_concreteClassName) \
400     }
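
//
// Illustrative sketch only (not part of the generated header): a hypothetical
// per-chip child-present list built with the two macros above. The class names,
// instance counts, and the sub-class name are placeholders for illustration;
// real lists are chip-specific and defined elsewhere in RM.
//
//     static const GPUCHILDPRESENT gpuChildrenPresent_Example[] =
//     {
//         GPU_CHILD_PRESENT(KernelBus, 1),
//         GPU_CHILD_PRESENT(KernelCE, 10),
//         // Ask for a (hypothetical) sub-class as the concrete type to
//         // instantiate for the KernelFifo child slot.
//         GPU_CHILD_PRESENT_POLYMORPHIC(KernelFifo, 1, SomeKernelFifoSubclass),
//     };
//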
401 
402 // GPU Child Order Flags
403 #define GCO_LIST_INIT            NVBIT(0)    // entry is used for init ordering (DO NOT USE)
404 #define GCO_LIST_LOAD            NVBIT(1)    // entry is used for load and postload ordering (DO NOT USE)
405 #define GCO_LIST_UNLOAD          NVBIT(2)    // entry is used for unload and preunload ordering (DO NOT USE)
406 #define GCO_LIST_DESTROY         NVBIT(3)    // entry is used for destroy order (DO NOT USE)
407 #define GCO_LIST_ALL             (GCO_LIST_INIT | GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY)
408                                            // ^ entry is used for all list types (RECOMMENDED)
409 #define GCO_ALL                  (GCO_LIST_ALL)
410 
411 
412 typedef struct
413 {
414     NvU32           childTypeIdx;
415     NvU32           childInst;
416     NvU32           gpuChildPtrOffset;
417 } GPU_CHILD_ITER;
418 
419 typedef GPU_CHILD_ITER ENGSTATE_ITER;
420 typedef GPU_CHILD_ITER PMU_CLIENT_IMPLEMENTER_ITER;
421 
422 //
423 // Object 'get' macros for GPU relative object retrievals.
424 //
425 
426 #define ENG_GET_GPU(p)                  objFindAncestorOfType(OBJGPU, (p))
427 
428 // GPU_GET_FIFO_UC is autogenerated and returns the per-GPU pFifo.
429 #define GPU_GET_FIFO(p)                 GPU_GET_FIFO_UC(p)
430 
431 // GPU_GET_KERNEL_FIFO_UC is autogenerated and returns the per-GPU pKernelFifo.
432 #define GPU_GET_KERNEL_FIFO(p)          gpuGetKernelFifoShared(p)
433 
434 #define GPU_GET_HEAP(p)                 (RMCFG_MODULE_HEAP ? MEMORY_MANAGER_GET_HEAP(GPU_GET_MEMORY_MANAGER(p)) : NULL)
435 
436 #define GPU_GET_HAL(p)                  (RMCFG_MODULE_HAL ? (p)->pHal : NULL)
437 
438 #define GPU_GET_OS(p)                   (RMCFG_MODULE_OS ? (p)->pOS : NULL)     // TBD: replace with SYS_GET_OS
439 #define GPU_QUICK_PATH_GET_OS(p)        GPU_GET_OS(p)  // TBD: remove
440 
441 #define GPU_GET_REGISTER_ACCESS(g)      (&(g)->registerAccess)
442 
443 // Returns the pRmApi that routes to the physical driver, either via RPC or local calls
444 #define GPU_GET_PHYSICAL_RMAPI(g)       (&(g)->physicalRmApi)
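
//
// Illustrative sketch only (assumes the RM_API::Control calling convention and
// hypothetical cmd/params variables): issuing a physical-RM control through the
// RM_API pointer retrieved above.
//
//     RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
//     status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
//                              cmd, pParams, paramsSize);
//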
445 
446 //
447 // Defines and helpers for encoding and decoding PCI domain, bus and device.
448 //
449 // Ideally these would live in objbus.h (or somewhere else more appropriate) and
450 // not gpu/gpu.h, but keep them here for now while support for 32-bit domains is
451 // being added as part of bug 1904645.
452 //
453 
454 // DRF macros for GPUBUSINFO::nvDomainBusDeviceFunc
455 #define NVGPU_BUSDEVICE_DOMAIN     63:32
456 #define NVGPU_BUSDEVICE_BUS        15:8
457 #define NVGPU_BUSDEVICE_DEVICE      7:0
458 
459 static NV_INLINE NvU32 gpuDecodeDomain(NvU64 gpuDomainBusDevice)
460 {
461     return (NvU32)DRF_VAL64(GPU, _BUSDEVICE, _DOMAIN, gpuDomainBusDevice);
462 }
463 
464 static NV_INLINE NvU8 gpuDecodeBus(NvU64 gpuDomainBusDevice)
465 {
466     return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _BUS, gpuDomainBusDevice);
467 }
468 
469 static NV_INLINE NvU8 gpuDecodeDevice(NvU64 gpuDomainBusDevice)
470 {
471     return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _DEVICE, gpuDomainBusDevice);
472 }
473 
474 static NV_INLINE NvU64 gpuEncodeDomainBusDevice(NvU32 domain, NvU8 bus, NvU8 device)
475 {
476     return DRF_NUM64(GPU, _BUSDEVICE, _DOMAIN, domain) |
477            DRF_NUM64(GPU, _BUSDEVICE, _BUS, bus) |
478            DRF_NUM64(GPU, _BUSDEVICE, _DEVICE, device);
479 }
480 
481 static NV_INLINE NvU32 gpuEncodeBusDevice(NvU8 bus, NvU8 device)
482 {
483     NvU64 busDevice = gpuEncodeDomainBusDevice(0, bus, device);
484 
485     // Bus and device are guaranteed to fit in the lower 32bits
486     return (NvU32)busDevice;
487 }
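
//
// Illustrative sketch only: round-tripping a domain/bus/device tuple through the
// encode/decode helpers above (the values are arbitrary examples).
//
//     NvU64 dbd = gpuEncodeDomainBusDevice(0x1, 0x3F, 0x0);
//     NvU32 dom = gpuDecodeDomain(dbd);   // 0x1
//     NvU8  bus = gpuDecodeBus(dbd);      // 0x3F
//     NvU8  dev = gpuDecodeDevice(dbd);   // 0x0
//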
488 
489 //
490 // Generate a 32-bit id from domain, bus and device tuple.
491 //
492 NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device);
493 
494 //
495 // Generate a 32-bit id from a physical address
496 //
497 NvU32 gpuGenerate32BitIdFromPhysAddr(RmPhysAddr addr);
498 
499 //
500 // Helpers for getting domain, bus and device of a GPU
501 //
502 // Ideally these would be inline functions, but NVOC doesn't support that today,
503 // tracked in bug 1905882
504 //
505 #define gpuGetDBDF(pGpu) ((pGpu)->busInfo.nvDomainBusDeviceFunc)
506 #define gpuGetDomain(pGpu) gpuDecodeDomain((pGpu)->busInfo.nvDomainBusDeviceFunc)
507 #define gpuGetBus(pGpu)    gpuDecodeBus((pGpu)->busInfo.nvDomainBusDeviceFunc)
508 #define gpuGetDevice(pGpu) gpuDecodeDevice((pGpu)->busInfo.nvDomainBusDeviceFunc)
509 
510 #undef NVGPU_BUSDEVICE_DOMAIN
511 #undef NVGPU_BUSDEVICE_BUS
512 #undef NVGPU_BUSDEVICE_DEVICE
513 
514 //
515 // MaskRevision constants.
516 //
517 #define GPU_NO_MASK_REVISION             0x00
518 #define GPU_MASK_REVISION_A1             0xA1
519 #define GPU_MASK_REVISION_A2             0xA2
520 #define GPU_MASK_REVISION_A3             0xA3
521 #define GPU_MASK_REVISION_A4             0xA4
522 #define GPU_MASK_REVISION_A5             0xA5
523 #define GPU_MASK_REVISION_A6             0xA6
524 #define GPU_MASK_REVISION_B1             0xB1
525 #define GPU_MASK_REVISION_B2             0xB2
526 #define GPU_MASK_REVISION_C1             0xC1
527 #define GPU_MASK_REVISION_D1             0xD1
528 
529 #define GPU_GET_MASKREVISION(pGpu)      (((gpuGetChipMajRev(pGpu))<<4)|(gpuGetChipMinRev(pGpu)))
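
//
// Illustrative sketch only: the mask revision packs the chip major revision into
// the high nibble and the minor revision into the low nibble, e.g. major 0xA and
// minor 0x1 yield ((0xA << 4) | 0x1) == 0xA1 == GPU_MASK_REVISION_A1.
//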
530 
531 //
532 // Revision constants.
533 //
534 #define GPU_NO_REVISION                  0xFF
535 #define GPU_REVISION_0                   0x00
536 #define GPU_REVISION_1                   0x01
537 #define GPU_REVISION_2                   0x02
538 #define GPU_REVISION_3                   0x03
539 #define GPU_REVISION_4                   0x04
540 #define GPU_REVISION_5                   0x05
541 #define GPU_REVISION_6                   0x06
542 #define GPU_REVISION_7                   0x07
543 #define GPU_REVISION_8                   0x08
544 #define GPU_REVISION_9                   0x09
545 #define GPU_REVISION_A                   0x0A
546 #define GPU_REVISION_B                   0x0B
547 #define GPU_REVISION_C                   0x0C
548 #define GPU_REVISION_D                   0x0D
549 #define GPU_REVISION_E                   0x0E
550 #define GPU_REVISION_F                   0x0F
551 
552 //
553 // One extra nibble should be added to the architecture version read from the
554 // PMC boot register to represent the architecture number in RM.
555 //
556 #define GPU_ARCH_SHIFT                  0x4
557 
558 // Registry key for inst mem modification defines
559 #define INSTMEM_TAG_MASK    (0xf0000000)
560 #define INSTMEM_TAG(a)      ((INSTMEM_TAG_MASK & (a)) >> 28)
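
//
// Illustrative sketch only: INSTMEM_TAG() extracts the top nibble of a registry
// override value, e.g. INSTMEM_TAG(0x30001234) == 0x3.
//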
561 
562 
563 typedef struct
564 {
565 
566     NvU32                 PCIDeviceID;
567     NvU32                 Manufacturer;
568     NvU32                 PCISubDeviceID;
569     NvU32                 PCIRevisionID;
570     NvU32                 Subrevision;
571 
572 } GPUIDINFO;
573 
574 
575 typedef struct
576 {
577     NvU32                 impl;
578     NvU32                 arch;
579     NvU32                 majorRev;
580     NvU32                 minorRev;
581     NvU32                 minorExtRev;
582 } PMCBOOT0;
583 
584 typedef struct
585 {
586     NvU32                 impl;
587     NvU32                 arch;
588     NvU32                 majorRev;
589     NvU32                 minorRev;
590     NvU32                 minorExtRev;
591 } PMCBOOT42;
592 
593 //
594 // Random collection of bus-related configuration state.
595 //
596 typedef struct
597 {
598     RmPhysAddr            gpuPhysAddr;
599     RmPhysAddr            gpuPhysFbAddr;
600     RmPhysAddr            gpuPhysInstAddr;
601     RmPhysAddr            gpuPhysIoAddr;
602     NvU32                 iovaspaceId;
603     NvU32                 IntLine;
604     NvU32                 IsrHooked;
605     NvU64                 nvDomainBusDeviceFunc;
606     OOR_ARCH              oorArch;
607 } GPUBUSINFO;
608 
609 typedef struct
610 {
611     PCLASSDESCRIPTOR    pClasses;
612     NvU32              *pSuppressClasses;
613     NvU32               numClasses;
614     NvBool              bSuppressRead;
615 } GPUCLASSDB, *PGPUCLASSDB;
616 
617 typedef struct
618 {
619     const CLASSDESCRIPTOR *pClassDescriptors;
620     NvU32                  numClassDescriptors;
621 
622     PENGDESCRIPTOR         pEngineInitDescriptors;
623     PENGDESCRIPTOR         pEngineDestroyDescriptors;
624     PENGDESCRIPTOR         pEngineLoadDescriptors;
625     PENGDESCRIPTOR         pEngineUnloadDescriptors;
626     NvU32                  numEngineDescriptors;
627 } GPU_ENGINE_ORDER, *PGPU_ENGINE_ORDER;
628 
629 //
630 // PCI Express Support
631 //
632 typedef struct NBADDR
633 {
634     NvU32  domain;
635     NvU8   bus;
636     NvU8   device;
637     NvU8   func;
638     NvU8   valid;
639     void  *handle;
640 } NBADDR;
641 
642 typedef struct
643 {
644     NBADDR  addr;
645     void   *vAddr;              // virtual address of the port, if it has been mapped. Not used starting with Win10 BuildXXXXX
646     NvU32   PCIECapPtr;         // offset of the PCIE capptr in the NB
647     // Capability register set in enhanced configuration space
648     //
649     NvU32   PCIEErrorCapPtr;    // offset of the Advanced Error Reporting Capability register set
650     NvU32   PCIEVCCapPtr;       // offset of the Virtual Channel (VC) Capability register set
651     NvU32   PCIEL1SsCapPtr;     // Offset of the L1 Substates Capabilities
652     NvU16   DeviceID, VendorID; // device and vendor ID for port
653 } PORTDATA;
654 
655 typedef struct // GPU specific data for core logic object, stored in GPU object
656 {
657     PORTDATA  upstreamPort;     // the upstream port info for the GPU
658                                 // If there is a switch this is equal to boardDownstreamPort
659                                 // If there is no switch this is equal to rootPort
660     PORTDATA  rootPort;         // The root port of the PCI-E root complex
661     PORTDATA  boardUpstreamPort;    // If there is no BR03 this is equal to rootPort.
662     PORTDATA  boardDownstreamPort;  // If there is no BR03 these data are not set.
663 } GPUCLDATA;
664 
665 // For SLI Support Using Peer Model
666 typedef struct
667 {
668     OBJGPU     *pGpu;           // Mapping from the local pinset number (i.e. array index) to peer GPU
669     NvU32       pinset;         // Mapping from the local pinset number (i.e. array index) to peer pinset number
670 } _GPU_SLI_PEER;
671 #define DR_PINSET_COUNT 2
672 
673 /*!
674  * SLI link detection HAL flag defines for Sli/Vid/NvLink link detection HAL functions.
675  */
676 #define GPU_LINK_DETECTION_HAL_STUB  0
677 #define GPU_LINK_DETECTION_HAL_GK104 1
678 #define GPU_LINK_DETECTION_HAL_GP100 2
679 #define GPU_LINK_DETECTION_HAL_GP102 3
680 
681 
682 //
683 // Flags for gpuStateLoad() and gpuStateUnload() routines. Flags *must* be used
684 // symmetrically across an Unload/Load pair.
685 //
686 #define GPU_STATE_FLAGS_PRESERVING         NVBIT(0)  // GPU state is preserved
687 #define GPU_STATE_FLAGS_VGA_TRANSITION     NVBIT(1)   // To be used with GPU_STATE_FLAGS_PRESERVING.
688 #define GPU_STATE_FLAGS_PM_TRANSITION      NVBIT(2)   // To be used with GPU_STATE_FLAGS_PRESERVING.
689 #define GPU_STATE_FLAGS_PM_SUSPEND         NVBIT(3)
690 #define GPU_STATE_FLAGS_PM_HIBERNATE       NVBIT(4)
691 #define GPU_STATE_FLAGS_GC6_TRANSITION     NVBIT(5)  // To be used with GPU_STATE_FLAGS_PRESERVING.
692 #define GPU_STATE_FLAGS_FAST_UNLOAD        NVBIT(6)  // Used during windows restart, skips stateDestroy steps
693 #define GPU_STATE_DEFAULT                  0       // Default flags for destructive state loads
694                                                    // and unloads
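
//
// Illustrative sketch only (hypothetical call sites; the gpuStateUnload()/
// gpuStateLoad() signatures are assumed): whatever flags are passed to the
// unload must be passed to the matching load.
//
//     gpuStateUnload(pGpu, GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
//     ...
//     gpuStateLoad(pGpu, GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
//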
695 
696 struct OBJHWBC;
697 typedef struct hwbc_list
698 {
699     struct OBJHWBC *pHWBC;
700     struct hwbc_list *pNext;
701 } HWBC_LIST;
702 
703 /*!
704  * GFID allocation state
705  */
706 typedef enum
707 {
708     GFID_FREE = 0,
709     GFID_ALLOCATED = 1,
710     GFID_INVALIDATED = 2,
711 } GFID_ALLOC_STATUS;
712 
713 typedef struct SRIOV_P2P_INFO
714 {
715     NvU32    gfid;
716     NvBool   bAllowP2pAccess;
717     NvU32    accessRefCount;
718     NvU32    destRefCount;
719 } SRIOV_P2P_INFO, *PSRIOV_P2P_INFO;
720 
721 typedef struct
722 {
723     NvU32 peerGpuId;
724     NvU32 peerGpuInstance;
725     NvU32 p2pCaps;
726     NvU32 p2pOptimalReadCEs;
727     NvU32 p2pOptimalWriteCEs;
728     NvU8  p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE];
729     NvU32 busPeerId;
730 } GPU_P2P_PEER_GPU_CAPS;
731 
732 //
733 // typedef of private struct used in OBJGPU's data field
734 //
735 
736 typedef struct
737 {
738     NvBool              isInitialized;
739     NvU8                uuid[RM_SHA1_GID_SIZE];
740 } _GPU_UUID;
741 
742 typedef struct
743 {
744     NvBool              bValid;
745     NvU8                id;
746 } _GPU_PCIE_PEER_CLIQUE;
747 
748 typedef struct
749 {
750     NvU32     platformId;         // used to identify soc
751     NvU32     implementationId;   // soc-specific
752     NvU32     revisionId;         // soc-revision
753     PMCBOOT0  pmcBoot0;
754     PMCBOOT42 pmcBoot42;
755     NvU8      subRevision;        // sub-revision (NV_FUSE_OPT_SUBREVISION on GPU)
756 } _GPU_CHIP_INFO;
757 
758 
759 // Engine Database
760 typedef struct
761 {
762     NvU32 size;
763     RM_ENGINE_TYPE *pType;
764     NvBool bValid;
765 } _GPU_ENGINE_DB;
766 
767 #define MAX_NUM_BARS      (8)
768 // SRIOV state
769 typedef struct
770 {
771     /*!
772      * Total number of VFs available in this GPU
773      */
774     NvU32           totalVFs;
775 
776     /*!
777      * First VF Offset
778      */
779     NvU32           firstVFOffset;
780 
781     /*!
782      * Max GFID possible
783      */
784     NvU32           maxGfid;
785 
786     /*!
787      *  Physical offset of Virtual BAR0 register. Stores the offset if the GPU is
788      *  a physical function, else 0
789      */
790     NvU32           virtualRegPhysOffset;
791 
792     /*!
793      * Allocated GFIDs. Used to ensure plugins don't use the same GFID for multiple VFs
794      */
795     NvU8            *pAllocatedGfids;
796 
797     /*!
798      * The sizes of the BAR regions on the VF
799      */
800     NvU64 vfBarSize[MAX_NUM_BARS];
801 
802     /*!
803      * First VF's BAR addresses
804      */
805     NvU64 firstVFBarAddress[MAX_NUM_BARS];
806 
807     /*!
808      * If the VF BARs are 64-bit addressable
809      */
810     NvBool b64bitVFBar0;
811     NvBool b64bitVFBar1;
812     NvBool b64bitVFBar2;
813 
814     /*!
815      * GFID used for P2P access
816      */
817     PSRIOV_P2P_INFO pP2PInfo;
818     NvBool          bP2PAllocated;
819     NvU32           maxP2pGfid;
820     NvU32           p2pFabricPartitionId;
821 } _GPU_SRIOV_STATE;
822 
823 // Max # of instances for GPU children
824 #define GPU_MAX_CES                     10
825 #define GPU_MAX_GRS                     8
826 #define GPU_MAX_FIFOS                   1
827 #define GPU_MAX_MSENCS                  NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS
828 #define GPU_MAX_NVDECS                  NV2080_CTRL_CMD_INTERNAL_MAX_BSPS
829 #define GPU_MAX_NVJPGS                  8
830 #define GPU_MAX_HSHUBS                  5
831 #define GPU_MAX_OFAS                    1
832 #define GPU_MAX_GSPLITES                4
833 //
834 // Macro defines for OBJGPU fields -- macro defines inside the NVOC class block are
835 // gone after the NVOC preprocessing stage.  Macros used outside gpu/gpu.h should
836 // not be defined inside the class block.
837 //
838 
839 //
840 // Maximum number of Falcon objects that can be allocated on one GPU.
841 // This is purely a software limit and can be raised freely as more are added.
842 //
843 #define GPU_MAX_FALCON_ENGINES \
844     ENG_IOCTRL__SIZE_1       + \
845     ENG_GPCCS__SIZE_1        + \
846     ENG_FECS__SIZE_1         + \
847     ENG_NVJPEG__SIZE_1       + \
848     ENG_NVDEC__SIZE_1        + \
849     ENG_MSENC__SIZE_1        + \
850     32
851 
852 #define GPU_MAX_VIDEO_ENGINES  \
853     (ENG_NVJPEG__SIZE_1      + \
854      ENG_NVDEC__SIZE_1       + \
855      ENG_MSENC__SIZE_1       + \
856      ENG_OFA__SIZE_1)
857 
858 // for OBJGPU::pRmCtrlDeferredCmd
859 #define MAX_DEFERRED_CMDS 2
860 
861 // for OBJGPU::computeModeRefCount
862 #define NV_GPU_MODE_GRAPHICS_MODE                   0x00000001
863 #define NV_GPU_MODE_COMPUTE_MODE                    0x00000002
864 #define NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT   0x0000000a
865 #define NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT   0x0000000b
866 
867 //
868 // Structure to hold information obtained from
869 // parsing the DEVICE_INFO2 table during init.
870 //
871 
872 typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO DEVICE_INFO2_ENTRY;
873 
874 
875 //! Value of DEV_GROUP_ID used in gpuGetDeviceEntryByType for any group ID.
876 #define DEVICE_INFO2_ENTRY_GROUP_ID_ANY (-1)
877 
878 #define NV_GPU_INTERNAL_DEVICE_HANDLE    0xABCD0080
879 #define NV_GPU_INTERNAL_SUBDEVICE_HANDLE 0xABCD2080
880 
881 //
882 // NV GPU simulation mode defines
883 // Keep in sync with os.h SIM MODE defines until osGetSimulationMode is deprecated.
884 //
885 #ifndef NV_SIM_MODE_DEFS
886 #define NV_SIM_MODE_DEFS
887 #define NV_SIM_MODE_HARDWARE            0U
888 #define NV_SIM_MODE_RTL                 1U
889 #define NV_SIM_MODE_CMODEL              2U
890 #define NV_SIM_MODE_MODS_AMODEL         3U
891 #define NV_SIM_MODE_TEGRA_FPGA          4U
892 #define NV_SIM_MODE_INVALID         (~0x0U)
893 #endif
894 
895 #define GPU_IS_NVSWITCH_DETECTED(pGpu) \
896     (pGpu->nvswitchSupport == NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_SUPPORTED)
897 
898 
899 //
900 // The actual GPU object definition
901 //
902 
903 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
904 // the matching C source file, but causes diagnostics to be issued if another
905 // source file references the field.
906 #ifdef NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED
907 #define PRIVATE_FIELD(x) x
908 #else
909 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
910 #endif
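
//
// Illustrative sketch only: inside an NVOC class body a private member would be
// declared through PRIVATE_FIELD (the field name below is a hypothetical
// example).
//
//     NvU32 PRIVATE_FIELD(internalRefCount);
//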
911 
912 struct OBJGPU {
913     const struct NVOC_RTTI *__nvoc_rtti;
914     struct Object __nvoc_base_Object;
915     struct RmHalspecOwner __nvoc_base_RmHalspecOwner;
916     struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE;
917     struct Object *__nvoc_pbase_Object;
918     struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner;
919     struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE;
920     struct OBJGPU *__nvoc_pbase_OBJGPU;
921     NV_STATUS (*__gpuConstructDeviceInfoTable__)(struct OBJGPU *);
922     NV_STATUS (*__gpuGetNameString__)(struct OBJGPU *, NvU32, void *);
923     NV_STATUS (*__gpuGetShortNameString__)(struct OBJGPU *, NvU8 *);
924     NV_STATUS (*__gpuInitBranding__)(struct OBJGPU *);
925     void (*__gpuInitProperties__)(struct OBJGPU *);
926     NV_STATUS (*__gpuBuildKernelVideoEngineList__)(struct OBJGPU *);
927     NV_STATUS (*__gpuInitVideoLogging__)(struct OBJGPU *);
928     void (*__gpuFreeVideoLogging__)(struct OBJGPU *);
929     void (*__gpuDestroyKernelVideoEngineList__)(struct OBJGPU *);
930     NV_STATUS (*__gpuPowerOff__)(struct OBJGPU *);
931     NV_STATUS (*__gpuWriteBusConfigReg__)(struct OBJGPU *, NvU32, NvU32);
932     NV_STATUS (*__gpuReadBusConfigReg__)(struct OBJGPU *, NvU32, NvU32 *);
933     NV_STATUS (*__gpuReadBusConfigRegEx__)(struct OBJGPU *, NvU32, NvU32 *, THREAD_STATE_NODE *);
934     NV_STATUS (*__gpuReadFunctionConfigReg__)(struct OBJGPU *, NvU32, NvU32, NvU32 *);
935     NV_STATUS (*__gpuWriteFunctionConfigReg__)(struct OBJGPU *, NvU32, NvU32, NvU32);
936     NV_STATUS (*__gpuWriteFunctionConfigRegEx__)(struct OBJGPU *, NvU32, NvU32, NvU32, THREAD_STATE_NODE *);
937     NV_STATUS (*__gpuReadVgpuConfigReg__)(struct OBJGPU *, NvU32, NvU32 *);
938     void (*__gpuGetIdInfo__)(struct OBJGPU *);
939     NV_STATUS (*__gpuGenGidData__)(struct OBJGPU *, NvU8 *, NvU32, NvU32);
940     NvU8 (*__gpuGetChipSubRev__)(struct OBJGPU *);
941     NV_STATUS (*__gpuGetSkuInfo__)(struct OBJGPU *, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *);
942     NV_STATUS (*__gpuGetRegBaseOffset__)(struct OBJGPU *, NvU32, NvU32 *);
943     void (*__gpuHandleSanityCheckRegReadError__)(struct OBJGPU *, NvU32, NvU32);
944     void (*__gpuHandleSecFault__)(struct OBJGPU *);
945     NV_STATUS (*__gpuSanityCheckVirtRegAccess__)(struct OBJGPU *, NvU32);
946     const GPUCHILDPRESENT *(*__gpuGetChildrenPresent__)(struct OBJGPU *, NvU32 *);
947     const CLASSDESCRIPTOR *(*__gpuGetClassDescriptorList__)(struct OBJGPU *, NvU32 *);
948     NvU32 (*__gpuGetPhysAddrWidth__)(struct OBJGPU *, NV_ADDRESS_SPACE);
949     NV_STATUS (*__gpuInitSriov__)(struct OBJGPU *);
950     NV_STATUS (*__gpuDeinitSriov__)(struct OBJGPU *);
951     NV_STATUS (*__gpuCreateDefaultClientShare__)(struct OBJGPU *);
952     void (*__gpuDestroyDefaultClientShare__)(struct OBJGPU *);
953     NvU64 (*__gpuGetVmmuSegmentSize__)(struct OBJGPU *);
954     NvBool (*__gpuFuseSupportsDisplay__)(struct OBJGPU *);
955     NvU32 (*__gpuGetActiveFBIOs__)(struct OBJGPU *);
956     NvBool (*__gpuCheckPageRetirementSupport__)(struct OBJGPU *);
957     NvBool (*__gpuIsInternalSku__)(struct OBJGPU *);
958     NV_STATUS (*__gpuClearFbhubPoisonIntrForBug2924523__)(struct OBJGPU *);
959     NvBool (*__gpuCheckIfFbhubPoisonIntrPending__)(struct OBJGPU *);
960     NV_STATUS (*__gpuGetSriovCaps__)(struct OBJGPU *, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *);
961     NvBool (*__gpuCheckIsP2PAllocated__)(struct OBJGPU *);
962     NV_STATUS (*__gpuPrePowerOff__)(struct OBJGPU *);
963     NV_STATUS (*__gpuVerifyExistence__)(struct OBJGPU *);
964     NvU64 (*__gpuGetFlaVasSize__)(struct OBJGPU *, NvBool);
965     NvBool (*__gpuIsAtsSupportedWithSmcMemPartitioning__)(struct OBJGPU *);
966     NvBool (*__gpuIsGlobalPoisonFuseEnabled__)(struct OBJGPU *);
967     void (*__gpuDetermineSelfHostedMode__)(struct OBJGPU *);
968     void (*__gpuDetermineMIGSupport__)(struct OBJGPU *);
969     NV_STATUS (*__gpuInitOptimusSettings__)(struct OBJGPU *);
970     NV_STATUS (*__gpuDeinitOptimusSettings__)(struct OBJGPU *);
971     NvBool (*__gpuIsSliCapableWithoutDisplay__)(struct OBJGPU *);
972     NvBool (*__gpuIsCCEnabledInHw__)(struct OBJGPU *);
973     NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *);
974     NvBool (*__gpuIsProtectedPcieEnabledInHw__)(struct OBJGPU *);
975     NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *);
976     NV_STATUS (*__gpuUpdateErrorContainmentState__)(struct OBJGPU *, NV_ERROR_CONT_ERR_ID, NV_ERROR_CONT_LOCATION, NvU32 *);
977     NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *);
978     NvBool (*__gpuGetIsCmpSku__)(struct OBJGPU *);
979     NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED;
980     NvBool bVideoLinkDisabled;
981     GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel;
982     NvU32 moduleId;
983     NvU8 nvswitchSupport;
984     NvBool PDB_PROP_GPU_IN_STANDBY;
985     NvBool PDB_PROP_GPU_IN_HIBERNATE;
986     NvBool PDB_PROP_GPU_IN_PM_CODEPATH;
987     NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH;
988     NvBool PDB_PROP_GPU_STATE_INITIALIZED;
989     NvBool PDB_PROP_GPU_EMULATION;
990     NvBool PDB_PROP_GPU_PRIMARY_DEVICE;
991     NvBool PDB_PROP_GPU_HYBRID_MGPU;
992     NvBool PDB_PROP_GPU_ALTERNATE_TREE_ENABLED;
993     NvBool PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS;
994     NvBool PDB_PROP_GPU_3D_CONTROLLER;
995     NvBool PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM;
996     NvBool PDB_PROP_GPU_IS_CONNECTED;
997     NvBool PDB_PROP_GPU_BROKEN_FB;
998     NvBool PDB_PROP_GPU_IN_FULLCHIP_RESET;
999     NvBool PDB_PROP_GPU_IN_SECONDARY_BUS_RESET;
1000     NvBool PDB_PROP_GPU_IN_GC6_RESET;
1001     NvBool PDB_PROP_GPU_IS_GEMINI;
1002     NvBool PDB_PROP_GPU_PERSISTENT_SW_STATE;
1003     NvBool PDB_PROP_GPU_COHERENT_CPU_MAPPING;
1004     NvBool PDB_PROP_GPU_IS_LOST;
1005     NvBool PDB_PROP_GPU_IN_TIMEOUT_RECOVERY;
1006     NvBool PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT;
1007     NvBool PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY;
1008     NvBool PDB_PROP_GPU_TEGRA_SOC_IGPU;
1009     NvBool PDB_PROP_GPU_ATS_SUPPORTED;
1010     NvBool PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING;
1011     NvBool PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE;
1012     NvBool PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE;
1013     NvBool PDB_PROP_GPU_IS_UEFI;
1014     NvBool PDB_PROP_GPU_ZERO_FB;
1015     NvBool PDB_PROP_GPU_BAR1_BAR2_DISABLED;
1016     NvBool PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE;
1017     NvBool PDB_PROP_GPU_MIG_SUPPORTED;
1018     NvBool PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED;
1019     NvBool PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED;
1020     NvBool PDB_PROP_GPU_IS_COT_ENABLED;
1021     NvBool bIsFlexibleFlaSupported;
1022     NvBool PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED;
1023     NvBool PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE;
1024     NvBool PDB_PROP_GPU_SWRL_GRANULAR_LOCKING;
1025     NvBool PDB_PROP_GPU_IN_SLI_LINK_CODEPATH;
1026     NvBool PDB_PROP_GPU_IS_PLX_PRESENT;
1027     NvBool PDB_PROP_GPU_IS_BR03_PRESENT;
1028     NvBool PDB_PROP_GPU_IS_BR04_PRESENT;
1029     NvBool PDB_PROP_GPU_BEHIND_BRIDGE;
1030     NvBool PDB_PROP_GPU_BEHIND_BR03;
1031     NvBool PDB_PROP_GPU_BEHIND_BR04;
1032     NvBool PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED;
1033     NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED;
1034     NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED;
1035     NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY;
1036     NvBool PDB_PROP_GPU_RM_UNLINKED_SLI;
1037     NvBool PDB_PROP_GPU_SLI_LINK_ACTIVE;
1038     NvBool PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST;
1039     NvBool PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH;
1040     NvBool PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL;
1041     NvBool PDB_PROP_GPU_IS_MOBILE;
1042     NvBool PDB_PROP_GPU_RTD3_GC6_SUPPORTED;
1043     NvBool PDB_PROP_GPU_RTD3_GC6_ACTIVE;
1044     NvBool PDB_PROP_GPU_FAST_GC6_ACTIVE;
1045     NvBool PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED;
1046     NvBool PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA;
1047     NvBool PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED;
1048     NvBool PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED;
1049     NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERING;
1050     NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERED;
1051     NvBool PDB_PROP_GPU_ACCOUNTING_ON;
1052     NvBool PDB_PROP_GPU_INACCESSIBLE;
1053     NvBool PDB_PROP_GPU_NVLINK_SYSMEM;
1054     NvBool PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK;
1055     NvBool PDB_PROP_GPU_C2C_SYSMEM;
1056     NvBool PDB_PROP_GPU_IN_TCC_MODE;
1057     NvBool PDB_PROP_GPU_SUPPORTS_TDR_EVENT;
1058     NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE;
1059     NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K;
1060     NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT;
1061     NvBool PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT;
1062     NvBool PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS;
1063     NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU;
1064     NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA;
1065     NvBool PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE;
1066     NvBool PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED;
1067     NvBool PDB_PROP_GPU_NV_USERMODE_ENABLED;
1068     NvBool PDB_PROP_GPU_IN_FATAL_ERROR;
1069     NvBool PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE;
1070     NvBool PDB_PROP_GPU_VGA_ENABLED;
1071     NvBool PDB_PROP_GPU_IS_MXM_3X;
1072     NvBool PDB_PROP_GPU_GSYNC_III_ATTACHED;
1073     NvBool PDB_PROP_GPU_QSYNC_II_ATTACHED;
1074     NvBool PDB_PROP_GPU_CC_FEATURE_CAPABLE;
1075     NvBool PDB_PROP_GPU_APM_FEATURE_CAPABLE;
1076     NvBool PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX;
1077     NvBool PDB_PROP_GPU_SKIP_TABLE_CE_MAP;
1078     NvBool PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF;
1079     NvBool PDB_PROP_GPU_IS_SOC_SDM;
1080     NvBool PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS;
1081     NvBool PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL;
1082     NvBool PDB_PROP_GPU_FASTPATH_SEQ_ENABLED;
1083     OS_GPU_INFO *pOsGpuInfo;
1084     OS_RM_CAPS *pOsRmCaps;
1085     NvU32 halImpl;
1086     void *hPci;
1087     GpuEngineEventNotificationList *engineNonstallIntrEventNotifications[64];
1088     NvBool bIsSOC;
1089     NvU32 gpuInstance;
1090     NvU32 gpuDisabled;
1091     NvU32 gpuId;
1092     NvU32 boardId;
1093     NvU32 deviceInstance;
1094     NvU32 subdeviceInstance;
1095     NvS32 numaNodeId;
1096     NvS32 cpuNumaNodeId;
1097     _GPU_UUID gpuUuid;
1098     NvU32 gpuPhysicalId;
1099     NvU32 gpuTerminatedLinkMask;
1100     NvBool gpuLinkTerminationEnabled;
1101     NvBool gspRmInitialized;
1102     _GPU_PCIE_PEER_CLIQUE pciePeerClique;
1103     NvU32 i2cPortForExtdev;
1104     GPUIDINFO idInfo;
1105     _GPU_CHIP_INFO chipInfo;
1106     GPUBUSINFO busInfo;
1107     const GPUCHILDPRESENT *pChildrenPresent;
1108     NvU32 numChildrenPresent;
1109     GPU_ENGINE_ORDER engineOrder;
1110     GPUCLASSDB classDB;
1111     NvU32 chipId0;
1112     NvU32 chipId1;
1113     NvU32 pmcEnable;
1114     NvU32 pmcRmOwnsIntrMask;
1115     NvBool testIntr;
1116     NvU32 numCEs;
1117     NvU32 ceFaultMethodBufferSize;
1118     NvBool isVirtual;
1119     NvBool isGspClient;
1120     NvU64 fbLength;
1121     NvU32 instLength;
1122     NvBool instSetViaAttachArg;
1123     NvU32 activeFBIOs;
1124     NvU64 gpuVbiosPostTime;
1125     NvU32 uefiScanoutSurfaceSizeInMB;
1126     RmPhysAddr dmaStartAddress;
1127     NvU32 gpuDeviceMapCount;
1128     DEVICE_MAPPING deviceMappings[60];
1129     struct IoAperture *pIOApertures[12];
1130     DEVICE_MAPPING *pDeviceMappingsByDeviceInstance[12];
1131     void *gpuCfgAddr;
1132     TIMEOUT_DATA timeoutData;
1133     NvU32 computeModeRules;
1134     NvS32 computeModeRefCount;
1135     NvHandle hComputeModeReservation;
1136     NvBool bIsDebugModeEnabled;
1137     NvU64 lastCallbackTime;
1138     volatile NvU32 bCallbackQueued;
1139     NvU32 masterFromSLIConfig;
1140     NvU32 sliStatus;
1141     struct OBJOS *pOS;
1142     struct OBJHAL *pHal;
1143     struct KernelBif *pKernelBif;
1144     struct KernelMc *pKernelMc;
1145     struct SwIntr *pSwIntr;
1146     struct KernelMemorySystem *pKernelMemorySystem;
1147     struct MemoryManager *pMemoryManager;
1148     struct KernelDisplay *pKernelDisplay;
1149     struct OBJTMR *pTmr;
1150     struct KernelBus *pKernelBus;
1151     struct KernelGmmu *pKernelGmmu;
1152     struct KernelSec2 *pKernelSec2;
1153     struct KernelGsp *pKernelGsp;
1154     struct VirtMemAllocator *pDma;
1155     struct KernelMIGManager *pKernelMIGManager;
1156     struct KernelGraphicsManager *pKernelGraphicsManager;
1157     struct KernelGraphics *pKernelGraphics[8];
1158     struct KernelPerf *pKernelPerf;
1159     struct KernelRc *pKernelRc;
1160     struct Intr *pIntr;
1161     struct KernelPmu *pKernelPmu;
1162     struct KernelCE *pKCe[10];
1163     struct KernelFifo *pKernelFifo;
1164     struct OBJUVM *pUvm;
1165     struct NvDebugDump *pNvd;
1166     struct KernelNvlink *pKernelNvlink;
1167     struct OBJGPUMON *pGpuMon;
1168     struct KernelHwpm *pKernelHwpm;
1169     struct OBJSWENG *pSwEng;
1170     struct KernelFsp *pKernelFsp;
1171     struct ConfidentialCompute *pConfCompute;
1172     struct KernelCcu *pKernelCcu;
1173     HWBC_LIST *pHWBCList;
1174     GPUCLDATA gpuClData;
1175     _GPU_ENGINE_DB engineDB;
1176     NvU32 engineDBSize;
1177     NvU32 instCacheOverride;
1178     NvS32 numOfMclkLockRequests;
1179     NvU32 netlistNum;
1180     RmCtrlDeferredCmd pRmCtrlDeferredCmd[2];
1181     ACPI_DATA acpi;
1182     ACPI_METHOD_DATA acpiMethodData;
1183     NvBool bSystemHasMux;
1184     NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS backLightMethodData;
1185     NvU32 activeFifoEventMthdNotifiers;
1186     struct Falcon *constructedFalcons[70];
1187     NvU32 numConstructedFalcons;
1188     struct GenericKernelFalcon *genericKernelFalcons[70];
1189     NvU32 numGenericKernelFalcons;
1190     struct KernelVideoEngine *kernelVideoEngines[20];
1191     NvU32 numKernelVideoEngines;
1192     NvU8 *pUserRegisterAccessMap;
1193     NvU8 *pUnrestrictedRegisterAccessMap;
1194     NvU32 userRegisterAccessMapSize;
1195     struct PrereqTracker *pPrereqTracker;
1196     RegisterAccess registerAccess;
1197     NvBool bUseRegisterAccessMap;
1198     NvU32 *pRegopOffsetScratchBuffer;
1199     NvU32 *pRegopOffsetAddrScratchBuffer;
1200     NvU32 regopScratchBufferMaxOffsets;
1201     _GPU_SRIOV_STATE sriovState;
1202     NvU64 vmmuSegmentSize;
1203     NvHandle hDefaultClientShare;
1204     NvHandle hDefaultClientShareDevice;
1205     NvHandle hDefaultClientShareSubDevice;
1206     NvU32 externalKernelClientCount;
1207     DEVICE_INFO2_ENTRY *pDeviceInfoTable;
1208     NvU32 numDeviceInfoEntries;
1209     NvHandle hInternalClient;
1210     NvHandle hInternalDevice;
1211     NvHandle hInternalSubdevice;
1212     struct Subdevice *pCachedSubdevice;
1213     struct RsClient *pCachedRsClient;
1214     RM_API physicalRmApi;
1215     struct Subdevice **pSubdeviceBackReferences;
1216     NvU32 numSubdeviceBackReferences;
1217     NvU32 maxSubdeviceBackReferences;
1218     NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo;
1219     NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *boardInfo;
1220     GpuSharedDataMap userSharedData;
1221     NvBool bBar2MovedByVtd;
1222     NvBool bBar1Is64Bit;
1223     NvBool bSurpriseRemovalSupported;
1224     NvBool bTwoStageRcRecoveryEnabled;
1225     NvBool bReplayableTraceEnabled;
1226     NvBool bInD3Cold;
1227     NvBool bIsSimulation;
1228     NvBool bIsModsAmodel;
1229     NvBool bIsFmodel;
1230     NvBool bIsRtlsim;
1231     NvBool bIsPassthru;
1232     NvBool bIsVirtualWithSriov;
1233     NvBool bIsMigRm;
1234     NvU32 P2PPeerGpuCount;
1235     GPU_P2P_PEER_GPU_CAPS P2PPeerGpuCaps[32];
1236     NvBool bIsSelfHosted;
1237     NvBool bStateLoading;
1238     NvBool bStateUnloading;
1239     NvBool bStateLoaded;
1240     NvBool bFullyConstructed;
1241     NvBool bRecoveryMarginPresent;
1242     NvBool bBf3WarBug4040336Enabled;
1243     NvBool bUnifiedMemorySpaceEnabled;
1244     NvBool bSriovEnabled;
1245     NvBool bWarBug200577889SriovHeavyEnabled;
1246     NvBool bNonPowerOf2ChannelCountSupported;
1247     NvBool bCacheOnlyMode;
1248     NvBool bNeed4kPageIsolation;
1249     NvBool bSplitVasManagementServerClientRm;
1250     NvU32 instLocOverrides;
1251     NvU32 instLocOverrides2;
1252     NvU32 instLocOverrides3;
1253     NvU32 instLocOverrides4;
1254     NvBool bInstLoc47bitPaWar;
1255     NvU32 instVprOverrides;
1256     NvU32 optimizeUseCaseOverride;
1257     NvS16 videoCtxswLogConsumerCount;
1258     VideoEventBufferBindMultiMap videoEventBufferBindingsUid;
1259     TMR_EVENT *pVideoTimerEvent;
1260     NVENC_SESSION_LIST nvencSessionList;
1261     NvU32 encSessionStatsReportingState;
1262     NvBool bNvEncSessionDataProcessingWorkItemPending;
1263     NVFBC_SESSION_LIST nvfbcSessionList;
1264     struct OBJVASPACE *pFabricVAS;
1265     NvBool bPipelinedPteMemEnabled;
1266     NvBool bIsBarPteInSysmemSupported;
1267     NvBool bRegUsesGlobalSurfaceOverrides;
1268     NvBool bClientRmAllocatedCtxBuffer;
1269     NvBool bIterativeMmuWalker;
1270     NvBool bEccPageRetirementWithSliAllowed;
1271     NvBool bVidmemPreservationBrokenBug3172217;
1272     NvBool bInstanceMemoryAlwaysCached;
1273     NvBool bUseRpcSimEscapes;
1274     NvBool bRmProfilingPrivileged;
1275     NvBool bGeforceSmb;
1276     NvBool bIsGeforce;
1277     NvBool bIsQuadro;
1278     NvBool bIsQuadroAD;
1279     NvBool bIsVgx;
1280     NvBool bIsNvidiaNvs;
1281     NvBool bIsTitan;
1282     NvBool bIsTesla;
1283     NvBool bComputePolicyTimesliceSupported;
1284     NvBool bGlobalPoisonFuseEnabled;
1285     RmPhysAddr simAccessBufPhysAddr;
1286     RmPhysAddr notifyOpSharedSurfacePhysAddr;
1287     NvU32 fabricProbeRegKeyOverride;
1288     NvU8 fabricProbeRetryDelay;
1289     NvU8 fabricProbeSlowdownThreshold;
1290     NvBool bVgpuGspPluginOffloadEnabled;
1291     NvBool bSriovCapable;
1292     NvBool bRecheckSliSupportAtResume;
1293     NvBool bGpuNvEncAv1Supported;
1294     _GPU_SLI_PEER peer[2];
1295     NvBool bIsGspOwnedFaultBuffersEnabled;
1296     NvBool bVfResizableBAR1Supported;
1297     NvBool bVoltaHubIntrSupported;
1298     NvBool bAmpereErrorContainmentXidEnabled;
1299     _GPU_GC6_STATE gc6State;
1300 };
1301 
1302 #ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
1303 #define __NVOC_CLASS_OBJGPU_TYPEDEF__
1304 typedef struct OBJGPU OBJGPU;
1305 #endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
1306 
1307 #ifndef __nvoc_class_id_OBJGPU
1308 #define __nvoc_class_id_OBJGPU 0x7ef3cb
1309 #endif /* __nvoc_class_id_OBJGPU */
1310 
1311 extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
1312 
1313 #define __staticCast_OBJGPU(pThis) \
1314     ((pThis)->__nvoc_pbase_OBJGPU)
1315 
1316 #ifdef __nvoc_gpu_h_disabled
1317 #define __dynamicCast_OBJGPU(pThis) ((OBJGPU*)NULL)
1318 #else //__nvoc_gpu_h_disabled
1319 #define __dynamicCast_OBJGPU(pThis) \
1320     ((OBJGPU*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPU)))
1321 #endif //__nvoc_gpu_h_disabled
1322 
1323 #define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_CAST
1324 #define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GC6_SUPPORTED
1325 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_CAST
1326 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU
1327 #define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_CAST
1328 #define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_NAME PDB_PROP_GPU_SKIP_TABLE_CE_MAP
1329 #define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_CAST
1330 #define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_NAME PDB_PROP_GPU_IN_FATAL_ERROR
1331 #define PDB_PROP_GPU_VGA_ENABLED_BASE_CAST
1332 #define PDB_PROP_GPU_VGA_ENABLED_BASE_NAME PDB_PROP_GPU_VGA_ENABLED
1333 #define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_CAST
1334 #define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_NAME PDB_PROP_GPU_COHERENT_CPU_MAPPING
1335 #define PDB_PROP_GPU_IN_STANDBY_BASE_CAST
1336 #define PDB_PROP_GPU_IN_STANDBY_BASE_NAME PDB_PROP_GPU_IN_STANDBY
1337 #define PDB_PROP_GPU_IS_COT_ENABLED_BASE_CAST
1338 #define PDB_PROP_GPU_IS_COT_ENABLED_BASE_NAME PDB_PROP_GPU_IS_COT_ENABLED
1339 #define PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED_BASE_CAST
1340 #define PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED_BASE_NAME PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED
1341 #define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_CAST
1342 #define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_NAME PDB_PROP_GPU_SLI_LINK_ACTIVE
1343 #define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_CAST
1344 #define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED
1345 #define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_CAST
1346 #define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_NAME PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING
1347 #define PDB_PROP_GPU_IN_GC6_RESET_BASE_CAST
1348 #define PDB_PROP_GPU_IN_GC6_RESET_BASE_NAME PDB_PROP_GPU_IN_GC6_RESET
1349 #define PDB_PROP_GPU_3D_CONTROLLER_BASE_CAST
1350 #define PDB_PROP_GPU_3D_CONTROLLER_BASE_NAME PDB_PROP_GPU_3D_CONTROLLER
1351 #define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_CAST
1352 #define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_NAME PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL
1353 #define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_CAST
1354 #define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI
1355 #define PDB_PROP_GPU_IS_UEFI_BASE_CAST
1356 #define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI
1357 #define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_CAST
1358 #define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_NAME PDB_PROP_GPU_IN_SECONDARY_BUS_RESET
1359 #define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_CAST
1360 #define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_NAME PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT
1361 #define PDB_PROP_GPU_IS_CONNECTED_BASE_CAST
1362 #define PDB_PROP_GPU_IS_CONNECTED_BASE_NAME PDB_PROP_GPU_IS_CONNECTED
1363 #define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_CAST
1364 #define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_RTD3_GC6_ACTIVE
1365 #define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_CAST
1366 #define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_CC_FEATURE_CAPABLE
1367 #define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_CAST
1368 #define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_NAME PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT
1369 #define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_CAST
1370 #define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_NAME PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED
1371 #define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_CAST
1372 #define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERING
1373 #define PDB_PROP_GPU_BAR1_BAR2_DISABLED_BASE_CAST
1374 #define PDB_PROP_GPU_BAR1_BAR2_DISABLED_BASE_NAME PDB_PROP_GPU_BAR1_BAR2_DISABLED
1375 #define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_CAST
1376 #define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_NAME PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE
1377 #define PDB_PROP_GPU_ACCOUNTING_ON_BASE_CAST
1378 #define PDB_PROP_GPU_ACCOUNTING_ON_BASE_NAME PDB_PROP_GPU_ACCOUNTING_ON
1379 #define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_CAST
1380 #define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_FAST_GC6_ACTIVE
1381 #define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_CAST
1382 #define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERED
1383 #define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_CAST
1384 #define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_IN_FULLCHIP_RESET
1385 #define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST
1386 #define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_NAME PDB_PROP_GPU_NV_USERMODE_ENABLED
1387 #define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_CAST
1388 #define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_SLI_LINK_CODEPATH
1389 #define PDB_PROP_GPU_IS_GEMINI_BASE_CAST
1390 #define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI
1391 #define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST
1392 #define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED
1393 #define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_CAST
1394 #define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_NAME PDB_PROP_GPU_GSYNC_III_ATTACHED
1395 #define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_CAST
1396 #define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_NAME PDB_PROP_GPU_QSYNC_II_ATTACHED
1397 #define PDB_PROP_GPU_IS_SOC_SDM_BASE_CAST
1398 #define PDB_PROP_GPU_IS_SOC_SDM_BASE_NAME PDB_PROP_GPU_IS_SOC_SDM
1399 #define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_CAST
1400 #define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_NAME PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM
1401 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_CAST
1402 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED
1403 #define PDB_PROP_GPU_EMULATION_BASE_CAST
1404 #define PDB_PROP_GPU_EMULATION_BASE_NAME PDB_PROP_GPU_EMULATION
1405 #define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_CAST
1406 #define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_APM_FEATURE_CAPABLE
1407 #define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_CAST
1408 #define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_NAME PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST
1409 #define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_CAST
1410 #define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED
1411 #define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_CAST
1412 #define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_NAME PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL
1413 #define PDB_PROP_GPU_INACCESSIBLE_BASE_CAST
1414 #define PDB_PROP_GPU_INACCESSIBLE_BASE_NAME PDB_PROP_GPU_INACCESSIBLE
1415 #define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_CAST
1416 #define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH
1417 #define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_CAST
1418 #define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_RESUME_CODEPATH
1419 #define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_CAST
1420 #define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_NAME PDB_PROP_GPU_FASTPATH_SEQ_ENABLED
1421 #define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_CAST
1422 #define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY
1423 #define PDB_PROP_GPU_IN_TCC_MODE_BASE_CAST
1424 #define PDB_PROP_GPU_IN_TCC_MODE_BASE_NAME PDB_PROP_GPU_IN_TCC_MODE
1425 #define PDB_PROP_GPU_C2C_SYSMEM_BASE_CAST
1426 #define PDB_PROP_GPU_C2C_SYSMEM_BASE_NAME PDB_PROP_GPU_C2C_SYSMEM
1427 #define PDB_PROP_GPU_HYBRID_MGPU_BASE_CAST
1428 #define PDB_PROP_GPU_HYBRID_MGPU_BASE_NAME PDB_PROP_GPU_HYBRID_MGPU
1429 #define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_CAST
1430 #define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED
1431 #define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_CAST
1432 #define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE
1433 #define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_CAST
1434 #define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_NAME PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED
1435 #define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_CAST
1436 #define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_NAME PDB_PROP_GPU_IS_PLX_PRESENT
1437 #define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_CAST
1438 #define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_NAME PDB_PROP_GPU_NVLINK_SYSMEM
1439 #define PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED_BASE_CAST
1440 #define PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED_BASE_NAME PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED
1441 #define PDB_PROP_GPU_IS_MOBILE_BASE_CAST
1442 #define PDB_PROP_GPU_IS_MOBILE_BASE_NAME PDB_PROP_GPU_IS_MOBILE
1443 #define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_CAST
1444 #define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_ENABLED
1445 #define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_CAST
1446 #define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_NAME PDB_PROP_GPU_PERSISTENT_SW_STATE
1447 #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_CAST
1448 #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH
1449 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST
1450 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED
1451 #define PDB_PROP_GPU_SUPPORTS_TDR_EVENT_BASE_CAST
1452 #define PDB_PROP_GPU_SUPPORTS_TDR_EVENT_BASE_NAME PDB_PROP_GPU_SUPPORTS_TDR_EVENT
1453 #define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_CAST
1454 #define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_NAME PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE
1455 #define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST
1456 #define PDB_PROP_GPU_BEHIND_BR03_BASE_NAME PDB_PROP_GPU_BEHIND_BR03
1457 #define PDB_PROP_GPU_BEHIND_BR04_BASE_CAST
1458 #define PDB_PROP_GPU_BEHIND_BR04_BASE_NAME PDB_PROP_GPU_BEHIND_BR04
1459 #define PDB_PROP_GPU_MIG_SUPPORTED_BASE_CAST
1460 #define PDB_PROP_GPU_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTED
1461 #define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_CAST
1462 #define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_NAME PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE
1463 #define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_CAST
1464 #define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_NAME PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE
1465 #define PDB_PROP_GPU_IN_HIBERNATE_BASE_CAST
1466 #define PDB_PROP_GPU_IN_HIBERNATE_BASE_NAME PDB_PROP_GPU_IN_HIBERNATE
1467 #define PDB_PROP_GPU_BROKEN_FB_BASE_CAST
1468 #define PDB_PROP_GPU_BROKEN_FB_BASE_NAME PDB_PROP_GPU_BROKEN_FB
1469 #define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_CAST
1470 #define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_NAME PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT
1471 #define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_CAST
1472 #define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_NAME PDB_PROP_GPU_IN_TIMEOUT_RECOVERY
1473 #define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_CAST
1474 #define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_NAME PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA
1475 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_CAST
1476 #define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA
1477 #define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_CAST
1478 #define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR03_PRESENT
1479 #define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_CAST
1480 #define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR04_PRESENT
1481 #define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_CAST
1482 #define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_NAME PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE
1483 #define PDB_PROP_GPU_IS_MXM_3X_BASE_CAST
1484 #define PDB_PROP_GPU_IS_MXM_3X_BASE_NAME PDB_PROP_GPU_IS_MXM_3X
1485 #define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_CAST
1486 #define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS
1487 #define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_CAST
1488 #define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_NAME PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF
1489 #define PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS_BASE_CAST
1490 #define PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS_BASE_NAME PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS
1491 #define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_CAST
1492 #define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_NAME PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED
1493 #define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_CAST
1494 #define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED
1495 #define PDB_PROP_GPU_ZERO_FB_BASE_CAST
1496 #define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB
1497 #define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST
1498 #define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING
1499 #define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_CAST
1500 #define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_NAME PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK
1501 #define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_CAST
1502 #define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_IGPU
1503 #define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST
1504 #define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED
1505 #define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_CAST
1506 #define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_NAME PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS
1507 #define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_CAST
1508 #define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_NAME PDB_PROP_GPU_PRIMARY_DEVICE
1509 #define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_CAST
1510 #define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_NAME PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE
1511 #define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_CAST
1512 #define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_NAME PDB_PROP_GPU_BEHIND_BRIDGE
1513 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_CAST
1514 #define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY
1515 #define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_CAST
1516 #define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_NAME PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K
1517 #define PDB_PROP_GPU_IS_LOST_BASE_CAST
1518 #define PDB_PROP_GPU_IS_LOST_BASE_NAME PDB_PROP_GPU_IS_LOST
1519 #define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_CAST
1520 #define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_NAME PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX
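/*
 * Illustrative note (not generated by NVOC): the *_BASE_CAST / *_BASE_NAME
 * pairs above let the property accessors resolve each PDB property to the
 * class that actually declares it. In RM code a property is typically read
 * and written through the object's accessors; a minimal sketch, assuming a
 * valid pGpu, might look like:
 *
 *     if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED))
 *     {
 *         // GPU is attached; flag it as entering a full-chip reset.
 *         pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET, NV_TRUE);
 *     }
 */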
1521 
1522 NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU**, Dynamic*, NvU32, va_list);
1523 
1524 NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32,
1525         NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
1526         RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
1527         TEGRA_CHIP_TYPE TegraChipHal_tegraType,
1528         NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid * arg_pUuid);
1529 #define __objCreate_OBJGPU(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, TegraChipHal_tegraType, DispIpHal_ipver, arg_gpuInstance, arg_gpuId, arg_pUuid) \
1530     __nvoc_objCreate_OBJGPU((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, TegraChipHal_tegraType, DispIpHal_ipver, arg_gpuInstance, arg_gpuId, arg_pUuid)
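/*
 * Illustrative sketch only: OBJGPU instances are normally created by RM's GPU
 * manager rather than by hand. With hypothetical HAL selectors and IDs already
 * in local variables, a call through the wrapper macro would look roughly like:
 *
 *     OBJGPU *pGpu = NULL;
 *     NV_STATUS status = __objCreate_OBJGPU(&pGpu, pParentObj, 0,
 *         chipArch, chipImpl, hidrev, rmVariant, tegraType, dispIpVer,
 *         gpuInstance, gpuId, &gpuUuid);
 *
 * All argument names above are placeholders supplied by the caller; the macro
 * simply staticCast()s the parent to Dynamic and forwards everything to
 * __nvoc_objCreate_OBJGPU().
 */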
1531 
1532 #define gpuConstructDeviceInfoTable(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu)
1533 #define gpuConstructDeviceInfoTable_HAL(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu)
1534 #define gpuGetNameString(pGpu, arg0, arg1) gpuGetNameString_DISPATCH(pGpu, arg0, arg1)
1535 #define gpuGetNameString_HAL(pGpu, arg0, arg1) gpuGetNameString_DISPATCH(pGpu, arg0, arg1)
1536 #define gpuGetShortNameString(pGpu, arg0) gpuGetShortNameString_DISPATCH(pGpu, arg0)
1537 #define gpuGetShortNameString_HAL(pGpu, arg0) gpuGetShortNameString_DISPATCH(pGpu, arg0)
1538 #define gpuInitBranding(pGpu) gpuInitBranding_DISPATCH(pGpu)
1539 #define gpuInitBranding_HAL(pGpu) gpuInitBranding_DISPATCH(pGpu)
1540 #define gpuInitProperties(pGpu) gpuInitProperties_DISPATCH(pGpu)
1541 #define gpuInitProperties_HAL(pGpu) gpuInitProperties_DISPATCH(pGpu)
1542 #define gpuBuildKernelVideoEngineList(pGpu) gpuBuildKernelVideoEngineList_DISPATCH(pGpu)
1543 #define gpuBuildKernelVideoEngineList_HAL(pGpu) gpuBuildKernelVideoEngineList_DISPATCH(pGpu)
1544 #define gpuInitVideoLogging(pGpu) gpuInitVideoLogging_DISPATCH(pGpu)
1545 #define gpuInitVideoLogging_HAL(pGpu) gpuInitVideoLogging_DISPATCH(pGpu)
1546 #define gpuFreeVideoLogging(pGpu) gpuFreeVideoLogging_DISPATCH(pGpu)
1547 #define gpuFreeVideoLogging_HAL(pGpu) gpuFreeVideoLogging_DISPATCH(pGpu)
1548 #define gpuDestroyKernelVideoEngineList(pGpu) gpuDestroyKernelVideoEngineList_DISPATCH(pGpu)
1549 #define gpuDestroyKernelVideoEngineList_HAL(pGpu) gpuDestroyKernelVideoEngineList_DISPATCH(pGpu)
1550 #define gpuPowerOff(pGpu) gpuPowerOff_DISPATCH(pGpu)
1551 #define gpuPowerOff_HAL(pGpu) gpuPowerOff_DISPATCH(pGpu)
1552 #define gpuWriteBusConfigReg(pGpu, index, value) gpuWriteBusConfigReg_DISPATCH(pGpu, index, value)
1553 #define gpuWriteBusConfigReg_HAL(pGpu, index, value) gpuWriteBusConfigReg_DISPATCH(pGpu, index, value)
1554 #define gpuReadBusConfigReg(pGpu, index, data) gpuReadBusConfigReg_DISPATCH(pGpu, index, data)
1555 #define gpuReadBusConfigReg_HAL(pGpu, index, data) gpuReadBusConfigReg_DISPATCH(pGpu, index, data)
1556 #define gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_DISPATCH(pGpu, index, data, pThreadState)
1557 #define gpuReadBusConfigRegEx_HAL(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_DISPATCH(pGpu, index, data, pThreadState)
1558 #define gpuReadFunctionConfigReg(pGpu, function, reg, data) gpuReadFunctionConfigReg_DISPATCH(pGpu, function, reg, data)
1559 #define gpuReadFunctionConfigReg_HAL(pGpu, function, reg, data) gpuReadFunctionConfigReg_DISPATCH(pGpu, function, reg, data)
1560 #define gpuWriteFunctionConfigReg(pGpu, function, reg, data) gpuWriteFunctionConfigReg_DISPATCH(pGpu, function, reg, data)
1561 #define gpuWriteFunctionConfigReg_HAL(pGpu, function, reg, data) gpuWriteFunctionConfigReg_DISPATCH(pGpu, function, reg, data)
1562 #define gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_DISPATCH(pGpu, function, reg, data, pThreadState)
1563 #define gpuWriteFunctionConfigRegEx_HAL(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_DISPATCH(pGpu, function, reg, data, pThreadState)
1564 #define gpuReadVgpuConfigReg(pGpu, index, data) gpuReadVgpuConfigReg_DISPATCH(pGpu, index, data)
1565 #define gpuReadVgpuConfigReg_HAL(pGpu, index, data) gpuReadVgpuConfigReg_DISPATCH(pGpu, index, data)
1566 #define gpuGetIdInfo(pGpu) gpuGetIdInfo_DISPATCH(pGpu)
1567 #define gpuGetIdInfo_HAL(pGpu) gpuGetIdInfo_DISPATCH(pGpu)
1568 #define gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_DISPATCH(pGpu, pGidData, gidSize, gidFlags)
1569 #define gpuGenGidData_HAL(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_DISPATCH(pGpu, pGidData, gidSize, gidFlags)
1570 #define gpuGetChipSubRev(pGpu) gpuGetChipSubRev_DISPATCH(pGpu)
1571 #define gpuGetChipSubRev_HAL(pGpu) gpuGetChipSubRev_DISPATCH(pGpu)
1572 #define gpuGetSkuInfo(pGpu, pParams) gpuGetSkuInfo_DISPATCH(pGpu, pParams)
1573 #define gpuGetSkuInfo_HAL(pGpu, pParams) gpuGetSkuInfo_DISPATCH(pGpu, pParams)
1574 #define gpuGetRegBaseOffset(pGpu, arg0, arg1) gpuGetRegBaseOffset_DISPATCH(pGpu, arg0, arg1)
1575 #define gpuGetRegBaseOffset_HAL(pGpu, arg0, arg1) gpuGetRegBaseOffset_DISPATCH(pGpu, arg0, arg1)
1576 #define gpuHandleSanityCheckRegReadError(pGpu, addr, value) gpuHandleSanityCheckRegReadError_DISPATCH(pGpu, addr, value)
1577 #define gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value) gpuHandleSanityCheckRegReadError_DISPATCH(pGpu, addr, value)
1578 #define gpuHandleSecFault(pGpu) gpuHandleSecFault_DISPATCH(pGpu)
1579 #define gpuHandleSecFault_HAL(pGpu) gpuHandleSecFault_DISPATCH(pGpu)
1580 #define gpuSanityCheckVirtRegAccess(pGpu, arg0) gpuSanityCheckVirtRegAccess_DISPATCH(pGpu, arg0)
1581 #define gpuSanityCheckVirtRegAccess_HAL(pGpu, arg0) gpuSanityCheckVirtRegAccess_DISPATCH(pGpu, arg0)
1582 #define gpuGetChildrenPresent(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries)
1583 #define gpuGetChildrenPresent_HAL(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries)
1584 #define gpuGetClassDescriptorList(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0)
1585 #define gpuGetClassDescriptorList_HAL(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0)
1586 #define gpuGetPhysAddrWidth(pGpu, arg0) gpuGetPhysAddrWidth_DISPATCH(pGpu, arg0)
1587 #define gpuGetPhysAddrWidth_HAL(pGpu, arg0) gpuGetPhysAddrWidth_DISPATCH(pGpu, arg0)
1588 #define gpuInitSriov(pGpu) gpuInitSriov_DISPATCH(pGpu)
1589 #define gpuInitSriov_HAL(pGpu) gpuInitSriov_DISPATCH(pGpu)
1590 #define gpuDeinitSriov(pGpu) gpuDeinitSriov_DISPATCH(pGpu)
1591 #define gpuDeinitSriov_HAL(pGpu) gpuDeinitSriov_DISPATCH(pGpu)
1592 #define gpuCreateDefaultClientShare(pGpu) gpuCreateDefaultClientShare_DISPATCH(pGpu)
1593 #define gpuCreateDefaultClientShare_HAL(pGpu) gpuCreateDefaultClientShare_DISPATCH(pGpu)
1594 #define gpuDestroyDefaultClientShare(pGpu) gpuDestroyDefaultClientShare_DISPATCH(pGpu)
1595 #define gpuDestroyDefaultClientShare_HAL(pGpu) gpuDestroyDefaultClientShare_DISPATCH(pGpu)
1596 #define gpuGetVmmuSegmentSize(pGpu) gpuGetVmmuSegmentSize_DISPATCH(pGpu)
1597 #define gpuGetVmmuSegmentSize_HAL(pGpu) gpuGetVmmuSegmentSize_DISPATCH(pGpu)
1598 #define gpuFuseSupportsDisplay(pGpu) gpuFuseSupportsDisplay_DISPATCH(pGpu)
1599 #define gpuFuseSupportsDisplay_HAL(pGpu) gpuFuseSupportsDisplay_DISPATCH(pGpu)
1600 #define gpuGetActiveFBIOs(pGpu) gpuGetActiveFBIOs_DISPATCH(pGpu)
1601 #define gpuGetActiveFBIOs_HAL(pGpu) gpuGetActiveFBIOs_DISPATCH(pGpu)
1602 #define gpuCheckPageRetirementSupport(pGpu) gpuCheckPageRetirementSupport_DISPATCH(pGpu)
1603 #define gpuCheckPageRetirementSupport_HAL(pGpu) gpuCheckPageRetirementSupport_DISPATCH(pGpu)
1604 #define gpuIsInternalSku(pGpu) gpuIsInternalSku_DISPATCH(pGpu)
1605 #define gpuIsInternalSku_HAL(pGpu) gpuIsInternalSku_DISPATCH(pGpu)
1606 #define gpuClearFbhubPoisonIntrForBug2924523(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu)
1607 #define gpuClearFbhubPoisonIntrForBug2924523_HAL(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu)
1608 #define gpuCheckIfFbhubPoisonIntrPending(pGpu) gpuCheckIfFbhubPoisonIntrPending_DISPATCH(pGpu)
1609 #define gpuCheckIfFbhubPoisonIntrPending_HAL(pGpu) gpuCheckIfFbhubPoisonIntrPending_DISPATCH(pGpu)
1610 #define gpuGetSriovCaps(pGpu, arg0) gpuGetSriovCaps_DISPATCH(pGpu, arg0)
1611 #define gpuGetSriovCaps_HAL(pGpu, arg0) gpuGetSriovCaps_DISPATCH(pGpu, arg0)
1612 #define gpuCheckIsP2PAllocated(pGpu) gpuCheckIsP2PAllocated_DISPATCH(pGpu)
1613 #define gpuCheckIsP2PAllocated_HAL(pGpu) gpuCheckIsP2PAllocated_DISPATCH(pGpu)
1614 #define gpuPrePowerOff(pGpu) gpuPrePowerOff_DISPATCH(pGpu)
1615 #define gpuPrePowerOff_HAL(pGpu) gpuPrePowerOff_DISPATCH(pGpu)
1616 #define gpuVerifyExistence(pGpu) gpuVerifyExistence_DISPATCH(pGpu)
1617 #define gpuVerifyExistence_HAL(pGpu) gpuVerifyExistence_DISPATCH(pGpu)
1618 #define gpuGetFlaVasSize(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization)
1619 #define gpuGetFlaVasSize_HAL(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization)
1620 #define gpuIsAtsSupportedWithSmcMemPartitioning(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(pGpu)
1621 #define gpuIsAtsSupportedWithSmcMemPartitioning_HAL(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(pGpu)
1622 #define gpuIsGlobalPoisonFuseEnabled(pGpu) gpuIsGlobalPoisonFuseEnabled_DISPATCH(pGpu)
1623 #define gpuIsGlobalPoisonFuseEnabled_HAL(pGpu) gpuIsGlobalPoisonFuseEnabled_DISPATCH(pGpu)
1624 #define gpuDetermineSelfHostedMode(pGpu) gpuDetermineSelfHostedMode_DISPATCH(pGpu)
1625 #define gpuDetermineSelfHostedMode_HAL(pGpu) gpuDetermineSelfHostedMode_DISPATCH(pGpu)
1626 #define gpuDetermineMIGSupport(pGpu) gpuDetermineMIGSupport_DISPATCH(pGpu)
1627 #define gpuDetermineMIGSupport_HAL(pGpu) gpuDetermineMIGSupport_DISPATCH(pGpu)
1628 #define gpuInitOptimusSettings(pGpu) gpuInitOptimusSettings_DISPATCH(pGpu)
1629 #define gpuInitOptimusSettings_HAL(pGpu) gpuInitOptimusSettings_DISPATCH(pGpu)
1630 #define gpuDeinitOptimusSettings(pGpu) gpuDeinitOptimusSettings_DISPATCH(pGpu)
1631 #define gpuDeinitOptimusSettings_HAL(pGpu) gpuDeinitOptimusSettings_DISPATCH(pGpu)
1632 #define gpuIsSliCapableWithoutDisplay(pGpu) gpuIsSliCapableWithoutDisplay_DISPATCH(pGpu)
1633 #define gpuIsSliCapableWithoutDisplay_HAL(pGpu) gpuIsSliCapableWithoutDisplay_DISPATCH(pGpu)
1634 #define gpuIsCCEnabledInHw(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
1635 #define gpuIsCCEnabledInHw_HAL(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
1636 #define gpuIsDevModeEnabledInHw(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
1637 #define gpuIsDevModeEnabledInHw_HAL(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
1638 #define gpuIsProtectedPcieEnabledInHw(pGpu) gpuIsProtectedPcieEnabledInHw_DISPATCH(pGpu)
1639 #define gpuIsProtectedPcieEnabledInHw_HAL(pGpu) gpuIsProtectedPcieEnabledInHw_DISPATCH(pGpu)
1640 #define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
1641 #define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
1642 #define gpuUpdateErrorContainmentState(pGpu, arg0, arg1, arg2) gpuUpdateErrorContainmentState_DISPATCH(pGpu, arg0, arg1, arg2)
1643 #define gpuUpdateErrorContainmentState_HAL(pGpu, arg0, arg1, arg2) gpuUpdateErrorContainmentState_DISPATCH(pGpu, arg0, arg1, arg2)
1644 #define gpuWaitForGfwBootComplete(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
1645 #define gpuWaitForGfwBootComplete_HAL(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
1646 #define gpuGetIsCmpSku(pGpu) gpuGetIsCmpSku_DISPATCH(pGpu)
1647 #define gpuGetIsCmpSku_HAL(pGpu) gpuGetIsCmpSku_DISPATCH(pGpu)
1648 static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) {
1649     return NV_OK;
1650 }
1651 
1652 
1653 #ifdef __nvoc_gpu_h_disabled
1654 static inline NV_STATUS gpuConstructPhysical(struct OBJGPU *pGpu) {
1655     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1656     return NV_ERR_NOT_SUPPORTED;
1657 }
1658 #else //__nvoc_gpu_h_disabled
1659 #define gpuConstructPhysical(pGpu) gpuConstructPhysical_56cd7a(pGpu)
1660 #endif //__nvoc_gpu_h_disabled
1661 
1662 #define gpuConstructPhysical_HAL(pGpu) gpuConstructPhysical(pGpu)
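/*
 * Pattern note (illustrative, not generated): each API below is emitted in two
 * flavors. When __nvoc_gpu_h_disabled is defined, callers get an inline stub
 * that asserts and returns NV_ERR_NOT_SUPPORTED (or a zeroed value); otherwise
 * the unsuffixed name is a macro bound to the selected implementation, e.g.
 * the default gpuConstructPhysical_56cd7a() above, which simply returns NV_OK.
 * A caller writes the same code either way, for example:
 *
 *     NV_ASSERT_OK_OR_RETURN(gpuConstructPhysical_HAL(pGpu));
 */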
1663 
1664 static inline void gpuDestructPhysical_b3696a(struct OBJGPU *pGpu) {
1665     return;
1666 }
1667 
1668 
1669 #ifdef __nvoc_gpu_h_disabled
1670 static inline void gpuDestructPhysical(struct OBJGPU *pGpu) {
1671     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1672 }
1673 #else //__nvoc_gpu_h_disabled
1674 #define gpuDestructPhysical(pGpu) gpuDestructPhysical_b3696a(pGpu)
1675 #endif //__nvoc_gpu_h_disabled
1676 
1677 #define gpuDestructPhysical_HAL(pGpu) gpuDestructPhysical(pGpu)
1678 
1679 NV_STATUS gpuStatePreInit_IMPL(struct OBJGPU *pGpu);
1680 
1681 
1682 #ifdef __nvoc_gpu_h_disabled
1683 static inline NV_STATUS gpuStatePreInit(struct OBJGPU *pGpu) {
1684     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1685     return NV_ERR_NOT_SUPPORTED;
1686 }
1687 #else //__nvoc_gpu_h_disabled
1688 #define gpuStatePreInit(pGpu) gpuStatePreInit_IMPL(pGpu)
1689 #endif //__nvoc_gpu_h_disabled
1690 
1691 #define gpuStatePreInit_HAL(pGpu) gpuStatePreInit(pGpu)
1692 
1693 NV_STATUS gpuStateLoad_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
1694 
1695 
1696 #ifdef __nvoc_gpu_h_disabled
1697 static inline NV_STATUS gpuStateLoad(struct OBJGPU *pGpu, NvU32 arg0) {
1698     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1699     return NV_ERR_NOT_SUPPORTED;
1700 }
1701 #else //__nvoc_gpu_h_disabled
1702 #define gpuStateLoad(pGpu, arg0) gpuStateLoad_IMPL(pGpu, arg0)
1703 #endif //__nvoc_gpu_h_disabled
1704 
1705 #define gpuStateLoad_HAL(pGpu, arg0) gpuStateLoad(pGpu, arg0)
1706 
1707 NV_STATUS gpuStateDestroy_IMPL(struct OBJGPU *pGpu);
1708 
1709 
1710 #ifdef __nvoc_gpu_h_disabled
1711 static inline NV_STATUS gpuStateDestroy(struct OBJGPU *pGpu) {
1712     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1713     return NV_ERR_NOT_SUPPORTED;
1714 }
1715 #else //__nvoc_gpu_h_disabled
1716 #define gpuStateDestroy(pGpu) gpuStateDestroy_IMPL(pGpu)
1717 #endif //__nvoc_gpu_h_disabled
1718 
1719 #define gpuStateDestroy_HAL(pGpu) gpuStateDestroy(pGpu)
1720 
1721 static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_56cd7a(struct OBJGPU *pGpu) {
1722     return NV_OK;
1723 }
1724 
1725 NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_IMPL(struct OBJGPU *pGpu);
1726 
1727 
1728 #ifdef __nvoc_gpu_h_disabled
1729 static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical(struct OBJGPU *pGpu) {
1730     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1731     return NV_ERR_NOT_SUPPORTED;
1732 }
1733 #else //__nvoc_gpu_h_disabled
1734 #define gpuPowerManagementEnterPreUnloadPhysical(pGpu) gpuPowerManagementEnterPreUnloadPhysical_56cd7a(pGpu)
1735 #endif //__nvoc_gpu_h_disabled
1736 
1737 #define gpuPowerManagementEnterPreUnloadPhysical_HAL(pGpu) gpuPowerManagementEnterPreUnloadPhysical(pGpu)
1738 
1739 static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 newLevel) {
1740     return NV_OK;
1741 }
1742 
1743 NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_IMPL(struct OBJGPU *pGpu, NvU32 newLevel);
1744 
1745 
1746 #ifdef __nvoc_gpu_h_disabled
1747 static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical(struct OBJGPU *pGpu, NvU32 newLevel) {
1748     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1749     return NV_ERR_NOT_SUPPORTED;
1750 }
1751 #else //__nvoc_gpu_h_disabled
1752 #define gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical_56cd7a(pGpu, newLevel)
1753 #endif //__nvoc_gpu_h_disabled
1754 
1755 #define gpuPowerManagementEnterPostUnloadPhysical_HAL(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel)
1756 
1757 static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) {
1758     return NV_OK;
1759 }
1760 
1761 NV_STATUS gpuPowerManagementResumePreLoadPhysical_IMPL(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags);
1762 
1763 
1764 #ifdef __nvoc_gpu_h_disabled
1765 static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) {
1766     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1767     return NV_ERR_NOT_SUPPORTED;
1768 }
1769 #else //__nvoc_gpu_h_disabled
1770 #define gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical_56cd7a(pGpu, oldLevel, flags)
1771 #endif //__nvoc_gpu_h_disabled
1772 
1773 #define gpuPowerManagementResumePreLoadPhysical_HAL(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags)
1774 
1775 static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical_56cd7a(struct OBJGPU *pGpu) {
1776     return NV_OK;
1777 }
1778 
1779 NV_STATUS gpuPowerManagementResumePostLoadPhysical_IMPL(struct OBJGPU *pGpu);
1780 
1781 
1782 #ifdef __nvoc_gpu_h_disabled
1783 static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical(struct OBJGPU *pGpu) {
1784     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1785     return NV_ERR_NOT_SUPPORTED;
1786 }
1787 #else //__nvoc_gpu_h_disabled
1788 #define gpuPowerManagementResumePostLoadPhysical(pGpu) gpuPowerManagementResumePostLoadPhysical_56cd7a(pGpu)
1789 #endif //__nvoc_gpu_h_disabled
1790 
1791 #define gpuPowerManagementResumePostLoadPhysical_HAL(pGpu) gpuPowerManagementResumePostLoadPhysical(pGpu)
1792 
1793 static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx_46f6a7(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) {
1794     return NV_ERR_NOT_SUPPORTED;
1795 }
1796 
1797 
1798 #ifdef __nvoc_gpu_h_disabled
1799 static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) {
1800     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1801     return NV_ERR_NOT_SUPPORTED;
1802 }
1803 #else //__nvoc_gpu_h_disabled
1804 #define gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx_46f6a7(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin)
1805 #endif //__nvoc_gpu_h_disabled
1806 
1807 #define gpuInitializeMemDescFromPromotedCtx_HAL(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin)
1808 
1809 static inline void gpuSetThreadBcState_b3696a(struct OBJGPU *pGpu, NvBool arg0) {
1810     return;
1811 }
1812 
1813 
1814 #ifdef __nvoc_gpu_h_disabled
1815 static inline void gpuSetThreadBcState(struct OBJGPU *pGpu, NvBool arg0) {
1816     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1817 }
1818 #else //__nvoc_gpu_h_disabled
1819 #define gpuSetThreadBcState(pGpu, arg0) gpuSetThreadBcState_b3696a(pGpu, arg0)
1820 #endif //__nvoc_gpu_h_disabled
1821 
1822 #define gpuSetThreadBcState_HAL(pGpu, arg0) gpuSetThreadBcState(pGpu, arg0)
1823 
1824 static inline void gpuDeterminePersistantIllumSettings_b3696a(struct OBJGPU *pGpu) {
1825     return;
1826 }
1827 
1828 
1829 #ifdef __nvoc_gpu_h_disabled
1830 static inline void gpuDeterminePersistantIllumSettings(struct OBJGPU *pGpu) {
1831     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1832 }
1833 #else //__nvoc_gpu_h_disabled
1834 #define gpuDeterminePersistantIllumSettings(pGpu) gpuDeterminePersistantIllumSettings_b3696a(pGpu)
1835 #endif //__nvoc_gpu_h_disabled
1836 
1837 #define gpuDeterminePersistantIllumSettings_HAL(pGpu) gpuDeterminePersistantIllumSettings(pGpu)
1838 
1839 static inline NV_STATUS gpuInitSliIllumination_46f6a7(struct OBJGPU *pGpu) {
1840     return NV_ERR_NOT_SUPPORTED;
1841 }
1842 
1843 
1844 #ifdef __nvoc_gpu_h_disabled
1845 static inline NV_STATUS gpuInitSliIllumination(struct OBJGPU *pGpu) {
1846     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1847     return NV_ERR_NOT_SUPPORTED;
1848 }
1849 #else //__nvoc_gpu_h_disabled
1850 #define gpuInitSliIllumination(pGpu) gpuInitSliIllumination_46f6a7(pGpu)
1851 #endif //__nvoc_gpu_h_disabled
1852 
1853 #define gpuInitSliIllumination_HAL(pGpu) gpuInitSliIllumination(pGpu)
1854 
1855 NV_STATUS gpuBuildGenericKernelFalconList_IMPL(struct OBJGPU *pGpu);
1856 
1857 
1858 #ifdef __nvoc_gpu_h_disabled
1859 static inline NV_STATUS gpuBuildGenericKernelFalconList(struct OBJGPU *pGpu) {
1860     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1861     return NV_ERR_NOT_SUPPORTED;
1862 }
1863 #else //__nvoc_gpu_h_disabled
1864 #define gpuBuildGenericKernelFalconList(pGpu) gpuBuildGenericKernelFalconList_IMPL(pGpu)
1865 #endif //__nvoc_gpu_h_disabled
1866 
1867 #define gpuBuildGenericKernelFalconList_HAL(pGpu) gpuBuildGenericKernelFalconList(pGpu)
1868 
1869 void gpuDestroyGenericKernelFalconList_IMPL(struct OBJGPU *pGpu);
1870 
1871 
1872 #ifdef __nvoc_gpu_h_disabled
1873 static inline void gpuDestroyGenericKernelFalconList(struct OBJGPU *pGpu) {
1874     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1875 }
1876 #else //__nvoc_gpu_h_disabled
1877 #define gpuDestroyGenericKernelFalconList(pGpu) gpuDestroyGenericKernelFalconList_IMPL(pGpu)
1878 #endif //__nvoc_gpu_h_disabled
1879 
1880 #define gpuDestroyGenericKernelFalconList_HAL(pGpu) gpuDestroyGenericKernelFalconList(pGpu)
1881 
1882 struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);
1883 
1884 
1885 #ifdef __nvoc_gpu_h_disabled
1886 static inline struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
1887     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1888     return NULL;
1889 }
1890 #else //__nvoc_gpu_h_disabled
1891 #define gpuGetGenericKernelFalconForEngine(pGpu, arg0) gpuGetGenericKernelFalconForEngine_IMPL(pGpu, arg0)
1892 #endif //__nvoc_gpu_h_disabled
1893 
1894 #define gpuGetGenericKernelFalconForEngine_HAL(pGpu, arg0) gpuGetGenericKernelFalconForEngine(pGpu, arg0)
1895 
1896 void gpuRegisterGenericKernelFalconIntrService_IMPL(struct OBJGPU *pGpu, void *pRecords);
1897 
1898 
1899 #ifdef __nvoc_gpu_h_disabled
1900 static inline void gpuRegisterGenericKernelFalconIntrService(struct OBJGPU *pGpu, void *pRecords) {
1901     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1902 }
1903 #else //__nvoc_gpu_h_disabled
1904 #define gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService_IMPL(pGpu, pRecords)
1905 #endif //__nvoc_gpu_h_disabled
1906 
1907 #define gpuRegisterGenericKernelFalconIntrService_HAL(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords)
1908 
1909 static inline void gpuGetHwDefaults_b3696a(struct OBJGPU *pGpu) {
1910     return;
1911 }
1912 
1913 
1914 #ifdef __nvoc_gpu_h_disabled
1915 static inline void gpuGetHwDefaults(struct OBJGPU *pGpu) {
1916     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1917 }
1918 #else //__nvoc_gpu_h_disabled
1919 #define gpuGetHwDefaults(pGpu) gpuGetHwDefaults_b3696a(pGpu)
1920 #endif //__nvoc_gpu_h_disabled
1921 
1922 #define gpuGetHwDefaults_HAL(pGpu) gpuGetHwDefaults(pGpu)
1923 
1924 RmPhysAddr gpuGetDmaEndAddress_IMPL(struct OBJGPU *pGpu);
1925 
1926 
1927 #ifdef __nvoc_gpu_h_disabled
1928 static inline RmPhysAddr gpuGetDmaEndAddress(struct OBJGPU *pGpu) {
1929     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1930     RmPhysAddr ret;
1931     portMemSet(&ret, 0, sizeof(RmPhysAddr));
1932     return ret;
1933 }
1934 #else //__nvoc_gpu_h_disabled
1935 #define gpuGetDmaEndAddress(pGpu) gpuGetDmaEndAddress_IMPL(pGpu)
1936 #endif //__nvoc_gpu_h_disabled
1937 
1938 #define gpuGetDmaEndAddress_HAL(pGpu) gpuGetDmaEndAddress(pGpu)
1939 
1940 static inline NV_STATUS gpuSetStateResetRequired_395e98(struct OBJGPU *pGpu, NvU32 exceptType) {
1941     return NV_ERR_NOT_SUPPORTED;
1942 }
1943 
1944 
1945 #ifdef __nvoc_gpu_h_disabled
1946 static inline NV_STATUS gpuSetStateResetRequired(struct OBJGPU *pGpu, NvU32 exceptType) {
1947     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1948     return NV_ERR_NOT_SUPPORTED;
1949 }
1950 #else //__nvoc_gpu_h_disabled
1951 #define gpuSetStateResetRequired(pGpu, exceptType) gpuSetStateResetRequired_395e98(pGpu, exceptType)
1952 #endif //__nvoc_gpu_h_disabled
1953 
1954 #define gpuSetStateResetRequired_HAL(pGpu, exceptType) gpuSetStateResetRequired(pGpu, exceptType)
1955 
1956 static inline NV_STATUS gpuMarkDeviceForReset_395e98(struct OBJGPU *pGpu) {
1957     return NV_ERR_NOT_SUPPORTED;
1958 }
1959 
1960 
1961 #ifdef __nvoc_gpu_h_disabled
1962 static inline NV_STATUS gpuMarkDeviceForReset(struct OBJGPU *pGpu) {
1963     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1964     return NV_ERR_NOT_SUPPORTED;
1965 }
1966 #else //__nvoc_gpu_h_disabled
1967 #define gpuMarkDeviceForReset(pGpu) gpuMarkDeviceForReset_395e98(pGpu)
1968 #endif //__nvoc_gpu_h_disabled
1969 
1970 #define gpuMarkDeviceForReset_HAL(pGpu) gpuMarkDeviceForReset(pGpu)
1971 
1972 static inline NV_STATUS gpuUnmarkDeviceForReset_395e98(struct OBJGPU *pGpu) {
1973     return NV_ERR_NOT_SUPPORTED;
1974 }
1975 
1976 
1977 #ifdef __nvoc_gpu_h_disabled
1978 static inline NV_STATUS gpuUnmarkDeviceForReset(struct OBJGPU *pGpu) {
1979     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
1980     return NV_ERR_NOT_SUPPORTED;
1981 }
1982 #else //__nvoc_gpu_h_disabled
1983 #define gpuUnmarkDeviceForReset(pGpu) gpuUnmarkDeviceForReset_395e98(pGpu)
1984 #endif //__nvoc_gpu_h_disabled
1985 
1986 #define gpuUnmarkDeviceForReset_HAL(pGpu) gpuUnmarkDeviceForReset(pGpu)
1987 
1988 static inline NV_STATUS gpuIsDeviceMarkedForReset_82f166(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
1989     *pbResetRequired = ((NvBool)(0 != 0));
1990     {
1991         return NV_ERR_NOT_SUPPORTED;
1992     }
1993     ;
1994 }
1995 
1996 
1997 #ifdef __nvoc_gpu_h_disabled
1998 static inline NV_STATUS gpuIsDeviceMarkedForReset(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
1999     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2000     return NV_ERR_NOT_SUPPORTED;
2001 }
2002 #else //__nvoc_gpu_h_disabled
2003 #define gpuIsDeviceMarkedForReset(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset_82f166(pGpu, pbResetRequired)
2004 #endif //__nvoc_gpu_h_disabled
2005 
2006 #define gpuIsDeviceMarkedForReset_HAL(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset(pGpu, pbResetRequired)
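/*
 * Usage sketch (illustrative): in this header the default binding is the
 * _82f166 stub, which clears the flag and reports NV_ERR_NOT_SUPPORTED, so
 * callers should check the status before trusting the output flag.
 *
 *     NvBool bResetRequired = NV_FALSE;
 *     if (gpuIsDeviceMarkedForReset_HAL(pGpu, &bResetRequired) == NV_OK &&
 *         bResetRequired)
 *     {
 *         // Kick off the reset handling flow for this GPU.
 *     }
 */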
2007 
2008 static inline NV_STATUS gpuMarkDeviceForDrainAndReset_395e98(struct OBJGPU *pGpu) {
2009     return NV_ERR_NOT_SUPPORTED;
2010 }
2011 
2012 
2013 #ifdef __nvoc_gpu_h_disabled
2014 static inline NV_STATUS gpuMarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
2015     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2016     return NV_ERR_NOT_SUPPORTED;
2017 }
2018 #else //__nvoc_gpu_h_disabled
2019 #define gpuMarkDeviceForDrainAndReset(pGpu) gpuMarkDeviceForDrainAndReset_395e98(pGpu)
2020 #endif //__nvoc_gpu_h_disabled
2021 
2022 #define gpuMarkDeviceForDrainAndReset_HAL(pGpu) gpuMarkDeviceForDrainAndReset(pGpu)
2023 
2024 static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset_395e98(struct OBJGPU *pGpu) {
2025     return NV_ERR_NOT_SUPPORTED;
2026 }
2027 
2028 
2029 #ifdef __nvoc_gpu_h_disabled
2030 static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
2031     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2032     return NV_ERR_NOT_SUPPORTED;
2033 }
2034 #else //__nvoc_gpu_h_disabled
2035 #define gpuUnmarkDeviceForDrainAndReset(pGpu) gpuUnmarkDeviceForDrainAndReset_395e98(pGpu)
2036 #endif //__nvoc_gpu_h_disabled
2037 
2038 #define gpuUnmarkDeviceForDrainAndReset_HAL(pGpu) gpuUnmarkDeviceForDrainAndReset(pGpu)
2039 
2040 static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset_244f65(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
2041     *pbDrainRecommended = ((NvBool)(0 != 0));
2042     {
2043         return NV_ERR_NOT_SUPPORTED;
2044     }
2045     ;
2046 }
2047 
2048 
2049 #ifdef __nvoc_gpu_h_disabled
2050 static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
2051     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2052     return NV_ERR_NOT_SUPPORTED;
2053 }
2054 #else //__nvoc_gpu_h_disabled
2055 #define gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset_244f65(pGpu, pbDrainRecommended)
2056 #endif //__nvoc_gpu_h_disabled
2057 
2058 #define gpuIsDeviceMarkedForDrainAndReset_HAL(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended)
2059 
2060 static inline NV_STATUS gpuPrivSecInitRegistryOverrides_56cd7a(struct OBJGPU *pGpu) {
2061     return NV_OK;
2062 }
2063 
2064 
2065 #ifdef __nvoc_gpu_h_disabled
2066 static inline NV_STATUS gpuPrivSecInitRegistryOverrides(struct OBJGPU *pGpu) {
2067     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2068     return NV_ERR_NOT_SUPPORTED;
2069 }
2070 #else //__nvoc_gpu_h_disabled
2071 #define gpuPrivSecInitRegistryOverrides(pGpu) gpuPrivSecInitRegistryOverrides_56cd7a(pGpu)
2072 #endif //__nvoc_gpu_h_disabled
2073 
2074 #define gpuPrivSecInitRegistryOverrides_HAL(pGpu) gpuPrivSecInitRegistryOverrides(pGpu)
2075 
2076 NV_STATUS gpuSetPower_GM107(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3);
2077 
2078 
2079 #ifdef __nvoc_gpu_h_disabled
2080 static inline NV_STATUS gpuSetPower(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3) {
2081     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2082     return NV_ERR_NOT_SUPPORTED;
2083 }
2084 #else //__nvoc_gpu_h_disabled
2085 #define gpuSetPower(pGpu, arg1, arg2, arg3) gpuSetPower_GM107(pGpu, arg1, arg2, arg3)
2086 #endif //__nvoc_gpu_h_disabled
2087 
2088 #define gpuSetPower_HAL(pGpu, arg1, arg2, arg3) gpuSetPower(pGpu, arg1, arg2, arg3)
2089 
2090 static inline void gpuUpdateIdInfo_b3696a(struct OBJGPU *pGpu) {
2091     return;
2092 }
2093 
2094 void gpuUpdateIdInfo_GK104(struct OBJGPU *pGpu);
2095 
2096 
2097 #ifdef __nvoc_gpu_h_disabled
2098 static inline void gpuUpdateIdInfo(struct OBJGPU *pGpu) {
2099     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2100 }
2101 #else //__nvoc_gpu_h_disabled
2102 #define gpuUpdateIdInfo(pGpu) gpuUpdateIdInfo_b3696a(pGpu)
2103 #endif //__nvoc_gpu_h_disabled
2104 
2105 #define gpuUpdateIdInfo_HAL(pGpu) gpuUpdateIdInfo(pGpu)
2106 
2107 static inline NvU32 gpuGetDeviceIDList_4a4dee(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) {
2108     return 0;
2109 }
2110 
2111 
2112 #ifdef __nvoc_gpu_h_disabled
2113 static inline NvU32 gpuGetDeviceIDList(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) {
2114     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2115     return 0;
2116 }
2117 #else //__nvoc_gpu_h_disabled
2118 #define gpuGetDeviceIDList(pGpu, arg0) gpuGetDeviceIDList_4a4dee(pGpu, arg0)
2119 #endif //__nvoc_gpu_h_disabled
2120 
2121 #define gpuGetDeviceIDList_HAL(pGpu, arg0) gpuGetDeviceIDList(pGpu, arg0)
2122 
2123 static inline NV_STATUS gpuPerformUniversalValidation_56cd7a(struct OBJGPU *pGpu) {
2124     return NV_OK;
2125 }
2126 
2127 NV_STATUS gpuPerformUniversalValidation_GM107(struct OBJGPU *pGpu);
2128 
2129 
2130 #ifdef __nvoc_gpu_h_disabled
2131 static inline NV_STATUS gpuPerformUniversalValidation(struct OBJGPU *pGpu) {
2132     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2133     return NV_ERR_NOT_SUPPORTED;
2134 }
2135 #else //__nvoc_gpu_h_disabled
2136 #define gpuPerformUniversalValidation(pGpu) gpuPerformUniversalValidation_56cd7a(pGpu)
2137 #endif //__nvoc_gpu_h_disabled
2138 
2139 #define gpuPerformUniversalValidation_HAL(pGpu) gpuPerformUniversalValidation(pGpu)
2140 
2141 NvU32 gpuGetVirtRegPhysOffset_TU102(struct OBJGPU *pGpu);
2142 
2143 
2144 #ifdef __nvoc_gpu_h_disabled
2145 static inline NvU32 gpuGetVirtRegPhysOffset(struct OBJGPU *pGpu) {
2146     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2147     return 0;
2148 }
2149 #else //__nvoc_gpu_h_disabled
2150 #define gpuGetVirtRegPhysOffset(pGpu) gpuGetVirtRegPhysOffset_TU102(pGpu)
2151 #endif //__nvoc_gpu_h_disabled
2152 
2153 #define gpuGetVirtRegPhysOffset_HAL(pGpu) gpuGetVirtRegPhysOffset(pGpu)
2154 
2155 void gpuGetSanityCheckRegReadError_GM107(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString);
2156 
2157 
2158 #ifdef __nvoc_gpu_h_disabled
2159 static inline void gpuGetSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
2160     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2161 }
2162 #else //__nvoc_gpu_h_disabled
2163 #define gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError_GM107(pGpu, value, pErrorString)
2164 #endif //__nvoc_gpu_h_disabled
2165 
2166 #define gpuGetSanityCheckRegReadError_HAL(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError(pGpu, value, pErrorString)
2167 
2168 NV_STATUS gpuInitRegistryOverrides_KERNEL(struct OBJGPU *pGpu);
2169 
2170 
2171 #ifdef __nvoc_gpu_h_disabled
2172 static inline NV_STATUS gpuInitRegistryOverrides(struct OBJGPU *pGpu) {
2173     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2174     return NV_ERR_NOT_SUPPORTED;
2175 }
2176 #else //__nvoc_gpu_h_disabled
2177 #define gpuInitRegistryOverrides(pGpu) gpuInitRegistryOverrides_KERNEL(pGpu)
2178 #endif //__nvoc_gpu_h_disabled
2179 
2180 #define gpuInitRegistryOverrides_HAL(pGpu) gpuInitRegistryOverrides(pGpu)
2181 
2182 NV_STATUS gpuInitInstLocOverrides_IMPL(struct OBJGPU *pGpu);
2183 
2184 
2185 #ifdef __nvoc_gpu_h_disabled
2186 static inline NV_STATUS gpuInitInstLocOverrides(struct OBJGPU *pGpu) {
2187     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2188     return NV_ERR_NOT_SUPPORTED;
2189 }
2190 #else //__nvoc_gpu_h_disabled
2191 #define gpuInitInstLocOverrides(pGpu) gpuInitInstLocOverrides_IMPL(pGpu)
2192 #endif //__nvoc_gpu_h_disabled
2193 
2194 #define gpuInitInstLocOverrides_HAL(pGpu) gpuInitInstLocOverrides(pGpu)
2195 
2196 const GPUCHILDORDER *gpuGetChildrenOrder_GM200(struct OBJGPU *pGpu, NvU32 *pNumEntries);
2197 
2198 
2199 #ifdef __nvoc_gpu_h_disabled
2200 static inline const GPUCHILDORDER *gpuGetChildrenOrder(struct OBJGPU *pGpu, NvU32 *pNumEntries) {
2201     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2202     return NULL;
2203 }
2204 #else //__nvoc_gpu_h_disabled
2205 #define gpuGetChildrenOrder(pGpu, pNumEntries) gpuGetChildrenOrder_GM200(pGpu, pNumEntries)
2206 #endif //__nvoc_gpu_h_disabled
2207 
2208 #define gpuGetChildrenOrder_HAL(pGpu, pNumEntries) gpuGetChildrenOrder(pGpu, pNumEntries)
2209 
2210 void gpuGetTerminatedLinkMask_GA100(struct OBJGPU *pGpu, NvU32 arg0);
2211 
2212 
2213 #ifdef __nvoc_gpu_h_disabled
2214 static inline void gpuGetTerminatedLinkMask(struct OBJGPU *pGpu, NvU32 arg0) {
2215     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2216 }
2217 #else //__nvoc_gpu_h_disabled
2218 #define gpuGetTerminatedLinkMask(pGpu, arg0) gpuGetTerminatedLinkMask_GA100(pGpu, arg0)
2219 #endif //__nvoc_gpu_h_disabled
2220 
2221 #define gpuGetTerminatedLinkMask_HAL(pGpu, arg0) gpuGetTerminatedLinkMask(pGpu, arg0)
2222 
2223 NV_STATUS gpuJtVersionSanityCheck_TU102(struct OBJGPU *pGpu);
2224 
2225 
2226 #ifdef __nvoc_gpu_h_disabled
2227 static inline NV_STATUS gpuJtVersionSanityCheck(struct OBJGPU *pGpu) {
2228     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2229     return NV_ERR_NOT_SUPPORTED;
2230 }
2231 #else //__nvoc_gpu_h_disabled
2232 #define gpuJtVersionSanityCheck(pGpu) gpuJtVersionSanityCheck_TU102(pGpu)
2233 #endif //__nvoc_gpu_h_disabled
2234 
2235 #define gpuJtVersionSanityCheck_HAL(pGpu) gpuJtVersionSanityCheck(pGpu)
2236 
2237 static inline NvBool gpuCompletedGC6PowerOff_cbe027(struct OBJGPU *pGpu) {
2238     return ((NvBool)(0 == 0));
2239 }
2240 
2241 NvBool gpuCompletedGC6PowerOff_GV100(struct OBJGPU *pGpu);
2242 
2243 
2244 #ifdef __nvoc_gpu_h_disabled
2245 static inline NvBool gpuCompletedGC6PowerOff(struct OBJGPU *pGpu) {
2246     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2247     return NV_FALSE;
2248 }
2249 #else //__nvoc_gpu_h_disabled
2250 #define gpuCompletedGC6PowerOff(pGpu) gpuCompletedGC6PowerOff_cbe027(pGpu)
2251 #endif //__nvoc_gpu_h_disabled
2252 
2253 #define gpuCompletedGC6PowerOff_HAL(pGpu) gpuCompletedGC6PowerOff(pGpu)
2254 
2255 static inline NvBool gpuIsACPIPatchRequiredForBug2473619_491d52(struct OBJGPU *pGpu) {
2256     return ((NvBool)(0 != 0));
2257 }
2258 
2259 
2260 #ifdef __nvoc_gpu_h_disabled
2261 static inline NvBool gpuIsACPIPatchRequiredForBug2473619(struct OBJGPU *pGpu) {
2262     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2263     return NV_FALSE;
2264 }
2265 #else //__nvoc_gpu_h_disabled
2266 #define gpuIsACPIPatchRequiredForBug2473619(pGpu) gpuIsACPIPatchRequiredForBug2473619_491d52(pGpu)
2267 #endif //__nvoc_gpu_h_disabled
2268 
2269 #define gpuIsACPIPatchRequiredForBug2473619_HAL(pGpu) gpuIsACPIPatchRequiredForBug2473619(pGpu)
2270 
2271 static inline NvBool gpuIsDebuggerActive_8031b9(struct OBJGPU *pGpu) {
2272     return pGpu->bIsDebugModeEnabled;
2273 }
2274 
2275 
2276 #ifdef __nvoc_gpu_h_disabled
2277 static inline NvBool gpuIsDebuggerActive(struct OBJGPU *pGpu) {
2278     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2279     return NV_FALSE;
2280 }
2281 #else //__nvoc_gpu_h_disabled
2282 #define gpuIsDebuggerActive(pGpu) gpuIsDebuggerActive_8031b9(pGpu)
2283 #endif //__nvoc_gpu_h_disabled
2284 
2285 #define gpuIsDebuggerActive_HAL(pGpu) gpuIsDebuggerActive(pGpu)
2286 
2287 static inline NV_STATUS gpuGetFipsStatus_46f6a7(struct OBJGPU *pGpu, NvBool *bFipsEnabled) {
2288     return NV_ERR_NOT_SUPPORTED;
2289 }
2290 
2291 NV_STATUS gpuGetFipsStatus_GH100(struct OBJGPU *pGpu, NvBool *bFipsEnabled);
2292 
2293 
2294 #ifdef __nvoc_gpu_h_disabled
2295 static inline NV_STATUS gpuGetFipsStatus(struct OBJGPU *pGpu, NvBool *bFipsEnabled) {
2296     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2297     return NV_ERR_NOT_SUPPORTED;
2298 }
2299 #else //__nvoc_gpu_h_disabled
2300 #define gpuGetFipsStatus(pGpu, bFipsEnabled) gpuGetFipsStatus_46f6a7(pGpu, bFipsEnabled)
2301 #endif //__nvoc_gpu_h_disabled
2302 
2303 #define gpuGetFipsStatus_HAL(pGpu, bFipsEnabled) gpuGetFipsStatus(pGpu, bFipsEnabled)
2304 
2305 NV_STATUS gpuExecGrCtxRegops_GK104(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw);
2306 
2307 
2308 #ifdef __nvoc_gpu_h_disabled
2309 static inline NV_STATUS gpuExecGrCtxRegops(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw) {
2310     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2311     return NV_ERR_NOT_SUPPORTED;
2312 }
2313 #else //__nvoc_gpu_h_disabled
2314 #define gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops_GK104(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw)
2315 #endif //__nvoc_gpu_h_disabled
2316 
2317 #define gpuExecGrCtxRegops_HAL(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw)
2318 
2319 NV_STATUS gpuExtdevConstruct_GK104(struct OBJGPU *pGpu);
2320 
2321 
2322 #ifdef __nvoc_gpu_h_disabled
2323 static inline NV_STATUS gpuExtdevConstruct(struct OBJGPU *pGpu) {
2324     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2325     return NV_ERR_NOT_SUPPORTED;
2326 }
2327 #else //__nvoc_gpu_h_disabled
2328 #define gpuExtdevConstruct(pGpu) gpuExtdevConstruct_GK104(pGpu)
2329 #endif //__nvoc_gpu_h_disabled
2330 
2331 #define gpuExtdevConstruct_HAL(pGpu) gpuExtdevConstruct(pGpu)
2332 
2333 static inline NV_STATUS gpuGc6EntryPstateCheck_56cd7a(struct OBJGPU *pGpu) {
2334     return NV_OK;
2335 }
2336 
2337 
2338 #ifdef __nvoc_gpu_h_disabled
2339 static inline NV_STATUS gpuGc6EntryPstateCheck(struct OBJGPU *pGpu) {
2340     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2341     return NV_ERR_NOT_SUPPORTED;
2342 }
2343 #else //__nvoc_gpu_h_disabled
2344 #define gpuGc6EntryPstateCheck(pGpu) gpuGc6EntryPstateCheck_56cd7a(pGpu)
2345 #endif //__nvoc_gpu_h_disabled
2346 
2347 #define gpuGc6EntryPstateCheck_HAL(pGpu) gpuGc6EntryPstateCheck(pGpu)
2348 
2349 static inline NV_STATUS gpuWaitGC6Ready_56cd7a(struct OBJGPU *pGpu) {
2350     return NV_OK;
2351 }
2352 
2353 NV_STATUS gpuWaitGC6Ready_GM107(struct OBJGPU *pGpu);
2354 
2355 
2356 #ifdef __nvoc_gpu_h_disabled
2357 static inline NV_STATUS gpuWaitGC6Ready(struct OBJGPU *pGpu) {
2358     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2359     return NV_ERR_NOT_SUPPORTED;
2360 }
2361 #else //__nvoc_gpu_h_disabled
2362 #define gpuWaitGC6Ready(pGpu) gpuWaitGC6Ready_56cd7a(pGpu)
2363 #endif //__nvoc_gpu_h_disabled
2364 
2365 #define gpuWaitGC6Ready_HAL(pGpu) gpuWaitGC6Ready(pGpu)
2366 
2367 static inline void gpuResetVFRegisters_b3696a(struct OBJGPU *pGpu, NvU32 gfid) {
2368     return;
2369 }
2370 
2371 void gpuResetVFRegisters_TU102(struct OBJGPU *pGpu, NvU32 gfid);
2372 
2373 
2374 #ifdef __nvoc_gpu_h_disabled
2375 static inline void gpuResetVFRegisters(struct OBJGPU *pGpu, NvU32 gfid) {
2376     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2377 }
2378 #else //__nvoc_gpu_h_disabled
2379 #define gpuResetVFRegisters(pGpu, gfid) gpuResetVFRegisters_b3696a(pGpu, gfid)
2380 #endif //__nvoc_gpu_h_disabled
2381 
2382 #define gpuResetVFRegisters_HAL(pGpu, gfid) gpuResetVFRegisters(pGpu, gfid)
2383 
2384 static inline NvU32 gpuGetSliLinkDetectionHalFlag_539ab4(struct OBJGPU *pGpu) {
2385     return 1;
2386 }
2387 
2388 
2389 #ifdef __nvoc_gpu_h_disabled
2390 static inline NvU32 gpuGetSliLinkDetectionHalFlag(struct OBJGPU *pGpu) {
2391     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2392     return 0;
2393 }
2394 #else //__nvoc_gpu_h_disabled
2395 #define gpuGetSliLinkDetectionHalFlag(pGpu) gpuGetSliLinkDetectionHalFlag_539ab4(pGpu)
2396 #endif //__nvoc_gpu_h_disabled
2397 
2398 #define gpuGetSliLinkDetectionHalFlag_HAL(pGpu) gpuGetSliLinkDetectionHalFlag(pGpu)
2399 
2400 void gpuDetectSliLinkFromGpus_GK104(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount);
2401 
2402 
2403 #ifdef __nvoc_gpu_h_disabled
2404 static inline void gpuDetectSliLinkFromGpus(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount) {
2405     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2406 }
2407 #else //__nvoc_gpu_h_disabled
2408 #define gpuDetectSliLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectSliLinkFromGpus_GK104(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
2409 #endif //__nvoc_gpu_h_disabled
2410 
2411 #define gpuDetectSliLinkFromGpus_HAL(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectSliLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
2412 
2413 static inline NvU32 gpuGetNvlinkLinkDetectionHalFlag_adde13(struct OBJGPU *pGpu) {
2414     return 2;
2415 }
2416 
2417 
2418 #ifdef __nvoc_gpu_h_disabled
2419 static inline NvU32 gpuGetNvlinkLinkDetectionHalFlag(struct OBJGPU *pGpu) {
2420     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2421     return 0;
2422 }
2423 #else //__nvoc_gpu_h_disabled
2424 #define gpuGetNvlinkLinkDetectionHalFlag(pGpu) gpuGetNvlinkLinkDetectionHalFlag_adde13(pGpu)
2425 #endif //__nvoc_gpu_h_disabled
2426 
2427 #define gpuGetNvlinkLinkDetectionHalFlag_HAL(pGpu) gpuGetNvlinkLinkDetectionHalFlag(pGpu)
2428 
2429 void gpuDetectNvlinkLinkFromGpus_GP100(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount);
2430 
2431 
2432 #ifdef __nvoc_gpu_h_disabled
gpuDetectNvlinkLinkFromGpus(struct OBJGPU * pGpu,NvU32 gpuCount,NvU32 gpuMaskArg,NvU32 * pSliLinkOutputMask,NvBool * pSliLinkCircular,NvU32 * pSliLinkEndsMask,NvU32 * pVidLinkCount)2433 static inline void gpuDetectNvlinkLinkFromGpus(struct OBJGPU *pGpu, NvU32 gpuCount, NvU32 gpuMaskArg, NvU32 *pSliLinkOutputMask, NvBool *pSliLinkCircular, NvU32 *pSliLinkEndsMask, NvU32 *pVidLinkCount) {
2434     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2435 }
2436 #else //__nvoc_gpu_h_disabled
2437 #define gpuDetectNvlinkLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectNvlinkLinkFromGpus_GP100(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
2438 #endif //__nvoc_gpu_h_disabled
2439 
2440 #define gpuDetectNvlinkLinkFromGpus_HAL(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount) gpuDetectNvlinkLinkFromGpus(pGpu, gpuCount, gpuMaskArg, pSliLinkOutputMask, pSliLinkCircular, pSliLinkEndsMask, pVidLinkCount)
2441 
2442 NvU32 gpuGetLitterValues_KERNEL(struct OBJGPU *pGpu, NvU32 index);
2443 
2444 NvU32 gpuGetLitterValues_TU102(struct OBJGPU *pGpu, NvU32 index);
2445 
2446 NvU32 gpuGetLitterValues_GA100(struct OBJGPU *pGpu, NvU32 index);
2447 
2448 NvU32 gpuGetLitterValues_GA102(struct OBJGPU *pGpu, NvU32 index);
2449 
2450 NvU32 gpuGetLitterValues_AD102(struct OBJGPU *pGpu, NvU32 index);
2451 
2452 NvU32 gpuGetLitterValues_GH100(struct OBJGPU *pGpu, NvU32 index);
2453 
2454 
2455 #ifdef __nvoc_gpu_h_disabled
gpuGetLitterValues(struct OBJGPU * pGpu,NvU32 index)2456 static inline NvU32 gpuGetLitterValues(struct OBJGPU *pGpu, NvU32 index) {
2457     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
2458     return 0;
2459 }
2460 #else //__nvoc_gpu_h_disabled
2461 #define gpuGetLitterValues(pGpu, index) gpuGetLitterValues_KERNEL(pGpu, index)
2462 #endif //__nvoc_gpu_h_disabled
2463 
2464 #define gpuGetLitterValues_HAL(pGpu, index) gpuGetLitterValues(pGpu, index)

static inline NV_STATUS gpuSetCacheOnlyModeOverrides_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetCacheOnlyModeOverrides(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetCacheOnlyModeOverrides(pGpu) gpuSetCacheOnlyModeOverrides_56cd7a(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuSetCacheOnlyModeOverrides_HAL(pGpu) gpuSetCacheOnlyModeOverrides(pGpu)

NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(struct OBJGPU *arg0, NvU32 *arg1);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuGetCeFaultMethodBufferSize(struct OBJGPU *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuGetCeFaultMethodBufferSize(arg0, arg1) gpuGetCeFaultMethodBufferSize_KERNEL(arg0, arg1)
#endif //__nvoc_gpu_h_disabled

#define gpuGetCeFaultMethodBufferSize_HAL(arg0, arg1) gpuGetCeFaultMethodBufferSize(arg0, arg1)

static inline NV_STATUS gpuSetVFBarSizes_46f6a7(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS gpuSetVFBarSizes_GA102(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetVFBarSizes(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetVFBarSizes(pGpu, arg0) gpuSetVFBarSizes_46f6a7(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

#define gpuSetVFBarSizes_HAL(pGpu, arg0) gpuSetVFBarSizes(pGpu, arg0)

static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId_80f438(struct OBJGPU *pGpu, NvU32 peerGpuId) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
}


#ifdef __nvoc_gpu_h_disabled
static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId(struct OBJGPU *pGpu, NvU32 peerGpuId) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NULL;
}
#else //__nvoc_gpu_h_disabled
#define gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId_80f438(pGpu, peerGpuId)
#endif //__nvoc_gpu_h_disabled

#define gpuFindP2PPeerGpuCapsByGpuId_HAL(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId)

static inline NV_STATUS gpuLoadFailurePathTest_56cd7a(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
    return NV_OK;
}


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuLoadFailurePathTest(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest_56cd7a(pGpu, engStage, engDescIdx, bStopTest)
#endif //__nvoc_gpu_h_disabled

#define gpuLoadFailurePathTest_HAL(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest)

static inline NV_STATUS gpuSetPartitionErrorAttribution_c04480(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 arg2) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

NV_STATUS gpuSetPartitionErrorAttribution_GA100(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 arg2);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuSetPartitionErrorAttribution(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuSetPartitionErrorAttribution(pGpu, arg0, arg1, arg2) gpuSetPartitionErrorAttribution_c04480(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

#define gpuSetPartitionErrorAttribution_HAL(pGpu, arg0, arg1, arg2) gpuSetPartitionErrorAttribution(pGpu, arg0, arg1, arg2)

NV_STATUS gpuCreateRusdMemory_IMPL(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCreateRusdMemory(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCreateRusdMemory(pGpu) gpuCreateRusdMemory_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCreateRusdMemory_HAL(pGpu) gpuCreateRusdMemory(pGpu)

NvBool gpuCheckEccCounts_TU102(struct OBJGPU *pGpu);


#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckEccCounts(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckEccCounts(pGpu) gpuCheckEccCounts_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled

#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts(pGpu)

NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuConstructDeviceInfoTable_VGPUSTUB(struct OBJGPU *pGpu);

static inline NV_STATUS gpuConstructDeviceInfoTable_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuConstructDeviceInfoTable_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuConstructDeviceInfoTable__(pGpu);
}
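
//
// Note: unlike the macro-based HAL entries above, the _DISPATCH helpers in this
// region call through a per-object function pointer (pGpu->__gpu...__),
// presumably set up by the NVOC-generated constructor, so the _IMPL / _KERNEL /
// _FWCLIENT / _VGPUSTUB / per-chip variants declared next to each dispatcher are
// the bodies that pointer may resolve to at runtime. A caller would invoke the
// dispatcher directly (illustrative sketch only):
//
//     NV_STATUS status = gpuConstructDeviceInfoTable_DISPATCH(pGpu);
//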

NV_STATUS gpuGetNameString_VGPUSTUB(struct OBJGPU *pGpu, NvU32 arg0, void *arg1);

NV_STATUS gpuGetNameString_KERNEL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1);

NV_STATUS gpuGetNameString_IMPL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1);

static inline NV_STATUS gpuGetNameString_DISPATCH(struct OBJGPU *pGpu, NvU32 arg0, void *arg1) {
    return pGpu->__gpuGetNameString__(pGpu, arg0, arg1);
}

NV_STATUS gpuGetShortNameString_VGPUSTUB(struct OBJGPU *pGpu, NvU8 *arg0);

NV_STATUS gpuGetShortNameString_KERNEL(struct OBJGPU *pGpu, NvU8 *arg0);

NV_STATUS gpuGetShortNameString_IMPL(struct OBJGPU *pGpu, NvU8 *arg0);

static inline NV_STATUS gpuGetShortNameString_DISPATCH(struct OBJGPU *pGpu, NvU8 *arg0) {
    return pGpu->__gpuGetShortNameString__(pGpu, arg0);
}

NV_STATUS gpuInitBranding_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuInitBranding_VGPUSTUB(struct OBJGPU *pGpu);

NV_STATUS gpuInitBranding_IMPL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuInitBranding_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuInitBranding__(pGpu);
}

void gpuInitProperties_FWCLIENT(struct OBJGPU *pGpu);

static inline void gpuInitProperties_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuInitProperties_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuInitProperties__(pGpu);
}

NV_STATUS gpuBuildKernelVideoEngineList_IMPL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuBuildKernelVideoEngineList_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuBuildKernelVideoEngineList_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuBuildKernelVideoEngineList__(pGpu);
}

NV_STATUS gpuInitVideoLogging_IMPL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuInitVideoLogging_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuInitVideoLogging_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuInitVideoLogging__(pGpu);
}

void gpuFreeVideoLogging_IMPL(struct OBJGPU *pGpu);

static inline void gpuFreeVideoLogging_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuFreeVideoLogging_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuFreeVideoLogging__(pGpu);
}

void gpuDestroyKernelVideoEngineList_IMPL(struct OBJGPU *pGpu);

static inline void gpuDestroyKernelVideoEngineList_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuDestroyKernelVideoEngineList_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuDestroyKernelVideoEngineList__(pGpu);
}

NV_STATUS gpuPowerOff_KERNEL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuPowerOff_46f6a7(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS gpuPowerOff_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuPowerOff__(pGpu);
}

NV_STATUS gpuWriteBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

NV_STATUS gpuWriteBusConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 value);

static inline NV_STATUS gpuWriteBusConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
    return pGpu->__gpuWriteBusConfigReg__(pGpu, index, value);
}

NV_STATUS gpuReadBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

NV_STATUS gpuReadBusConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

static inline NV_STATUS gpuReadBusConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return pGpu->__gpuReadBusConfigReg__(pGpu, index, data);
}

NV_STATUS gpuReadBusConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState);

static inline NV_STATUS gpuReadBusConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuReadBusConfigRegEx_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
    return pGpu->__gpuReadBusConfigRegEx__(pGpu, index, data, pThreadState);
}

NV_STATUS gpuReadFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data);

static inline NV_STATUS gpuReadFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuReadFunctionConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
    return pGpu->__gpuReadFunctionConfigReg__(pGpu, function, reg, data);
}

NV_STATUS gpuWriteFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data);

static inline NV_STATUS gpuWriteFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuWriteFunctionConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) {
    return pGpu->__gpuWriteFunctionConfigReg__(pGpu, function, reg, data);
}

NV_STATUS gpuWriteFunctionConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState);

static inline NV_STATUS gpuWriteFunctionConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuWriteFunctionConfigRegEx_DISPATCH(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) {
    return pGpu->__gpuWriteFunctionConfigRegEx__(pGpu, function, reg, data, pThreadState);
}

NV_STATUS gpuReadVgpuConfigReg_GH100(struct OBJGPU *pGpu, NvU32 index, NvU32 *data);

static inline NV_STATUS gpuReadVgpuConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS gpuReadVgpuConfigReg_DISPATCH(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
    return pGpu->__gpuReadVgpuConfigReg__(pGpu, index, data);
}

void gpuGetIdInfo_GM107(struct OBJGPU *pGpu);

void gpuGetIdInfo_GH100(struct OBJGPU *pGpu);

static inline void gpuGetIdInfo_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuGetIdInfo__(pGpu);
}

NV_STATUS gpuGenGidData_VGPUSTUB(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);

NV_STATUS gpuGenGidData_FWCLIENT(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);

NV_STATUS gpuGenGidData_GK104(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);

static inline NV_STATUS gpuGenGidData_DISPATCH(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) {
    return pGpu->__gpuGenGidData__(pGpu, pGidData, gidSize, gidFlags);
}

NvU8 gpuGetChipSubRev_FWCLIENT(struct OBJGPU *pGpu);

static inline NvU8 gpuGetChipSubRev_4a4dee(struct OBJGPU *pGpu) {
    return 0;
}

NvU8 gpuGetChipSubRev_GK104(struct OBJGPU *pGpu);

NvU8 gpuGetChipSubRev_GA100(struct OBJGPU *pGpu);

static inline NvU8 gpuGetChipSubRev_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuGetChipSubRev__(pGpu);
}

static inline NV_STATUS gpuGetSkuInfo_92bfc3(struct OBJGPU *pGpu, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pParams) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS gpuGetSkuInfo_VGPUSTUB(struct OBJGPU *pGpu, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pParams);

static inline NV_STATUS gpuGetSkuInfo_DISPATCH(struct OBJGPU *pGpu, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pParams) {
    return pGpu->__gpuGetSkuInfo__(pGpu, pParams);
}

NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);

NV_STATUS gpuGetRegBaseOffset_TU102(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);

static inline NV_STATUS gpuGetRegBaseOffset_DISPATCH(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) {
    return pGpu->__gpuGetRegBaseOffset__(pGpu, arg0, arg1);
}

void gpuHandleSanityCheckRegReadError_GM107(struct OBJGPU *pGpu, NvU32 addr, NvU32 value);

void gpuHandleSanityCheckRegReadError_GH100(struct OBJGPU *pGpu, NvU32 addr, NvU32 value);

static inline void gpuHandleSanityCheckRegReadError_DISPATCH(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) {
    pGpu->__gpuHandleSanityCheckRegReadError__(pGpu, addr, value);
}

void gpuHandleSecFault_GH100(struct OBJGPU *pGpu);

static inline void gpuHandleSecFault_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuHandleSecFault_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuHandleSecFault__(pGpu);
}

static inline NV_STATUS gpuSanityCheckVirtRegAccess_56cd7a(struct OBJGPU *pGpu, NvU32 arg0) {
    return NV_OK;
}

NV_STATUS gpuSanityCheckVirtRegAccess_TU102(struct OBJGPU *pGpu, NvU32 arg0);

NV_STATUS gpuSanityCheckVirtRegAccess_GH100(struct OBJGPU *pGpu, NvU32 arg0);

static inline NV_STATUS gpuSanityCheckVirtRegAccess_DISPATCH(struct OBJGPU *pGpu, NvU32 arg0) {
    return pGpu->__gpuSanityCheckVirtRegAccess__(pGpu, arg0);
}

const GPUCHILDPRESENT *gpuGetChildrenPresent_TU102(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_TU104(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_TU106(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_GA100(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_GA102(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_AD102(struct OBJGPU *pGpu, NvU32 *pNumEntries);

const GPUCHILDPRESENT *gpuGetChildrenPresent_GH100(struct OBJGPU *pGpu, NvU32 *pNumEntries);

static inline const GPUCHILDPRESENT *gpuGetChildrenPresent_DISPATCH(struct OBJGPU *pGpu, NvU32 *pNumEntries) {
    return pGpu->__gpuGetChildrenPresent__(pGpu, pNumEntries);
}

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU102(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU104(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU106(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU117(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA100(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA102(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_AD102(struct OBJGPU *pGpu, NvU32 *arg0);

const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GH100(struct OBJGPU *pGpu, NvU32 *arg0);

static inline const CLASSDESCRIPTOR *gpuGetClassDescriptorList_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg0) {
    return pGpu->__gpuGetClassDescriptorList__(pGpu, arg0);
}

NvU32 gpuGetPhysAddrWidth_TU102(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0);

NvU32 gpuGetPhysAddrWidth_GH100(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0);

static inline NvU32 gpuGetPhysAddrWidth_DISPATCH(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0) {
    return pGpu->__gpuGetPhysAddrWidth__(pGpu, arg0);
}

NV_STATUS gpuInitSriov_VGPUSTUB(struct OBJGPU *pGpu);

NV_STATUS gpuInitSriov_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuInitSriov_TU102(struct OBJGPU *pGpu);

static inline NV_STATUS gpuInitSriov_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuInitSriov__(pGpu);
}

static inline NV_STATUS gpuDeinitSriov_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuDeinitSriov_FWCLIENT(struct OBJGPU *pGpu);

NV_STATUS gpuDeinitSriov_TU102(struct OBJGPU *pGpu);

static inline NV_STATUS gpuDeinitSriov_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuDeinitSriov__(pGpu);
}

NV_STATUS gpuCreateDefaultClientShare_VGPUSTUB(struct OBJGPU *pGpu);

static inline NV_STATUS gpuCreateDefaultClientShare_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuCreateDefaultClientShare_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuCreateDefaultClientShare__(pGpu);
}

void gpuDestroyDefaultClientShare_VGPUSTUB(struct OBJGPU *pGpu);

static inline void gpuDestroyDefaultClientShare_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuDestroyDefaultClientShare_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuDestroyDefaultClientShare__(pGpu);
}

static inline NvU64 gpuGetVmmuSegmentSize_13cd8d(struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(0);
    return 0;
}

static inline NvU64 gpuGetVmmuSegmentSize_72c522(struct OBJGPU *pGpu) {
    return pGpu->vmmuSegmentSize;
}

static inline NvU64 gpuGetVmmuSegmentSize_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuGetVmmuSegmentSize__(pGpu);
}

NvBool gpuFuseSupportsDisplay_GM107(struct OBJGPU *pGpu);

NvBool gpuFuseSupportsDisplay_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuFuseSupportsDisplay_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}
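
//
// Note: hash-suffixed constant bodies such as _491d52 and _cbe027 encode fixed
// boolean results: ((NvBool)(0 != 0)) evaluates to NV_FALSE and
// ((NvBool)(0 == 0)) evaluates to NV_TRUE, so gpuFuseSupportsDisplay_491d52
// above unconditionally reports that the display fuse is not supported.
//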

static inline NvBool gpuFuseSupportsDisplay_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuFuseSupportsDisplay__(pGpu);
}

NvU32 gpuGetActiveFBIOs_VGPUSTUB(struct OBJGPU *pGpu);

NvU32 gpuGetActiveFBIOs_FWCLIENT(struct OBJGPU *pGpu);

NvU32 gpuGetActiveFBIOs_GM107(struct OBJGPU *pGpu);

static inline NvU32 gpuGetActiveFBIOs_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuGetActiveFBIOs__(pGpu);
}

NvBool gpuCheckPageRetirementSupport_VGPUSTUB(struct OBJGPU *pGpu);

NvBool gpuCheckPageRetirementSupport_GSPCLIENT(struct OBJGPU *pGpu);

NvBool gpuCheckPageRetirementSupport_GV100(struct OBJGPU *pGpu);

static inline NvBool gpuCheckPageRetirementSupport_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuCheckPageRetirementSupport__(pGpu);
}

static inline NvBool gpuIsInternalSku_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

NvBool gpuIsInternalSku_FWCLIENT(struct OBJGPU *pGpu);

NvBool gpuIsInternalSku_GP100(struct OBJGPU *pGpu);

static inline NvBool gpuIsInternalSku_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsInternalSku__(pGpu);
}

NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_GA100(struct OBJGPU *pGpu);

static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuClearFbhubPoisonIntrForBug2924523__(pGpu);
}

NvBool gpuCheckIfFbhubPoisonIntrPending_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuCheckIfFbhubPoisonIntrPending_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuCheckIfFbhubPoisonIntrPending_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuCheckIfFbhubPoisonIntrPending__(pGpu);
}

static inline NV_STATUS gpuGetSriovCaps_46f6a7(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS gpuGetSriovCaps_TU102(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0);

static inline NV_STATUS gpuGetSriovCaps_DISPATCH(struct OBJGPU *pGpu, NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *arg0) {
    return pGpu->__gpuGetSriovCaps__(pGpu, arg0);
}

static inline NvBool gpuCheckIsP2PAllocated_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

NvBool gpuCheckIsP2PAllocated_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuCheckIsP2PAllocated_108313(struct OBJGPU *pGpu) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0)));
}

static inline NvBool gpuCheckIsP2PAllocated_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuCheckIsP2PAllocated__(pGpu);
}

static inline NV_STATUS gpuPrePowerOff_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NV_STATUS gpuPrePowerOff_46f6a7(struct OBJGPU *pGpu) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS gpuPrePowerOff_GM107(struct OBJGPU *pGpu);

static inline NV_STATUS gpuPrePowerOff_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuPrePowerOff__(pGpu);
}

static inline NV_STATUS gpuVerifyExistence_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuVerifyExistence_IMPL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuVerifyExistence_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuVerifyExistence__(pGpu);
}

NvU64 gpuGetFlaVasSize_GA100(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization);

NvU64 gpuGetFlaVasSize_GH100(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization);

static inline NvU64 gpuGetFlaVasSize_474d46(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, 0);
}

static inline NvU64 gpuGetFlaVasSize_DISPATCH(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) {
    return pGpu->__gpuGetFlaVasSize__(pGpu, bNvswitchVirtualization);
}

NvBool gpuIsAtsSupportedWithSmcMemPartitioning_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsAtsSupportedWithSmcMemPartitioning__(pGpu);
}

NvBool gpuIsGlobalPoisonFuseEnabled_VGPUSTUB(struct OBJGPU *pGpu);

NvBool gpuIsGlobalPoisonFuseEnabled_FWCLIENT(struct OBJGPU *pGpu);

static inline NvBool gpuIsGlobalPoisonFuseEnabled_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsGlobalPoisonFuseEnabled__(pGpu);
}

void gpuDetermineSelfHostedMode_PHYSICAL_GH100(struct OBJGPU *pGpu);

static inline void gpuDetermineSelfHostedMode_b3696a(struct OBJGPU *pGpu) {
    return;
}

void gpuDetermineSelfHostedMode_KERNEL_GH100(struct OBJGPU *pGpu);

static inline void gpuDetermineSelfHostedMode_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuDetermineSelfHostedMode__(pGpu);
}

void gpuDetermineMIGSupport_GH100(struct OBJGPU *pGpu);

static inline void gpuDetermineMIGSupport_b3696a(struct OBJGPU *pGpu) {
    return;
}

static inline void gpuDetermineMIGSupport_DISPATCH(struct OBJGPU *pGpu) {
    pGpu->__gpuDetermineMIGSupport__(pGpu);
}

static inline NV_STATUS gpuInitOptimusSettings_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuInitOptimusSettings_IMPL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuInitOptimusSettings_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuInitOptimusSettings__(pGpu);
}

static inline NV_STATUS gpuDeinitOptimusSettings_56cd7a(struct OBJGPU *pGpu) {
    return NV_OK;
}

NV_STATUS gpuDeinitOptimusSettings_IMPL(struct OBJGPU *pGpu);

static inline NV_STATUS gpuDeinitOptimusSettings_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuDeinitOptimusSettings__(pGpu);
}

static inline NvBool gpuIsSliCapableWithoutDisplay_cbe027(struct OBJGPU *pGpu) {
    return ((NvBool)(0 == 0));
}

static inline NvBool gpuIsSliCapableWithoutDisplay_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsSliCapableWithoutDisplay_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsSliCapableWithoutDisplay__(pGpu);
}

static inline NvBool gpuIsCCEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

NvBool gpuIsCCEnabledInHw_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsCCEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsCCEnabledInHw__(pGpu);
}

NvBool gpuIsDevModeEnabledInHw_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsDevModeEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsDevModeEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsDevModeEnabledInHw__(pGpu);
}

NvBool gpuIsProtectedPcieEnabledInHw_GH100(struct OBJGPU *pGpu);

static inline NvBool gpuIsProtectedPcieEnabledInHw_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsProtectedPcieEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsProtectedPcieEnabledInHw__(pGpu);
}

NvBool gpuIsCtxBufAllocInPmaSupported_GA100(struct OBJGPU *pGpu);

static inline NvBool gpuIsCtxBufAllocInPmaSupported_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuIsCtxBufAllocInPmaSupported_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuIsCtxBufAllocInPmaSupported__(pGpu);
}

static inline NV_STATUS gpuUpdateErrorContainmentState_c04480(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 *arg2) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

NV_STATUS gpuUpdateErrorContainmentState_GA100(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 *arg2);

static inline NV_STATUS gpuUpdateErrorContainmentState_f91eed(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 *arg2) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_OK);
}

static inline NV_STATUS gpuUpdateErrorContainmentState_DISPATCH(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg0, NV_ERROR_CONT_LOCATION arg1, NvU32 *arg2) {
    return pGpu->__gpuUpdateErrorContainmentState__(pGpu, arg0, arg1, arg2);
}

NV_STATUS gpuWaitForGfwBootComplete_TU102(struct OBJGPU *pGpu);

static inline NV_STATUS gpuWaitForGfwBootComplete_5baef9(struct OBJGPU *pGpu) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS gpuWaitForGfwBootComplete_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuWaitForGfwBootComplete__(pGpu);
}

static inline NvBool gpuGetIsCmpSku_ceaee8(struct OBJGPU *pGpu) {
    NV_ASSERT_PRECOMP(0);
    return ((NvBool)(0 != 0));
}

static inline NvBool gpuGetIsCmpSku_491d52(struct OBJGPU *pGpu) {
    return ((NvBool)(0 != 0));
}

NvBool gpuGetIsCmpSku_GV100(struct OBJGPU *pGpu);

static inline NvBool gpuGetIsCmpSku_DISPATCH(struct OBJGPU *pGpu) {
    return pGpu->__gpuGetIsCmpSku__(pGpu);
}

static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineInitDescriptors;
}

static inline PENGDESCRIPTOR gpuGetLoadEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineLoadDescriptors;
}

static inline PENGDESCRIPTOR gpuGetUnloadEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineUnloadDescriptors;
}

static inline PENGDESCRIPTOR gpuGetDestroyEngineDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.pEngineDestroyDescriptors;
}

static inline NvU32 gpuGetNumEngDescriptors(struct OBJGPU *pGpu) {
    return pGpu->engineOrder.numEngineDescriptors;
}

static inline NvU32 gpuGetMode(struct OBJGPU *pGpu) {
    return pGpu->computeModeRefCount > 0 ? 2 : 1;
}
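
//
// Note: per the expression above, gpuGetMode reports 2 while computeModeRefCount
// is nonzero and 1 otherwise; these numeric values are assumed here to map to the
// compute-mode and graphics-mode constants defined elsewhere in gpu.h.
//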

static inline ACPI_DSM_FUNCTION gpuGetDispStatusHotplugFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.dispStatusHotplugFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetDispStatusConfigFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.dispStatusConfigFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetPerfPostPowerStateFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.perfPostPowerStateFunc;
}

static inline ACPI_DSM_FUNCTION gpuGetStereo3dStateActiveFunc(struct OBJGPU *pGpu) {
    return pGpu->acpi.stereo3dStateActiveFunc;
}

static inline NvU32 gpuGetPmcBoot0(struct OBJGPU *pGpu) {
    return pGpu->chipId0;
}

static inline struct OBJFIFO *gpuGetFifoShared(struct OBJGPU *pGpu) {
    return ((void *)0);
}

static inline ENGSTATE_ITER gpuGetEngstateIter(struct OBJGPU *pGpu) {
    GPU_CHILD_ITER it = { 0 };
    return it;
}

static inline RmPhysAddr gpuGetDmaStartAddress(struct OBJGPU *pGpu) {
    return pGpu->dmaStartAddress;
}

static inline NV_STATUS gpuFreeEventHandle(struct OBJGPU *pGpu) {
    return NV_OK;
}

static inline NvU32 gpuGetChipMajRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.majorRev;
}

static inline NvU32 gpuGetChipMinRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.minorRev;
}

static inline NvU32 gpuGetChipImpl(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.implementationId;
}

static inline NvU32 gpuGetChipArch(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.platformId;
}

static inline NvU32 gpuGetChipMinExtRev(struct OBJGPU *pGpu) {
    return pGpu->chipInfo.pmcBoot42.minorExtRev;
}

static inline NvBool gpuIsVideoLinkDisabled(struct OBJGPU *pGpu) {
    return pGpu->bVideoLinkDisabled;
}

static inline const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *gpuGetChipInfo(struct OBJGPU *pGpu) {
    return pGpu->pChipInfo;
}

static inline NvBool gpuIsBar2MovedByVtd(struct OBJGPU *pGpu) {
    return pGpu->bBar2MovedByVtd;
}

static inline NvBool gpuIsBar1Size64Bit(struct OBJGPU *pGpu) {
    return pGpu->bBar1Is64Bit;
}

static inline NvBool gpuIsSurpriseRemovalSupported(struct OBJGPU *pGpu) {
    return pGpu->bSurpriseRemovalSupported;
}

static inline NvBool gpuIsReplayableTraceEnabled(struct OBJGPU *pGpu) {
    return pGpu->bReplayableTraceEnabled;
}

static inline NvBool gpuIsStateLoading(struct OBJGPU *pGpu) {
    return pGpu->bStateLoading;
}

static inline NvBool gpuIsStateUnloading(struct OBJGPU *pGpu) {
    return pGpu->bStateUnloading;
}

static inline NvBool gpuIsStateLoaded(struct OBJGPU *pGpu) {
    return pGpu->bStateLoaded;
}

static inline NvBool gpuIsFullyConstructed(struct OBJGPU *pGpu) {
    return pGpu->bFullyConstructed;
}

static inline NvBool gpuIsUnifiedMemorySpaceEnabled(struct OBJGPU *pGpu) {
    return pGpu->bUnifiedMemorySpaceEnabled;
}

static inline NvBool gpuIsWarBug4040336Enabled(struct OBJGPU *pGpu) {
    return pGpu->bBf3WarBug4040336Enabled;
}

static inline NvBool gpuIsSriovEnabled(struct OBJGPU *pGpu) {
    return pGpu->bSriovEnabled;
}

static inline NvBool gpuIsCacheOnlyModeEnabled(struct OBJGPU *pGpu) {
    return pGpu->bCacheOnlyMode;
}

static inline NvBool gpuIsSplitVasManagementServerClientRmEnabled(struct OBJGPU *pGpu) {
    return pGpu->bSplitVasManagementServerClientRm;
}

static inline NvBool gpuIsWarBug200577889SriovHeavyEnabled(struct OBJGPU *pGpu) {
    return pGpu->bWarBug200577889SriovHeavyEnabled;
}

static inline NvBool gpuIsPipelinedPteMemEnabled(struct OBJGPU *pGpu) {
    return pGpu->bPipelinedPteMemEnabled;
}

static inline NvBool gpuIsBarPteInSysmemSupported(struct OBJGPU *pGpu) {
    return pGpu->bIsBarPteInSysmemSupported;
}

static inline NvBool gpuIsRegUsesGlobalSurfaceOverridesEnabled(struct OBJGPU *pGpu) {
    return pGpu->bRegUsesGlobalSurfaceOverrides;
}

static inline NvBool gpuIsTwoStageRcRecoveryEnabled(struct OBJGPU *pGpu) {
    return pGpu->bTwoStageRcRecoveryEnabled;
}

static inline NvBool gpuIsInD3Cold(struct OBJGPU *pGpu) {
    return pGpu->bInD3Cold;
}

static inline NvBool gpuIsClientRmAllocatedCtxBufferEnabled(struct OBJGPU *pGpu) {
    return pGpu->bClientRmAllocatedCtxBuffer;
}

static inline NvBool gpuIsIterativeMmuWalkerEnabled(struct OBJGPU *pGpu) {
    return pGpu->bIterativeMmuWalker;
}

static inline NvBool gpuIsEccPageRetirementWithSliAllowed(struct OBJGPU *pGpu) {
    return pGpu->bEccPageRetirementWithSliAllowed;
}

static inline NvBool gpuIsVidmemPreservationBrokenBug3172217(struct OBJGPU *pGpu) {
    return pGpu->bVidmemPreservationBrokenBug3172217;
}

static inline NvBool gpuIsInstanceMemoryAlwaysCached(struct OBJGPU *pGpu) {
    return pGpu->bInstanceMemoryAlwaysCached;
}

static inline NvBool gpuIsRmProfilingPrivileged(struct OBJGPU *pGpu) {
    return pGpu->bRmProfilingPrivileged;
}

static inline NvBool gpuIsGeforceSmb(struct OBJGPU *pGpu) {
    return pGpu->bGeforceSmb;
}

static inline NvBool gpuIsGeforceBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsGeforce;
}

static inline NvBool gpuIsQuadroBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsQuadro;
}

static inline NvBool gpuIsVgxBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsVgx;
}

static inline NvBool gpuIsNvidiaNvsBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsNvidiaNvs;
}

static inline NvBool gpuIsTitanBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsTitan;
}

static inline NvBool gpuIsTeslaBranded(struct OBJGPU *pGpu) {
    return pGpu->bIsTesla;
}

static inline NvBool gpuIsComputePolicyTimesliceSupported(struct OBJGPU *pGpu) {
    return pGpu->bComputePolicyTimesliceSupported;
}

static inline NvBool gpuIsSriovCapable(struct OBJGPU *pGpu) {
    return pGpu->bSriovCapable;
}

static inline NvBool gpuIsNonPowerOf2ChannelCountSupported(struct OBJGPU *pGpu) {
    return pGpu->bNonPowerOf2ChannelCountSupported;
}

static inline NvBool gpuIsVfResizableBAR1Supported(struct OBJGPU *pGpu) {
    return pGpu->bVfResizableBAR1Supported;
}

static inline NvBool gpuIsVoltaHubIntrSupported(struct OBJGPU *pGpu) {
    return pGpu->bVoltaHubIntrSupported;
}

static inline NvBool gpuIsAmpereErrorContainmentXidEnabled(struct OBJGPU *pGpu) {
    return pGpu->bAmpereErrorContainmentXidEnabled;
}

static inline NvBool gpuIsSelfHosted(struct OBJGPU *pGpu) {
    return pGpu->bIsSelfHosted;
}

static inline NvBool gpuIsGspOwnedFaultBuffersEnabled(struct OBJGPU *pGpu) {
    return pGpu->bIsGspOwnedFaultBuffersEnabled;
}

NV_STATUS gpuConstruct_IMPL(struct OBJGPU *arg_pGpu, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid *arg_pUuid);

#define __nvoc_gpuConstruct(arg_pGpu, arg_gpuInstance, arg_gpuId, arg_pUuid) gpuConstruct_IMPL(arg_pGpu, arg_gpuInstance, arg_gpuId, arg_pUuid)
NV_STATUS gpuBindHalLegacy_IMPL(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuBindHalLegacy(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuBindHalLegacy(pGpu, chipId0, chipId1, socChipId0) gpuBindHalLegacy_IMPL(pGpu, chipId0, chipId1, socChipId0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuPostConstruct_IMPL(struct OBJGPU *pGpu, GPUATTACHARG *arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuPostConstruct(struct OBJGPU *pGpu, GPUATTACHARG *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuPostConstruct(pGpu, arg0) gpuPostConstruct_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuCreateObject_IMPL(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCreateObject(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCreateObject(pGpu, arg0, arg1) gpuCreateObject_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_gpu_h_disabled

void gpuDestruct_IMPL(struct OBJGPU *pGpu);

#define __nvoc_gpuDestruct(pGpu) gpuDestruct_IMPL(pGpu)
NV_STATUS gpuStateInit_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateInit(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateInit(pGpu) gpuStateInit_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuStateUnload_IMPL(struct OBJGPU *pGpu, NvU32 arg0);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuStateUnload(struct OBJGPU *pGpu, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuStateUnload(pGpu, arg0) gpuStateUnload_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled

NV_STATUS gpuInitDispIpHal_IMPL(struct OBJGPU *pGpu, NvU32 ipver);

#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuInitDispIpHal(struct OBJGPU *pGpu, NvU32 ipver) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuInitDispIpHal(pGpu, ipver) gpuInitDispIpHal_IMPL(pGpu, ipver)
#endif //__nvoc_gpu_h_disabled

void gpuServiceInterruptsAllGpus_IMPL(struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_h_disabled
static inline void gpuServiceInterruptsAllGpus(struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuServiceInterruptsAllGpus(pGpu) gpuServiceInterruptsAllGpus_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled

NvBool gpuIsImplementation_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2);

#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuIsImplementation(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuIsImplementation(pGpu, arg0, arg1, arg2) gpuIsImplementation_IMPL(pGpu, arg0, arg1, arg2)
#endif //__nvoc_gpu_h_disabled

3559 NvBool gpuIsImplementationOrBetter_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2);
3560 
3561 #ifdef __nvoc_gpu_h_disabled
gpuIsImplementationOrBetter(struct OBJGPU * pGpu,HAL_IMPLEMENTATION arg0,NvU32 arg1,NvU32 arg2)3562 static inline NvBool gpuIsImplementationOrBetter(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) {
3563     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3564     return NV_FALSE;
3565 }
3566 #else //__nvoc_gpu_h_disabled
3567 #define gpuIsImplementationOrBetter(pGpu, arg0, arg1, arg2) gpuIsImplementationOrBetter_IMPL(pGpu, arg0, arg1, arg2)
3568 #endif //__nvoc_gpu_h_disabled
3569 
3570 NvBool gpuIsGpuFullPower_IMPL(struct OBJGPU *pGpu);
3571 
3572 #ifdef __nvoc_gpu_h_disabled
3573 static inline NvBool gpuIsGpuFullPower(struct OBJGPU *pGpu) {
3574     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3575     return NV_FALSE;
3576 }
3577 #else //__nvoc_gpu_h_disabled
3578 #define gpuIsGpuFullPower(pGpu) gpuIsGpuFullPower_IMPL(pGpu)
3579 #endif //__nvoc_gpu_h_disabled
3580 
3581 NvBool gpuIsGpuFullPowerForPmResume_IMPL(struct OBJGPU *pGpu);
3582 
3583 #ifdef __nvoc_gpu_h_disabled
3584 static inline NvBool gpuIsGpuFullPowerForPmResume(struct OBJGPU *pGpu) {
3585     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3586     return NV_FALSE;
3587 }
3588 #else //__nvoc_gpu_h_disabled
3589 #define gpuIsGpuFullPowerForPmResume(pGpu) gpuIsGpuFullPowerForPmResume_IMPL(pGpu)
3590 #endif //__nvoc_gpu_h_disabled
3591 
3592 NV_STATUS gpuGetDeviceEntryByType_IMPL(struct OBJGPU *pGpu, NvU32 deviceTypeEnum, NvS32 groupId, NvU32 instanceId, const DEVICE_INFO2_ENTRY **ppDeviceEntry);
3593 
3594 #ifdef __nvoc_gpu_h_disabled
3595 static inline NV_STATUS gpuGetDeviceEntryByType(struct OBJGPU *pGpu, NvU32 deviceTypeEnum, NvS32 groupId, NvU32 instanceId, const DEVICE_INFO2_ENTRY **ppDeviceEntry) {
3596     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3597     return NV_ERR_NOT_SUPPORTED;
3598 }
3599 #else //__nvoc_gpu_h_disabled
3600 #define gpuGetDeviceEntryByType(pGpu, deviceTypeEnum, groupId, instanceId, ppDeviceEntry) gpuGetDeviceEntryByType_IMPL(pGpu, deviceTypeEnum, groupId, instanceId, ppDeviceEntry)
3601 #endif //__nvoc_gpu_h_disabled
3602 
3603 NV_STATUS gpuBuildClassDB_IMPL(struct OBJGPU *pGpu);
3604 
3605 #ifdef __nvoc_gpu_h_disabled
3606 static inline NV_STATUS gpuBuildClassDB(struct OBJGPU *pGpu) {
3607     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3608     return NV_ERR_NOT_SUPPORTED;
3609 }
3610 #else //__nvoc_gpu_h_disabled
3611 #define gpuBuildClassDB(pGpu) gpuBuildClassDB_IMPL(pGpu)
3612 #endif //__nvoc_gpu_h_disabled
3613 
3614 NV_STATUS gpuDestroyClassDB_IMPL(struct OBJGPU *pGpu);
3615 
3616 #ifdef __nvoc_gpu_h_disabled
3617 static inline NV_STATUS gpuDestroyClassDB(struct OBJGPU *pGpu) {
3618     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3619     return NV_ERR_NOT_SUPPORTED;
3620 }
3621 #else //__nvoc_gpu_h_disabled
3622 #define gpuDestroyClassDB(pGpu) gpuDestroyClassDB_IMPL(pGpu)
3623 #endif //__nvoc_gpu_h_disabled
3624 
3625 NV_STATUS gpuDeleteEngineFromClassDB_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3626 
3627 #ifdef __nvoc_gpu_h_disabled
3628 static inline NV_STATUS gpuDeleteEngineFromClassDB(struct OBJGPU *pGpu, NvU32 arg0) {
3629     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3630     return NV_ERR_NOT_SUPPORTED;
3631 }
3632 #else //__nvoc_gpu_h_disabled
3633 #define gpuDeleteEngineFromClassDB(pGpu, arg0) gpuDeleteEngineFromClassDB_IMPL(pGpu, arg0)
3634 #endif //__nvoc_gpu_h_disabled
3635 
3636 NV_STATUS gpuDeleteEngineOnPreInit_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3637 
3638 #ifdef __nvoc_gpu_h_disabled
3639 static inline NV_STATUS gpuDeleteEngineOnPreInit(struct OBJGPU *pGpu, NvU32 arg0) {
3640     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3641     return NV_ERR_NOT_SUPPORTED;
3642 }
3643 #else //__nvoc_gpu_h_disabled
3644 #define gpuDeleteEngineOnPreInit(pGpu, arg0) gpuDeleteEngineOnPreInit_IMPL(pGpu, arg0)
3645 #endif //__nvoc_gpu_h_disabled
3646 
3647 NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3648 
3649 #ifdef __nvoc_gpu_h_disabled
3650 static inline NV_STATUS gpuAddClassToClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) {
3651     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3652     return NV_ERR_NOT_SUPPORTED;
3653 }
3654 #else //__nvoc_gpu_h_disabled
3655 #define gpuAddClassToClassDBByEngTag(pGpu, arg0) gpuAddClassToClassDBByEngTag_IMPL(pGpu, arg0)
3656 #endif //__nvoc_gpu_h_disabled
3657 
3658 NV_STATUS gpuAddClassToClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3659 
3660 #ifdef __nvoc_gpu_h_disabled
3661 static inline NV_STATUS gpuAddClassToClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) {
3662     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3663     return NV_ERR_NOT_SUPPORTED;
3664 }
3665 #else //__nvoc_gpu_h_disabled
3666 #define gpuAddClassToClassDBByClassId(pGpu, arg0) gpuAddClassToClassDBByClassId_IMPL(pGpu, arg0)
3667 #endif //__nvoc_gpu_h_disabled
3668 
3669 NV_STATUS gpuAddClassToClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);
3670 
3671 #ifdef __nvoc_gpu_h_disabled
3672 static inline NV_STATUS gpuAddClassToClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
3673     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3674     return NV_ERR_NOT_SUPPORTED;
3675 }
3676 #else //__nvoc_gpu_h_disabled
3677 #define gpuAddClassToClassDBByEngTagClassId(pGpu, arg0, arg1) gpuAddClassToClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1)
3678 #endif //__nvoc_gpu_h_disabled
3679 
3680 NV_STATUS gpuDeleteClassFromClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3681 
3682 #ifdef __nvoc_gpu_h_disabled
3683 static inline NV_STATUS gpuDeleteClassFromClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) {
3684     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3685     return NV_ERR_NOT_SUPPORTED;
3686 }
3687 #else //__nvoc_gpu_h_disabled
3688 #define gpuDeleteClassFromClassDBByClassId(pGpu, arg0) gpuDeleteClassFromClassDBByClassId_IMPL(pGpu, arg0)
3689 #endif //__nvoc_gpu_h_disabled
3690 
3691 NV_STATUS gpuDeleteClassFromClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3692 
3693 #ifdef __nvoc_gpu_h_disabled
3694 static inline NV_STATUS gpuDeleteClassFromClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) {
3695     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3696     return NV_ERR_NOT_SUPPORTED;
3697 }
3698 #else //__nvoc_gpu_h_disabled
3699 #define gpuDeleteClassFromClassDBByEngTag(pGpu, arg0) gpuDeleteClassFromClassDBByEngTag_IMPL(pGpu, arg0)
3700 #endif //__nvoc_gpu_h_disabled
3701 
3702 NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);
3703 
3704 #ifdef __nvoc_gpu_h_disabled
3705 static inline NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
3706     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3707     return NV_ERR_NOT_SUPPORTED;
3708 }
3709 #else //__nvoc_gpu_h_disabled
3710 #define gpuDeleteClassFromClassDBByEngTagClassId(pGpu, arg0, arg1) gpuDeleteClassFromClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1)
3711 #endif //__nvoc_gpu_h_disabled
3712 
3713 NvBool gpuIsClassSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3714 
3715 #ifdef __nvoc_gpu_h_disabled
3716 static inline NvBool gpuIsClassSupported(struct OBJGPU *pGpu, NvU32 arg0) {
3717     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3718     return NV_FALSE;
3719 }
3720 #else //__nvoc_gpu_h_disabled
3721 #define gpuIsClassSupported(pGpu, arg0) gpuIsClassSupported_IMPL(pGpu, arg0)
3722 #endif //__nvoc_gpu_h_disabled
3723 
3724 NV_STATUS gpuGetClassByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1);
3725 
3726 #ifdef __nvoc_gpu_h_disabled
3727 static inline NV_STATUS gpuGetClassByClassId(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1) {
3728     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3729     return NV_ERR_NOT_SUPPORTED;
3730 }
3731 #else //__nvoc_gpu_h_disabled
3732 #define gpuGetClassByClassId(pGpu, arg0, arg1) gpuGetClassByClassId_IMPL(pGpu, arg0, arg1)
3733 #endif //__nvoc_gpu_h_disabled
3734 
3735 NV_STATUS gpuGetClassByEngineAndClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2);
3736 
3737 #ifdef __nvoc_gpu_h_disabled
3738 static inline NV_STATUS gpuGetClassByEngineAndClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2) {
3739     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3740     return NV_ERR_NOT_SUPPORTED;
3741 }
3742 #else //__nvoc_gpu_h_disabled
3743 #define gpuGetClassByEngineAndClassId(pGpu, arg0, arg1, arg2) gpuGetClassByEngineAndClassId_IMPL(pGpu, arg0, arg1, arg2)
3744 #endif //__nvoc_gpu_h_disabled
3745 
3746 NV_STATUS gpuGetClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2);
3747 
3748 #ifdef __nvoc_gpu_h_disabled
3749 static inline NV_STATUS gpuGetClassList(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2) {
3750     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3751     return NV_ERR_NOT_SUPPORTED;
3752 }
3753 #else //__nvoc_gpu_h_disabled
3754 #define gpuGetClassList(pGpu, arg0, arg1, arg2) gpuGetClassList_IMPL(pGpu, arg0, arg1, arg2)
3755 #endif //__nvoc_gpu_h_disabled
3756 
3757 NV_STATUS gpuConstructEngineTable_IMPL(struct OBJGPU *pGpu);
3758 
3759 #ifdef __nvoc_gpu_h_disabled
3760 static inline NV_STATUS gpuConstructEngineTable(struct OBJGPU *pGpu) {
3761     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3762     return NV_ERR_NOT_SUPPORTED;
3763 }
3764 #else //__nvoc_gpu_h_disabled
3765 #define gpuConstructEngineTable(pGpu) gpuConstructEngineTable_IMPL(pGpu)
3766 #endif //__nvoc_gpu_h_disabled
3767 
3768 void gpuDestroyEngineTable_IMPL(struct OBJGPU *pGpu);
3769 
3770 #ifdef __nvoc_gpu_h_disabled
3771 static inline void gpuDestroyEngineTable(struct OBJGPU *pGpu) {
3772     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3773 }
3774 #else //__nvoc_gpu_h_disabled
3775 #define gpuDestroyEngineTable(pGpu) gpuDestroyEngineTable_IMPL(pGpu)
3776 #endif //__nvoc_gpu_h_disabled
3777 
3778 NV_STATUS gpuUpdateEngineTable_IMPL(struct OBJGPU *pGpu);
3779 
3780 #ifdef __nvoc_gpu_h_disabled
3781 static inline NV_STATUS gpuUpdateEngineTable(struct OBJGPU *pGpu) {
3782     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3783     return NV_ERR_NOT_SUPPORTED;
3784 }
3785 #else //__nvoc_gpu_h_disabled
3786 #define gpuUpdateEngineTable(pGpu) gpuUpdateEngineTable_IMPL(pGpu)
3787 #endif //__nvoc_gpu_h_disabled
3788 
3789 NvBool gpuCheckEngineTable_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0);
3790 
3791 #ifdef __nvoc_gpu_h_disabled
3792 static inline NvBool gpuCheckEngineTable(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0) {
3793     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3794     return NV_FALSE;
3795 }
3796 #else //__nvoc_gpu_h_disabled
3797 #define gpuCheckEngineTable(pGpu, arg0) gpuCheckEngineTable_IMPL(pGpu, arg0)
3798 #endif //__nvoc_gpu_h_disabled
3799 
3800 NV_STATUS gpuXlateEngDescToClientEngineId_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, RM_ENGINE_TYPE *arg1);
3801 
3802 #ifdef __nvoc_gpu_h_disabled
3803 static inline NV_STATUS gpuXlateEngDescToClientEngineId(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, RM_ENGINE_TYPE *arg1) {
3804     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3805     return NV_ERR_NOT_SUPPORTED;
3806 }
3807 #else //__nvoc_gpu_h_disabled
3808 #define gpuXlateEngDescToClientEngineId(pGpu, arg0, arg1) gpuXlateEngDescToClientEngineId_IMPL(pGpu, arg0, arg1)
3809 #endif //__nvoc_gpu_h_disabled
3810 
3811 NV_STATUS gpuXlateClientEngineIdToEngDesc_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, ENGDESCRIPTOR *arg1);
3812 
3813 #ifdef __nvoc_gpu_h_disabled
3814 static inline NV_STATUS gpuXlateClientEngineIdToEngDesc(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, ENGDESCRIPTOR *arg1) {
3815     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3816     return NV_ERR_NOT_SUPPORTED;
3817 }
3818 #else //__nvoc_gpu_h_disabled
3819 #define gpuXlateClientEngineIdToEngDesc(pGpu, arg0, arg1) gpuXlateClientEngineIdToEngDesc_IMPL(pGpu, arg0, arg1)
3820 #endif //__nvoc_gpu_h_disabled
3821 
3822 NV_STATUS gpuGetFlcnFromClientEngineId_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, struct Falcon **arg1);
3823 
3824 #ifdef __nvoc_gpu_h_disabled
3825 static inline NV_STATUS gpuGetFlcnFromClientEngineId(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg0, struct Falcon **arg1) {
3826     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3827     return NV_ERR_NOT_SUPPORTED;
3828 }
3829 #else //__nvoc_gpu_h_disabled
3830 #define gpuGetFlcnFromClientEngineId(pGpu, arg0, arg1) gpuGetFlcnFromClientEngineId_IMPL(pGpu, arg0, arg1)
3831 #endif //__nvoc_gpu_h_disabled
3832 
3833 NvBool gpuIsEngDescSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3834 
3835 #ifdef __nvoc_gpu_h_disabled
3836 static inline NvBool gpuIsEngDescSupported(struct OBJGPU *pGpu, NvU32 arg0) {
3837     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3838     return NV_FALSE;
3839 }
3840 #else //__nvoc_gpu_h_disabled
3841 #define gpuIsEngDescSupported(pGpu, arg0) gpuIsEngDescSupported_IMPL(pGpu, arg0)
3842 #endif //__nvoc_gpu_h_disabled
3843 
3844 NV_STATUS gpuReadBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData);
3845 
3846 #ifdef __nvoc_gpu_h_disabled
3847 static inline NV_STATUS gpuReadBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData) {
3848     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3849     return NV_ERR_NOT_SUPPORTED;
3850 }
3851 #else //__nvoc_gpu_h_disabled
3852 #define gpuReadBusConfigCycle(pGpu, index, pData) gpuReadBusConfigCycle_IMPL(pGpu, index, pData)
3853 #endif //__nvoc_gpu_h_disabled
3854 
3855 NV_STATUS gpuWriteBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 value);
3856 
3857 #ifdef __nvoc_gpu_h_disabled
3858 static inline NV_STATUS gpuWriteBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
3859     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3860     return NV_ERR_NOT_SUPPORTED;
3861 }
3862 #else //__nvoc_gpu_h_disabled
3863 #define gpuWriteBusConfigCycle(pGpu, index, value) gpuWriteBusConfigCycle_IMPL(pGpu, index, value)
3864 #endif //__nvoc_gpu_h_disabled
3865 
3866 RM_ENGINE_TYPE gpuGetRmEngineType_IMPL(NvU32 index);
3867 
3868 #define gpuGetRmEngineType(index) gpuGetRmEngineType_IMPL(index)
3869 void gpuGetRmEngineTypeList_IMPL(NvU32 *pNv2080EngineList, NvU32 engineCount, RM_ENGINE_TYPE *pRmEngineList);
3870 
3871 #define gpuGetRmEngineTypeList(pNv2080EngineList, engineCount, pRmEngineList) gpuGetRmEngineTypeList_IMPL(pNv2080EngineList, engineCount, pRmEngineList)
3872 NvU32 gpuGetNv2080EngineType_IMPL(RM_ENGINE_TYPE index);
3873 
3874 #define gpuGetNv2080EngineType(index) gpuGetNv2080EngineType_IMPL(index)
3875 void gpuGetNv2080EngineTypeList_IMPL(RM_ENGINE_TYPE *pRmEngineList, NvU32 engineCount, NvU32 *pNv2080EngineList);
3876 
3877 #define gpuGetNv2080EngineTypeList(pRmEngineList, engineCount, pNv2080EngineList) gpuGetNv2080EngineTypeList_IMPL(pRmEngineList, engineCount, pNv2080EngineList)
3878 NV_STATUS gpuGetRmEngineTypeCapMask_IMPL(NvU32 *NV2080EngineTypeCap, NvU32 capSize, NvU32 *RmEngineTypeCap);
3879 
3880 #define gpuGetRmEngineTypeCapMask(NV2080EngineTypeCap, capSize, RmEngineTypeCap) gpuGetRmEngineTypeCapMask_IMPL(NV2080EngineTypeCap, capSize, RmEngineTypeCap)
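//
// Usage sketch for the engine-type conversion helpers above (illustrative
// only; NV2080_ENGINE_TYPE_GR0 and the local variables are assumptions made
// for the example, not part of this generated interface):
//
//     RM_ENGINE_TYPE rmType = gpuGetRmEngineType(NV2080_ENGINE_TYPE_GR0);
//     NvU32 nv2080Type      = gpuGetNv2080EngineType(rmType);
//     // Round-tripping through both helpers returns the original
//     // NV2080_CTRL_GPU engine value.
//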
3881 BRANDING_TYPE gpuDetectBranding_IMPL(struct OBJGPU *pGpu);
3882 
3883 #ifdef __nvoc_gpu_h_disabled
3884 static inline BRANDING_TYPE gpuDetectBranding(struct OBJGPU *pGpu) {
3885     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3886     BRANDING_TYPE ret;
3887     portMemSet(&ret, 0, sizeof(BRANDING_TYPE));
3888     return ret;
3889 }
3890 #else //__nvoc_gpu_h_disabled
3891 #define gpuDetectBranding(pGpu) gpuDetectBranding_IMPL(pGpu)
3892 #endif //__nvoc_gpu_h_disabled
3893 
3894 COMPUTE_BRANDING_TYPE gpuDetectComputeBranding_IMPL(struct OBJGPU *pGpu);
3895 
3896 #ifdef __nvoc_gpu_h_disabled
3897 static inline COMPUTE_BRANDING_TYPE gpuDetectComputeBranding(struct OBJGPU *pGpu) {
3898     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3899     COMPUTE_BRANDING_TYPE ret;
3900     portMemSet(&ret, 0, sizeof(COMPUTE_BRANDING_TYPE));
3901     return ret;
3902 }
3903 #else //__nvoc_gpu_h_disabled
3904 #define gpuDetectComputeBranding(pGpu) gpuDetectComputeBranding_IMPL(pGpu)
3905 #endif //__nvoc_gpu_h_disabled
3906 
3907 BRANDING_TYPE gpuDetectVgxBranding_IMPL(struct OBJGPU *pGpu);
3908 
3909 #ifdef __nvoc_gpu_h_disabled
3910 static inline BRANDING_TYPE gpuDetectVgxBranding(struct OBJGPU *pGpu) {
3911     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3912     BRANDING_TYPE ret;
3913     portMemSet(&ret, 0, sizeof(BRANDING_TYPE));
3914     return ret;
3915 }
3916 #else //__nvoc_gpu_h_disabled
3917 #define gpuDetectVgxBranding(pGpu) gpuDetectVgxBranding_IMPL(pGpu)
3918 #endif //__nvoc_gpu_h_disabled
3919 
3920 NvU32 gpuGetGpuMask_IMPL(struct OBJGPU *pGpu);
3921 
3922 #ifdef __nvoc_gpu_h_disabled
3923 static inline NvU32 gpuGetGpuMask(struct OBJGPU *pGpu) {
3924     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3925     return 0;
3926 }
3927 #else //__nvoc_gpu_h_disabled
3928 #define gpuGetGpuMask(pGpu) gpuGetGpuMask_IMPL(pGpu)
3929 #endif //__nvoc_gpu_h_disabled
3930 
3931 void gpuChangeComputeModeRefCount_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
3932 
3933 #ifdef __nvoc_gpu_h_disabled
3934 static inline void gpuChangeComputeModeRefCount(struct OBJGPU *pGpu, NvU32 arg0) {
3935     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3936 }
3937 #else //__nvoc_gpu_h_disabled
3938 #define gpuChangeComputeModeRefCount(pGpu, arg0) gpuChangeComputeModeRefCount_IMPL(pGpu, arg0)
3939 #endif //__nvoc_gpu_h_disabled
3940 
3941 NV_STATUS gpuEnterShutdown_IMPL(struct OBJGPU *pGpu);
3942 
3943 #ifdef __nvoc_gpu_h_disabled
3944 static inline NV_STATUS gpuEnterShutdown(struct OBJGPU *pGpu) {
3945     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3946     return NV_ERR_NOT_SUPPORTED;
3947 }
3948 #else //__nvoc_gpu_h_disabled
3949 #define gpuEnterShutdown(pGpu) gpuEnterShutdown_IMPL(pGpu)
3950 #endif //__nvoc_gpu_h_disabled
3951 
3952 NV_STATUS gpuSanityCheck_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1);
3953 
3954 #ifdef __nvoc_gpu_h_disabled
3955 static inline NV_STATUS gpuSanityCheck(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) {
3956     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3957     return NV_ERR_NOT_SUPPORTED;
3958 }
3959 #else //__nvoc_gpu_h_disabled
3960 #define gpuSanityCheck(pGpu, arg0, arg1) gpuSanityCheck_IMPL(pGpu, arg0, arg1)
3961 #endif //__nvoc_gpu_h_disabled
3962 
3963 DEVICE_MAPPING *gpuGetDeviceMapping_IMPL(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1);
3964 
3965 #ifdef __nvoc_gpu_h_disabled
3966 static inline DEVICE_MAPPING *gpuGetDeviceMapping(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1) {
3967     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3968     return NULL;
3969 }
3970 #else //__nvoc_gpu_h_disabled
3971 #define gpuGetDeviceMapping(pGpu, arg0, arg1) gpuGetDeviceMapping_IMPL(pGpu, arg0, arg1)
3972 #endif //__nvoc_gpu_h_disabled
3973 
3974 DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1);
3975 
3976 #ifdef __nvoc_gpu_h_disabled
3977 static inline DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) {
3978     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3979     return NULL;
3980 }
3981 #else //__nvoc_gpu_h_disabled
3982 #define gpuGetDeviceMappingFromDeviceID(pGpu, arg0, arg1) gpuGetDeviceMappingFromDeviceID_IMPL(pGpu, arg0, arg1)
3983 #endif //__nvoc_gpu_h_disabled
3984 
3985 NV_STATUS gpuGetGidInfo_IMPL(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags);
3986 
3987 #ifdef __nvoc_gpu_h_disabled
3988 static inline NV_STATUS gpuGetGidInfo(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags) {
3989     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
3990     return NV_ERR_NOT_SUPPORTED;
3991 }
3992 #else //__nvoc_gpu_h_disabled
3993 #define gpuGetGidInfo(pGpu, ppGidString, pGidStrlen, gidFlags) gpuGetGidInfo_IMPL(pGpu, ppGidString, pGidStrlen, gidFlags)
3994 #endif //__nvoc_gpu_h_disabled
3995 
3996 void gpuSetDisconnectedProperties_IMPL(struct OBJGPU *pGpu);
3997 
3998 #ifdef __nvoc_gpu_h_disabled
3999 static inline void gpuSetDisconnectedProperties(struct OBJGPU *pGpu) {
4000     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4001 }
4002 #else //__nvoc_gpu_h_disabled
4003 #define gpuSetDisconnectedProperties(pGpu) gpuSetDisconnectedProperties_IMPL(pGpu)
4004 #endif //__nvoc_gpu_h_disabled
4005 
4006 NV_STATUS gpuAddConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0);
4007 
4008 #ifdef __nvoc_gpu_h_disabled
4009 static inline NV_STATUS gpuAddConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) {
4010     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4011     return NV_ERR_NOT_SUPPORTED;
4012 }
4013 #else //__nvoc_gpu_h_disabled
4014 #define gpuAddConstructedFalcon(pGpu, arg0) gpuAddConstructedFalcon_IMPL(pGpu, arg0)
4015 #endif //__nvoc_gpu_h_disabled
4016 
4017 NV_STATUS gpuRemoveConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0);
4018 
4019 #ifdef __nvoc_gpu_h_disabled
4020 static inline NV_STATUS gpuRemoveConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) {
4021     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4022     return NV_ERR_NOT_SUPPORTED;
4023 }
4024 #else //__nvoc_gpu_h_disabled
4025 #define gpuRemoveConstructedFalcon(pGpu, arg0) gpuRemoveConstructedFalcon_IMPL(pGpu, arg0)
4026 #endif //__nvoc_gpu_h_disabled
4027 
4028 NV_STATUS gpuGetConstructedFalcon_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1);
4029 
4030 #ifdef __nvoc_gpu_h_disabled
4031 static inline NV_STATUS gpuGetConstructedFalcon(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) {
4032     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4033     return NV_ERR_NOT_SUPPORTED;
4034 }
4035 #else //__nvoc_gpu_h_disabled
4036 #define gpuGetConstructedFalcon(pGpu, arg0, arg1) gpuGetConstructedFalcon_IMPL(pGpu, arg0, arg1)
4037 #endif //__nvoc_gpu_h_disabled
4038 
4039 NV_STATUS gpuGetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);
4040 
4041 #ifdef __nvoc_gpu_h_disabled
4042 static inline NV_STATUS gpuGetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
4043     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4044     return NV_ERR_NOT_SUPPORTED;
4045 }
4046 #else //__nvoc_gpu_h_disabled
4047 #define gpuGetSparseTextureComputeMode(pGpu, arg0, arg1, arg2) gpuGetSparseTextureComputeMode_IMPL(pGpu, arg0, arg1, arg2)
4048 #endif //__nvoc_gpu_h_disabled
4049 
4050 NV_STATUS gpuSetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
4051 
4052 #ifdef __nvoc_gpu_h_disabled
4053 static inline NV_STATUS gpuSetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 arg0) {
4054     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4055     return NV_ERR_NOT_SUPPORTED;
4056 }
4057 #else //__nvoc_gpu_h_disabled
4058 #define gpuSetSparseTextureComputeMode(pGpu, arg0) gpuSetSparseTextureComputeMode_IMPL(pGpu, arg0)
4059 #endif //__nvoc_gpu_h_disabled
4060 
4061 struct OBJENGSTATE *gpuGetEngstate_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);
4062 
4063 #ifdef __nvoc_gpu_h_disabled
4064 static inline struct OBJENGSTATE *gpuGetEngstate(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
4065     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4066     return NULL;
4067 }
4068 #else //__nvoc_gpu_h_disabled
4069 #define gpuGetEngstate(pGpu, arg0) gpuGetEngstate_IMPL(pGpu, arg0)
4070 #endif //__nvoc_gpu_h_disabled
4071 
4072 struct OBJENGSTATE *gpuGetEngstateNoShare_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);
4073 
4074 #ifdef __nvoc_gpu_h_disabled
4075 static inline struct OBJENGSTATE *gpuGetEngstateNoShare(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
4076     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4077     return NULL;
4078 }
4079 #else //__nvoc_gpu_h_disabled
4080 #define gpuGetEngstateNoShare(pGpu, arg0) gpuGetEngstateNoShare_IMPL(pGpu, arg0)
4081 #endif //__nvoc_gpu_h_disabled
4082 
4083 struct KernelFifo *gpuGetKernelFifoShared_IMPL(struct OBJGPU *pGpu);
4084 
4085 #ifdef __nvoc_gpu_h_disabled
4086 static inline struct KernelFifo *gpuGetKernelFifoShared(struct OBJGPU *pGpu) {
4087     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4088     return NULL;
4089 }
4090 #else //__nvoc_gpu_h_disabled
4091 #define gpuGetKernelFifoShared(pGpu) gpuGetKernelFifoShared_IMPL(pGpu)
4092 #endif //__nvoc_gpu_h_disabled
4093 
4094 NvBool gpuGetNextEngstate_IMPL(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState);
4095 
4096 #ifdef __nvoc_gpu_h_disabled
4097 static inline NvBool gpuGetNextEngstate(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState) {
4098     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4099     return NV_FALSE;
4100 }
4101 #else //__nvoc_gpu_h_disabled
4102 #define gpuGetNextEngstate(pGpu, pIt, ppEngState) gpuGetNextEngstate_IMPL(pGpu, pIt, ppEngState)
4103 #endif //__nvoc_gpu_h_disabled
4104 
4105 struct OBJHOSTENG *gpuGetHosteng_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0);
4106 
4107 #ifdef __nvoc_gpu_h_disabled
4108 static inline struct OBJHOSTENG *gpuGetHosteng(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) {
4109     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4110     return NULL;
4111 }
4112 #else //__nvoc_gpu_h_disabled
4113 #define gpuGetHosteng(pGpu, arg0) gpuGetHosteng_IMPL(pGpu, arg0)
4114 #endif //__nvoc_gpu_h_disabled
4115 
4116 NV_STATUS gpuConstructUserRegisterAccessMap_IMPL(struct OBJGPU *pGpu);
4117 
4118 #ifdef __nvoc_gpu_h_disabled
4119 static inline NV_STATUS gpuConstructUserRegisterAccessMap(struct OBJGPU *pGpu) {
4120     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4121     return NV_ERR_NOT_SUPPORTED;
4122 }
4123 #else //__nvoc_gpu_h_disabled
4124 #define gpuConstructUserRegisterAccessMap(pGpu) gpuConstructUserRegisterAccessMap_IMPL(pGpu)
4125 #endif //__nvoc_gpu_h_disabled
4126 
4127 NV_STATUS gpuInitRegisterAccessMap_IMPL(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3);
4128 
4129 #ifdef __nvoc_gpu_h_disabled
4130 static inline NV_STATUS gpuInitRegisterAccessMap(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3) {
4131     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4132     return NV_ERR_NOT_SUPPORTED;
4133 }
4134 #else //__nvoc_gpu_h_disabled
4135 #define gpuInitRegisterAccessMap(pGpu, arg0, arg1, arg2, arg3) gpuInitRegisterAccessMap_IMPL(pGpu, arg0, arg1, arg2, arg3)
4136 #endif //__nvoc_gpu_h_disabled
4137 
4138 NV_STATUS gpuSetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow);
4139 
4140 #ifdef __nvoc_gpu_h_disabled
4141 static inline NV_STATUS gpuSetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) {
4142     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4143     return NV_ERR_NOT_SUPPORTED;
4144 }
4145 #else //__nvoc_gpu_h_disabled
4146 #define gpuSetUserRegisterAccessPermissions(pGpu, offset, size, bAllow) gpuSetUserRegisterAccessPermissions_IMPL(pGpu, offset, size, bAllow)
4147 #endif //__nvoc_gpu_h_disabled
4148 
4149 NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk_IMPL(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow);
4150 
4151 #ifdef __nvoc_gpu_h_disabled
4152 static inline NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow) {
4153     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4154     return NV_ERR_NOT_SUPPORTED;
4155 }
4156 #else //__nvoc_gpu_h_disabled
4157 #define gpuSetUserRegisterAccessPermissionsInBulk(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) gpuSetUserRegisterAccessPermissionsInBulk_IMPL(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow)
4158 #endif //__nvoc_gpu_h_disabled
4159 
4160 NvBool gpuGetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset);
4161 
4162 #ifdef __nvoc_gpu_h_disabled
4163 static inline NvBool gpuGetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset) {
4164     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4165     return NV_FALSE;
4166 }
4167 #else //__nvoc_gpu_h_disabled
4168 #define gpuGetUserRegisterAccessPermissions(pGpu, offset) gpuGetUserRegisterAccessPermissions_IMPL(pGpu, offset)
4169 #endif //__nvoc_gpu_h_disabled
4170 
4171 void gpuDumpCallbackRegister_IMPL(struct OBJGPU *pGpu);
4172 
4173 #ifdef __nvoc_gpu_h_disabled
4174 static inline void gpuDumpCallbackRegister(struct OBJGPU *pGpu) {
4175     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4176 }
4177 #else //__nvoc_gpu_h_disabled
4178 #define gpuDumpCallbackRegister(pGpu) gpuDumpCallbackRegister_IMPL(pGpu)
4179 #endif //__nvoc_gpu_h_disabled
4180 
4181 NV_STATUS gpuGetGfidState_IMPL(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState);
4182 
4183 #ifdef __nvoc_gpu_h_disabled
4184 static inline NV_STATUS gpuGetGfidState(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState) {
4185     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4186     return NV_ERR_NOT_SUPPORTED;
4187 }
4188 #else //__nvoc_gpu_h_disabled
4189 #define gpuGetGfidState(pGpu, gfid, pState) gpuGetGfidState_IMPL(pGpu, gfid, pState)
4190 #endif //__nvoc_gpu_h_disabled
4191 
4192 void gpuSetGfidUsage_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse);
4193 
4194 #ifdef __nvoc_gpu_h_disabled
4195 static inline void gpuSetGfidUsage(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) {
4196     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4197 }
4198 #else //__nvoc_gpu_h_disabled
4199 #define gpuSetGfidUsage(pGpu, gfid, bInUse) gpuSetGfidUsage_IMPL(pGpu, gfid, bInUse)
4200 #endif //__nvoc_gpu_h_disabled
4201 
4202 void gpuSetGfidInvalidated_IMPL(struct OBJGPU *pGpu, NvU32 gfid);
4203 
4204 #ifdef __nvoc_gpu_h_disabled
4205 static inline void gpuSetGfidInvalidated(struct OBJGPU *pGpu, NvU32 gfid) {
4206     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4207 }
4208 #else //__nvoc_gpu_h_disabled
4209 #define gpuSetGfidInvalidated(pGpu, gfid) gpuSetGfidInvalidated_IMPL(pGpu, gfid)
4210 #endif //__nvoc_gpu_h_disabled
4211 
4212 NV_STATUS gpuSetExternalKernelClientCount_IMPL(struct OBJGPU *pGpu, NvBool bIncr);
4213 
4214 #ifdef __nvoc_gpu_h_disabled
4215 static inline NV_STATUS gpuSetExternalKernelClientCount(struct OBJGPU *pGpu, NvBool bIncr) {
4216     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4217     return NV_ERR_NOT_SUPPORTED;
4218 }
4219 #else //__nvoc_gpu_h_disabled
4220 #define gpuSetExternalKernelClientCount(pGpu, bIncr) gpuSetExternalKernelClientCount_IMPL(pGpu, bIncr)
4221 #endif //__nvoc_gpu_h_disabled
4222 
4223 NvBool gpuIsInUse_IMPL(struct OBJGPU *pGpu);
4224 
4225 #ifdef __nvoc_gpu_h_disabled
4226 static inline NvBool gpuIsInUse(struct OBJGPU *pGpu) {
4227     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4228     return NV_FALSE;
4229 }
4230 #else //__nvoc_gpu_h_disabled
4231 #define gpuIsInUse(pGpu) gpuIsInUse_IMPL(pGpu)
4232 #endif //__nvoc_gpu_h_disabled
4233 
4234 NvU32 gpuGetUserClientCount_IMPL(struct OBJGPU *pGpu);
4235 
4236 #ifdef __nvoc_gpu_h_disabled
4237 static inline NvU32 gpuGetUserClientCount(struct OBJGPU *pGpu) {
4238     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4239     return 0;
4240 }
4241 #else //__nvoc_gpu_h_disabled
4242 #define gpuGetUserClientCount(pGpu) gpuGetUserClientCount_IMPL(pGpu)
4243 #endif //__nvoc_gpu_h_disabled
4244 
4245 NvU32 gpuGetExternalClientCount_IMPL(struct OBJGPU *pGpu);
4246 
4247 #ifdef __nvoc_gpu_h_disabled
4248 static inline NvU32 gpuGetExternalClientCount(struct OBJGPU *pGpu) {
4249     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4250     return 0;
4251 }
4252 #else //__nvoc_gpu_h_disabled
4253 #define gpuGetExternalClientCount(pGpu) gpuGetExternalClientCount_IMPL(pGpu)
4254 #endif //__nvoc_gpu_h_disabled
4255 
4256 void gpuNotifySubDeviceEvent_IMPL(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16);
4257 
4258 #ifdef __nvoc_gpu_h_disabled
4259 static inline void gpuNotifySubDeviceEvent(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) {
4260     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4261 }
4262 #else //__nvoc_gpu_h_disabled
4263 #define gpuNotifySubDeviceEvent(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) gpuNotifySubDeviceEvent_IMPL(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16)
4264 #endif //__nvoc_gpu_h_disabled
4265 
4266 NV_STATUS gpuRegisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice);
4267 
4268 #ifdef __nvoc_gpu_h_disabled
4269 static inline NV_STATUS gpuRegisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) {
4270     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4271     return NV_ERR_NOT_SUPPORTED;
4272 }
4273 #else //__nvoc_gpu_h_disabled
4274 #define gpuRegisterSubdevice(pGpu, pSubdevice) gpuRegisterSubdevice_IMPL(pGpu, pSubdevice)
4275 #endif //__nvoc_gpu_h_disabled
4276 
4277 void gpuUnregisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice);
4278 
4279 #ifdef __nvoc_gpu_h_disabled
4280 static inline void gpuUnregisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) {
4281     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4282 }
4283 #else //__nvoc_gpu_h_disabled
4284 #define gpuUnregisterSubdevice(pGpu, pSubdevice) gpuUnregisterSubdevice_IMPL(pGpu, pSubdevice)
4285 #endif //__nvoc_gpu_h_disabled
4286 
4287 void gpuGspPluginTriggeredEvent_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex);
4288 
4289 #ifdef __nvoc_gpu_h_disabled
4290 static inline void gpuGspPluginTriggeredEvent(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex) {
4291     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4292 }
4293 #else //__nvoc_gpu_h_disabled
4294 #define gpuGspPluginTriggeredEvent(pGpu, gfid, notifyIndex) gpuGspPluginTriggeredEvent_IMPL(pGpu, gfid, notifyIndex)
4295 #endif //__nvoc_gpu_h_disabled
4296 
4297 NV_STATUS gpuGetProcWithObject_IMPL(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef);
4298 
4299 #ifdef __nvoc_gpu_h_disabled
4300 static inline NV_STATUS gpuGetProcWithObject(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef) {
4301     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4302     return NV_ERR_NOT_SUPPORTED;
4303 }
4304 #else //__nvoc_gpu_h_disabled
4305 #define gpuGetProcWithObject(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) gpuGetProcWithObject_IMPL(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef)
4306 #endif //__nvoc_gpu_h_disabled
4307 
4308 NV_STATUS gpuFindClientInfoWithPidIterator_IMPL(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo);
4309 
4310 #ifdef __nvoc_gpu_h_disabled
4311 static inline NV_STATUS gpuFindClientInfoWithPidIterator(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo) {
4312     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4313     return NV_ERR_NOT_SUPPORTED;
4314 }
4315 #else //__nvoc_gpu_h_disabled
4316 #define gpuFindClientInfoWithPidIterator(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) gpuFindClientInfoWithPidIterator_IMPL(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo)
4317 #endif //__nvoc_gpu_h_disabled
4318 
4319 NvBool gpuIsCCFeatureEnabled_IMPL(struct OBJGPU *pGpu);
4320 
4321 #ifdef __nvoc_gpu_h_disabled
4322 static inline NvBool gpuIsCCFeatureEnabled(struct OBJGPU *pGpu) {
4323     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4324     return NV_FALSE;
4325 }
4326 #else //__nvoc_gpu_h_disabled
4327 #define gpuIsCCFeatureEnabled(pGpu) gpuIsCCFeatureEnabled_IMPL(pGpu)
4328 #endif //__nvoc_gpu_h_disabled
4329 
4330 NvBool gpuIsApmFeatureEnabled_IMPL(struct OBJGPU *pGpu);
4331 
4332 #ifdef __nvoc_gpu_h_disabled
4333 static inline NvBool gpuIsApmFeatureEnabled(struct OBJGPU *pGpu) {
4334     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4335     return NV_FALSE;
4336 }
4337 #else //__nvoc_gpu_h_disabled
4338 #define gpuIsApmFeatureEnabled(pGpu) gpuIsApmFeatureEnabled_IMPL(pGpu)
4339 #endif //__nvoc_gpu_h_disabled
4340 
4341 NvBool gpuIsCCorApmFeatureEnabled_IMPL(struct OBJGPU *pGpu);
4342 
4343 #ifdef __nvoc_gpu_h_disabled
4344 static inline NvBool gpuIsCCorApmFeatureEnabled(struct OBJGPU *pGpu) {
4345     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4346     return NV_FALSE;
4347 }
4348 #else //__nvoc_gpu_h_disabled
4349 #define gpuIsCCorApmFeatureEnabled(pGpu) gpuIsCCorApmFeatureEnabled_IMPL(pGpu)
4350 #endif //__nvoc_gpu_h_disabled
4351 
4352 NvBool gpuIsCCDevToolsModeEnabled_IMPL(struct OBJGPU *pGpu);
4353 
4354 #ifdef __nvoc_gpu_h_disabled
4355 static inline NvBool gpuIsCCDevToolsModeEnabled(struct OBJGPU *pGpu) {
4356     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4357     return NV_FALSE;
4358 }
4359 #else //__nvoc_gpu_h_disabled
4360 #define gpuIsCCDevToolsModeEnabled(pGpu) gpuIsCCDevToolsModeEnabled_IMPL(pGpu)
4361 #endif //__nvoc_gpu_h_disabled
4362 
4363 NvBool gpuIsCCMultiGpuProtectedPcieModeEnabled_IMPL(struct OBJGPU *pGpu);
4364 
4365 #ifdef __nvoc_gpu_h_disabled
4366 static inline NvBool gpuIsCCMultiGpuProtectedPcieModeEnabled(struct OBJGPU *pGpu) {
4367     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4368     return NV_FALSE;
4369 }
4370 #else //__nvoc_gpu_h_disabled
4371 #define gpuIsCCMultiGpuProtectedPcieModeEnabled(pGpu) gpuIsCCMultiGpuProtectedPcieModeEnabled_IMPL(pGpu)
4372 #endif //__nvoc_gpu_h_disabled
4373 
4374 NvBool gpuIsOnTheBus_IMPL(struct OBJGPU *pGpu);
4375 
4376 #ifdef __nvoc_gpu_h_disabled
4377 static inline NvBool gpuIsOnTheBus(struct OBJGPU *pGpu) {
4378     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4379     return NV_FALSE;
4380 }
4381 #else //__nvoc_gpu_h_disabled
4382 #define gpuIsOnTheBus(pGpu) gpuIsOnTheBus_IMPL(pGpu)
4383 #endif //__nvoc_gpu_h_disabled
4384 
4385 NV_STATUS gpuEnterStandby_IMPL(struct OBJGPU *pGpu);
4386 
4387 #ifdef __nvoc_gpu_h_disabled
4388 static inline NV_STATUS gpuEnterStandby(struct OBJGPU *pGpu) {
4389     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4390     return NV_ERR_NOT_SUPPORTED;
4391 }
4392 #else //__nvoc_gpu_h_disabled
4393 #define gpuEnterStandby(pGpu) gpuEnterStandby_IMPL(pGpu)
4394 #endif //__nvoc_gpu_h_disabled
4395 
4396 NV_STATUS gpuEnterHibernate_IMPL(struct OBJGPU *pGpu);
4397 
4398 #ifdef __nvoc_gpu_h_disabled
4399 static inline NV_STATUS gpuEnterHibernate(struct OBJGPU *pGpu) {
4400     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4401     return NV_ERR_NOT_SUPPORTED;
4402 }
4403 #else //__nvoc_gpu_h_disabled
4404 #define gpuEnterHibernate(pGpu) gpuEnterHibernate_IMPL(pGpu)
4405 #endif //__nvoc_gpu_h_disabled
4406 
4407 NV_STATUS gpuResumeFromStandby_IMPL(struct OBJGPU *pGpu);
4408 
4409 #ifdef __nvoc_gpu_h_disabled
4410 static inline NV_STATUS gpuResumeFromStandby(struct OBJGPU *pGpu) {
4411     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4412     return NV_ERR_NOT_SUPPORTED;
4413 }
4414 #else //__nvoc_gpu_h_disabled
4415 #define gpuResumeFromStandby(pGpu) gpuResumeFromStandby_IMPL(pGpu)
4416 #endif //__nvoc_gpu_h_disabled
4417 
4418 NV_STATUS gpuResumeFromHibernate_IMPL(struct OBJGPU *pGpu);
4419 
4420 #ifdef __nvoc_gpu_h_disabled
4421 static inline NV_STATUS gpuResumeFromHibernate(struct OBJGPU *pGpu) {
4422     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4423     return NV_ERR_NOT_SUPPORTED;
4424 }
4425 #else //__nvoc_gpu_h_disabled
4426 #define gpuResumeFromHibernate(pGpu) gpuResumeFromHibernate_IMPL(pGpu)
4427 #endif //__nvoc_gpu_h_disabled
4428 
4429 NvBool gpuCheckSysmemAccess_IMPL(struct OBJGPU *pGpu);
4430 
4431 #ifdef __nvoc_gpu_h_disabled
4432 static inline NvBool gpuCheckSysmemAccess(struct OBJGPU *pGpu) {
4433     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4434     return NV_FALSE;
4435 }
4436 #else //__nvoc_gpu_h_disabled
4437 #define gpuCheckSysmemAccess(pGpu) gpuCheckSysmemAccess_IMPL(pGpu)
4438 #endif //__nvoc_gpu_h_disabled
4439 
4440 void gpuInitChipInfo_IMPL(struct OBJGPU *pGpu);
4441 
4442 #ifdef __nvoc_gpu_h_disabled
4443 static inline void gpuInitChipInfo(struct OBJGPU *pGpu) {
4444     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4445 }
4446 #else //__nvoc_gpu_h_disabled
4447 #define gpuInitChipInfo(pGpu) gpuInitChipInfo_IMPL(pGpu)
4448 #endif //__nvoc_gpu_h_disabled
4449 
4450 NV_STATUS gpuGetChipDetails_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *arg0);
4451 
4452 #ifdef __nvoc_gpu_h_disabled
4453 static inline NV_STATUS gpuGetChipDetails(struct OBJGPU *pGpu, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *arg0) {
4454     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4455     return NV_ERR_NOT_SUPPORTED;
4456 }
4457 #else //__nvoc_gpu_h_disabled
4458 #define gpuGetChipDetails(pGpu, arg0) gpuGetChipDetails_IMPL(pGpu, arg0)
4459 #endif //__nvoc_gpu_h_disabled
4460 
4461 NV_STATUS gpuSanityCheckRegRead_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue);
4462 
4463 #ifdef __nvoc_gpu_h_disabled
4464 static inline NV_STATUS gpuSanityCheckRegRead(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue) {
4465     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4466     return NV_ERR_NOT_SUPPORTED;
4467 }
4468 #else //__nvoc_gpu_h_disabled
4469 #define gpuSanityCheckRegRead(pGpu, addr, size, pValue) gpuSanityCheckRegRead_IMPL(pGpu, addr, size, pValue)
4470 #endif //__nvoc_gpu_h_disabled
4471 
4472 NV_STATUS gpuSanityCheckRegisterAccess_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal);
4473 
4474 #ifdef __nvoc_gpu_h_disabled
4475 static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal) {
4476     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4477     return NV_ERR_NOT_SUPPORTED;
4478 }
4479 #else //__nvoc_gpu_h_disabled
4480 #define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal)
4481 #endif //__nvoc_gpu_h_disabled
4482 
4483 NV_STATUS gpuValidateRegOffset_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
4484 
4485 #ifdef __nvoc_gpu_h_disabled
4486 static inline NV_STATUS gpuValidateRegOffset(struct OBJGPU *pGpu, NvU32 arg0) {
4487     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4488     return NV_ERR_NOT_SUPPORTED;
4489 }
4490 #else //__nvoc_gpu_h_disabled
4491 #define gpuValidateRegOffset(pGpu, arg0) gpuValidateRegOffset_IMPL(pGpu, arg0)
4492 #endif //__nvoc_gpu_h_disabled
4493 
4494 NV_STATUS gpuSetGC6SBIOSCapabilities_IMPL(struct OBJGPU *pGpu);
4495 
4496 #ifdef __nvoc_gpu_h_disabled
4497 static inline NV_STATUS gpuSetGC6SBIOSCapabilities(struct OBJGPU *pGpu) {
4498     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4499     return NV_ERR_NOT_SUPPORTED;
4500 }
4501 #else //__nvoc_gpu_h_disabled
4502 #define gpuSetGC6SBIOSCapabilities(pGpu) gpuSetGC6SBIOSCapabilities_IMPL(pGpu)
4503 #endif //__nvoc_gpu_h_disabled
4504 
4505 NV_STATUS gpuGc6Entry_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GC6_ENTRY_PARAMS *arg0);
4506 
4507 #ifdef __nvoc_gpu_h_disabled
4508 static inline NV_STATUS gpuGc6Entry(struct OBJGPU *pGpu, NV2080_CTRL_GC6_ENTRY_PARAMS *arg0) {
4509     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4510     return NV_ERR_NOT_SUPPORTED;
4511 }
4512 #else //__nvoc_gpu_h_disabled
4513 #define gpuGc6Entry(pGpu, arg0) gpuGc6Entry_IMPL(pGpu, arg0)
4514 #endif //__nvoc_gpu_h_disabled
4515 
4516 NV_STATUS gpuGc6EntryGpuPowerOff_IMPL(struct OBJGPU *pGpu);
4517 
4518 #ifdef __nvoc_gpu_h_disabled
4519 static inline NV_STATUS gpuGc6EntryGpuPowerOff(struct OBJGPU *pGpu) {
4520     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4521     return NV_ERR_NOT_SUPPORTED;
4522 }
4523 #else //__nvoc_gpu_h_disabled
4524 #define gpuGc6EntryGpuPowerOff(pGpu) gpuGc6EntryGpuPowerOff_IMPL(pGpu)
4525 #endif //__nvoc_gpu_h_disabled
4526 
4527 NV_STATUS gpuGc6Exit_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PARAMS *arg0);
4528 
4529 #ifdef __nvoc_gpu_h_disabled
4530 static inline NV_STATUS gpuGc6Exit(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PARAMS *arg0) {
4531     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4532     return NV_ERR_NOT_SUPPORTED;
4533 }
4534 #else //__nvoc_gpu_h_disabled
4535 #define gpuGc6Exit(pGpu, arg0) gpuGc6Exit_IMPL(pGpu, arg0)
4536 #endif //__nvoc_gpu_h_disabled
4537 
4538 void gpuDestroyRusdMemory_IMPL(struct OBJGPU *pGpu);
4539 
4540 #ifdef __nvoc_gpu_h_disabled
4541 static inline void gpuDestroyRusdMemory(struct OBJGPU *pGpu) {
4542     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4543 }
4544 #else //__nvoc_gpu_h_disabled
4545 #define gpuDestroyRusdMemory(pGpu) gpuDestroyRusdMemory_IMPL(pGpu)
4546 #endif //__nvoc_gpu_h_disabled
4547 
4548 NV_STATUS gpuEnableAccounting_IMPL(struct OBJGPU *arg0);
4549 
4550 #ifdef __nvoc_gpu_h_disabled
4551 static inline NV_STATUS gpuEnableAccounting(struct OBJGPU *arg0) {
4552     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4553     return NV_ERR_NOT_SUPPORTED;
4554 }
4555 #else //__nvoc_gpu_h_disabled
4556 #define gpuEnableAccounting(arg0) gpuEnableAccounting_IMPL(arg0)
4557 #endif //__nvoc_gpu_h_disabled
4558 
4559 NV_STATUS gpuDisableAccounting_IMPL(struct OBJGPU *arg0, NvBool bForce);
4560 
4561 #ifdef __nvoc_gpu_h_disabled
4562 static inline NV_STATUS gpuDisableAccounting(struct OBJGPU *arg0, NvBool bForce) {
4563     NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
4564     return NV_ERR_NOT_SUPPORTED;
4565 }
4566 #else //__nvoc_gpu_h_disabled
4567 #define gpuDisableAccounting(arg0, bForce) gpuDisableAccounting_IMPL(arg0, bForce)
4568 #endif //__nvoc_gpu_h_disabled
4569 
4570 #undef PRIVATE_FIELD
4571 
4572 
4573 // Look up pGpu associated with a pResourceRef
4574 NV_STATUS gpuGetByRef (RsResourceRef *pContextRef, NvBool *pbBroadcast, struct OBJGPU **ppGpu);
4575 
4576 // Look up pGpu associated with a hResource
4577 NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *pbBroadcast, struct OBJGPU **ppGpu);
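//
// Usage sketch (illustrative only; pClient and hSubdevice are hypothetical
// caller-supplied values, and NV_ASSERT_OK_OR_RETURN is assumed to be
// available in the including translation unit):
//
//     struct OBJGPU *pGpu = NULL;
//     NvBool bBroadcast = NV_FALSE;
//     NV_ASSERT_OK_OR_RETURN(
//         gpuGetByHandle(pClient, hSubdevice, &bBroadcast, &pGpu));
//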
4578 
4579 #define GPU_GFID_PF         (0)
4580 #define IS_GFID_PF(gfid)    (((NvU32)(gfid)) == GPU_GFID_PF)
4581 #define IS_GFID_VF(gfid)    (((NvU32)(gfid)) != GPU_GFID_PF)
4582 // Invalid P2P GFID
4583 #define INVALID_P2P_GFID    (0xFFFFFFFF)
4584 #define INVALID_FABRIC_PARTITION_ID (0xFFFFFFFF)
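//
// Usage sketch for the GFID helpers above (illustrative only; 'gfid' is a
// hypothetical value obtained elsewhere, e.g. from a vGPU plugin request):
//
//     if (IS_GFID_VF(gfid))
//     {
//         // Track a VF GFID; the PF GFID (GPU_GFID_PF) is not marked in use.
//         gpuSetGfidUsage(pGpu, gfid, NV_TRUE);
//     }
//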
4585 
4586 //
4587 // Generates GPU child accessor macros (i.e., GPU_GET_{ENG})
4588 //
4589 #define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \
4590     static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return pGpu->gpuField; }      \
4591     ct_assert(numInstances == 1);
4592 
4593 #define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \
4594     static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return index < numInstances ? pGpu->gpuField[index] : NULL; }
4595 
4596 #include "gpu/gpu_child_list.h"
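//
// Illustrative expansion of the accessor generators above (the child class
// 'KernelFoo' and field 'pKernelFoo' are hypothetical, not an actual entry
// in gpu_child_list.h):
//
//     GPU_CHILD_SINGLE_INST(KernelFoo, GPU_GET_KERNEL_FOO, 1, NV_FALSE, pKernelFoo)
//     // expands to:
//     static NV_FORCEINLINE KernelFoo *GPU_GET_KERNEL_FOO(OBJGPU *pGpu) { return pGpu->pKernelFoo; }
//     ct_assert(1 == 1);
//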
4597 
4598 static NV_FORCEINLINE struct Graphics *GPU_GET_GR(struct OBJGPU *pGpu) { return NULL; }
4599 
4600 // Temporary stubs
4601 #if RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS
4602 #define GPU_CHILD_LIST_DISABLED_ONLY
4603 #define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \
4604     static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return NULL; }
4605 
4606 #define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \
4607     static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return NULL; }
4608 
4609 #include "gpu/gpu_child_list.h"
4610 #endif // RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS
4611 
4612 
4613 //
4614 // Inline functions
4615 //
4616 
4617 //
4618 // This function returns the subdevice mask for a GPU.
4619 // For non-SLI configurations, subdeviceInstance is 0, so this
4620 // function always returns 1.
4621 //
4622 
4623 static NV_INLINE NvU32
4624 gpuGetSubdeviceMask
4625 (
4626     struct OBJGPU *pGpu
4627 )
4628 {
4629     return 1 << pGpu->subdeviceInstance;
4630 }
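//
// Worked example for gpuGetSubdeviceMask() (illustrative only):
//
//     // non-SLI: subdeviceInstance == 0   ->  mask == (1 << 0) == 0x1
//     // SLI, third subdevice: instance 2  ->  mask == (1 << 2) == 0x4
//     NvU32 subdevMask = gpuGetSubdeviceMask(pGpu);
//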
4631 
4632 static NV_INLINE NvU32
4633 gpuGetInstance
4634 (
4635     struct OBJGPU *pGpu
4636 )
4637 {
4638     return pGpu->gpuInstance;
4639 }
4640 
4641 static NV_INLINE NvU32
4642 gpuGetDeviceInstance
4643 (
4644     struct OBJGPU *pGpu
4645 )
4646 {
4647     return pGpu->deviceInstance;
4648 }
4649 
4650 NV_INLINE
4651 static NvU32 gpuGetNumCEs(struct OBJGPU *pGpu)
4652 {
4653     return pGpu->numCEs;
4654 }
4655 
4656 //
4657 // Per GPU mode flags macros. In general these macros should not be
4658 // used and all code paths should be the same on all environments.
4659 // However, occasionally a tweak is needed to work around a limitation
4660 // or improve speed on non-hardware.  IS_RTLSIM is normally handled
4661 // in the IS_SIMULATION case and should almost never be used.
4662 //
4663 // IS_EMULATION     actual emulation hardware
4664 // IS_SIMULATION    fmodel or RTL simulation
4665 // IS_MODS_AMODEL   amodel under mods for trace player
4666 // IS_LIVE_AMODEL   amodel under windows for 3D drivers (removed)
4667 // IS_RTLSIM        RTL simulation
4668 // IS_SILICON       Real hardware
4669 // IS_VIRTUAL       RM is running within a guest VM
4670 // IS_GSP_CLIENT    RM is a GSP/DCE client with GPU support offloaded to GSP/DCE
4671 // IS_DCE_CLIENT    RM is specifically a DCE client with GPU support offloaded
4672 //                  to DCE (subset of IS_GSP_CLIENT)
4673 //
4674 
4675 #define IS_EMULATION(pGpu)                 ((pGpu)->getProperty((pGpu), PDB_PROP_GPU_EMULATION))
4676 #define IS_SIMULATION(pGpu)                (pGpu->bIsSimulation)
4677 #define IS_MODS_AMODEL(pGpu)               (pGpu->bIsModsAmodel)
4678 #define IS_FMODEL(pGpu)                    (pGpu->bIsFmodel)
4679 #define IS_RTLSIM(pGpu)                    (pGpu->bIsRtlsim)
4680 #define IS_SILICON(pGpu)                   (!(IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)))
4681 #define IS_PASSTHRU(pGpu)                  ((pGpu)->bIsPassthru)
4682 #define IS_GSP_CLIENT(pGpu)                ((RMCFG_FEATURE_GSP_CLIENT_RM || RMCFG_FEATURE_DCE_CLIENT_RM) && (pGpu)->isGspClient)
4683 #define IS_DCE_CLIENT(pGpu)                (RMCFG_FEATURE_DCE_CLIENT_RM && IS_GSP_CLIENT(pGpu))
4684 //
4685 // We expect GSP_CLIENT_RM and DCE_CLIENT_RM to be exclusive in RMCFG because we
4686 // use this above to differentiate between them. This allows a compile-time
4687 // check for RMCFG_FEATURE_DCE_CLIENT_RM instead of a runtime check for
4688 // IsT234DorBetter(pGpu).
4689 //
4690 ct_assert(!(RMCFG_FEATURE_GSP_CLIENT_RM && RMCFG_FEATURE_DCE_CLIENT_RM));
4691 #define IS_VIRTUAL(pGpu)                   ((pGpu)->isVirtual)
4692 #define IS_VIRTUAL_WITH_SRIOV(pGpu)        ((pGpu)->bIsVirtualWithSriov)
4693 #define IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu)  (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
4694 #define IS_VIRTUAL_WITH_FULL_SRIOV(pGpu)   (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu))
4695 #define IS_VIRTUAL_WITHOUT_SRIOV(pGpu)     (IS_VIRTUAL(pGpu) && !IS_VIRTUAL_WITH_SRIOV(pGpu))
4696 #define IS_SRIOV_HEAVY(pGpu)        (gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
4697 #define IS_SRIOV_HEAVY_GUEST(pGpu)  ((IS_VIRTUAL_WITH_SRIOV(pGpu))  &&  IS_SRIOV_HEAVY(pGpu))
4698 #define IS_SRIOV_FULL_GUEST(pGpu)   ((IS_VIRTUAL_WITH_SRIOV(pGpu))  &&  !IS_SRIOV_HEAVY(pGpu))
4699 #define IS_SRIOV_HEAVY_HOST(pGpu)   ((hypervisorIsVgxHyper())       &&  IS_SRIOV_HEAVY(pGpu))
4700 #define IS_SRIOV_FULL_HOST(pGpu)    ((hypervisorIsVgxHyper()) && gpuIsSriovEnabled(pGpu) && !IS_SRIOV_HEAVY(pGpu))
4701 #define IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu)     ((pGpu)->bVgpuGspPluginOffloadEnabled)
4702 #define IS_SRIOV_WITH_VGPU_GSP_ENABLED(pGpu)         (gpuIsSriovEnabled(pGpu) && IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu))
4703 #define IS_SRIOV_WITH_VGPU_GSP_DISABLED(pGpu)        (gpuIsSriovEnabled(pGpu) && !IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu))
4704 #define IS_MIG_RM(pGpu)                              ((pGpu)->bIsMigRm)
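
//
// Illustrative sketch (not part of the original header): these flags are
// typically used to guard small environment-specific tweaks, e.g. relaxing a
// poll interval on pre-silicon platforms. The values below are made up.
//
//     NvU32 pollDelayUs = IS_SILICON(pGpu)   ? 100 :
//                         IS_EMULATION(pGpu) ? 100000 :
//                                              1000000; // fmodel/RTL sim
//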

extern GPU_CHILD_ITER gpuGetPossibleEngDescriptorIter(void);
extern NvBool gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc);

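//
// Illustrative sketch (not part of the original header): the engine descriptor
// iterator declared above would typically be consumed in a simple loop.
//
//     GPU_CHILD_ITER it = gpuGetPossibleEngDescriptorIter();
//     ENGDESCRIPTOR  engDesc;
//
//     while (gpuGetNextPossibleEngDescriptor(&it, &engDesc))
//     {
//         // inspect engDesc here
//     }
//
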
NV_STATUS gpuCtrlExecRegOps(struct OBJGPU *, struct Graphics *, NvHandle, NvHandle, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool);
NV_STATUS gpuValidateRegOps(struct OBJGPU *, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool, NvBool);

// GPU Sanity Check Flags
#define GPU_SANITY_CHECK_FLAGS_BOOT_0                   NVBIT(0)
#define GPU_SANITY_CHECK_FLAGS_OFF_BY_N                 NVBIT(1)
#define GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH          NVBIT(2)
#define GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED    NVBIT(3)
#define GPU_SANITY_CHECK_FLAGS_FB                       NVBIT(4)

#define GPU_SANITY_CHECK_FLAGS_NONE         0x0
#define GPU_SANITY_CHECK_FLAGS_ALL          0xffffffff
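
//
// Illustrative sketch (not part of the original header): callers either pass
// GPU_SANITY_CHECK_FLAGS_ALL / _NONE or OR together the individual checks,
// for example:
//
//     NvU32 sanityFlags = GPU_SANITY_CHECK_FLAGS_BOOT_0 |
//                         GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH;
//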

//
// Macro for checking if the GPU is in reset.
//
#define API_GPU_IN_RESET_SANITY_CHECK(pGpu)                                 \
    ((NULL == pGpu) ||                                                      \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) ||             \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) ||        \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) ||                  \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING))

//
// Macro for checking if the GPU is still connected.
//
#define API_GPU_ATTACHED_SANITY_CHECK(pGpu)                           \
    ((NULL != pGpu) &&                                                \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) &&            \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET))
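
//
// Illustrative sketch (not part of the original header): API entry points
// typically bail out early when either check fails; the status code chosen
// here is only an example.
//
//     if (API_GPU_IN_RESET_SANITY_CHECK(pGpu) ||
//         !API_GPU_ATTACHED_SANITY_CHECK(pGpu))
//     {
//         return NV_ERR_GPU_IN_FULLCHIP_RESET;
//     }
//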

//
// Macro for checking if the GPU has full sanity
//
#define FULL_GPU_SANITY_CHECK(pGpu)                                     \
    ((NULL != pGpu) &&                                                  \
     gpuIsGpuFullPower(pGpu) &&                                         \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) &&              \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) &&        \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) &&   \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) &&             \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) &&                  \
     gpuCheckSysmemAccess(pGpu))

//
// Macro for checking if the GPU has full sanity for PM resume
//
#define FULL_GPU_SANITY_FOR_PM_RESUME(pGpu)                             \
    ((NULL != pGpu) &&                                                  \
     gpuIsGpuFullPowerForPmResume(pGpu) &&                              \
     pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) &&              \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) &&        \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) &&   \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) &&             \
     !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))

//
// Macro for checking if the GPU is in the recovery path
//
#define API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu)                          \
    ((NULL == pGpu) ||                                                  \
    pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY))

//******************************************************************************
//                            POWER SANITY CHECKS
//******************************************************************************
//
// Make sure the GPU is at full power or resuming from the D3 state; otherwise,
// bail out of the calling function. An exception is made for systems that
// support the surprise removal feature. See bugs 440565, 479003, and 499228.
// DO NOT IGNORE OR REMOVE THIS ASSERT. If you have problems with it, please
// talk to cplummer.
//
// bAllowWithoutSysmemAccess: Allow this RM control when sysmem access is not
// available from the GPU. Should be NV_TRUE only for
// NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS.
//
// On systems supporting surprise removal, if the GPU is in D3 cold
// and still attached, we consider it a true D3 cold state
// and return NOT_FULL_POWER. See bug 1679965.
//
#define API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, bAllowWithoutSysmemAccess) \
    if ((!gpuIsGpuFullPower(pGpu)) &&                                                \
              (!(pGpu)->getProperty((pGpu),                                          \
                             PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)))                   \
    {                                                                                \
        DBG_BREAKPOINT();                                                            \
        if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu)))                    \
        {                                                                            \
            return NV_ERR_GPU_NOT_FULL_POWER;                                        \
        }                                                                            \
        else if (gpuIsSurpriseRemovalSupported(pGpu) &&                              \
                 (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED))             \
        {                                                                            \
            return NV_ERR_GPU_NOT_FULL_POWER;                                        \
        }                                                                            \
    }                                                                                \
    if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu))                 \
    {                                                                                \
        return NV_ERR_GPU_NOT_FULL_POWER;                                            \
    }
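
//
// Illustrative sketch (not part of the original header): an RM control handler
// would typically invoke the check at its entry point. The handler name is
// made up, and GPU_RES_GET_GPU is assumed to be the usual accessor from the
// GpuResource infrastructure.
//
//     NV_STATUS subdeviceCtrlCmdFooBar_IMPL(Subdevice *pSubdevice)
//     {
//         OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
//
//         // Expands to bare "return NV_ERR_GPU_NOT_FULL_POWER;" statements on
//         // failure, so it must be placed before any cleanup is required.
//         API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
//
//         // ... body that requires full power ...
//         return NV_OK;
//     }
//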

#define API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, bGpuAccess, bAllowWithoutSysmemAccess, status, tag) \
    if ((!gpuIsGpuFullPower(pGpu)) &&                                                                     \
              (!(pGpu)->getProperty((pGpu),                                                               \
                             PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)))                                        \
    {                                                                                                     \
        DBG_BREAKPOINT();                                                                                 \
        if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu)))                                         \
        {                                                                                                 \
            status = NV_ERR_GPU_NOT_FULL_POWER;                                                           \
            goto tag;                                                                                     \
        }                                                                                                 \
        else if (gpuIsSurpriseRemovalSupported(pGpu) &&                                                   \
                (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED))                                   \
        {                                                                                                 \
            status = NV_ERR_GPU_NOT_FULL_POWER;                                                           \
            goto tag;                                                                                     \
        }                                                                                                 \
    }                                                                                                     \
    if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu))                                      \
    {                                                                                                     \
        status = NV_ERR_GPU_NOT_FULL_POWER;                                                               \
        goto tag;                                                                                         \
    }
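
//
// Illustrative sketch (not part of the original header): the _OR_GOTO variant
// is intended for paths that must release resources before returning. The
// label and cleanup below are made up.
//
//     NV_STATUS status = NV_OK;
//
//     API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, NV_TRUE, NV_FALSE, status, done);
//
//     // ... work that requires full power ...
//
// done:
//     // undo partial setup / release locks here
//     return status;
//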

//
// Identifiers for gpuGetRegBaseOffset HAL interface.
//
#define NV_REG_BASE_GR                (0x00000001)
#define NV_REG_BASE_TIMER             (0x00000002)
#define NV_REG_BASE_MASTER            (0x00000003)
#define NV_REG_BASE_USERMODE          (0x00000004)
#define NV_REG_BASE_LAST              NV_REG_BASE_USERMODE
ct_assert(NV_REG_BASE_LAST < NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX);
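
//
// Illustrative sketch (not part of the original header), assuming the
// gpuGetRegBaseOffset HAL call takes the GPU, one of the NV_REG_BASE_*
// identifiers, and an output offset; check the HAL definition for the exact
// signature.
//
//     NvU32 timerBase = 0;
//     NV_ASSERT_OK_OR_RETURN(
//         gpuGetRegBaseOffset_HAL(pGpu, NV_REG_BASE_TIMER, &timerBase));
//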

#define GPU_READ_PRI_ERROR_MASK  0xFFF00000
#define GPU_READ_PRI_ERROR_CODE  0xBAD00000

//
// Define for invalid register value. GPU could have fallen off the bus or
// the GPU could be in reset.
//
#define GPU_REG_VALUE_INVALID    0xFFFFFFFF
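
//
// Illustrative sketch (not part of the original header), assuming the
// GPU_REG_RD32 read macro from gpu_access.h: a read that hits a PRI error
// carries GPU_READ_PRI_ERROR_CODE in its upper bits, while an all-ones value
// suggests the GPU is in reset or has fallen off the bus.
//
//     NvU32 val = GPU_REG_RD32(pGpu, offset);
//
//     if ((val & GPU_READ_PRI_ERROR_MASK) == GPU_READ_PRI_ERROR_CODE)
//     {
//         // PRI access error reported by hardware
//     }
//     else if (val == GPU_REG_VALUE_INVALID)
//     {
//         // possible GPU reset or fallen-off-the-bus condition
//     }
//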

typedef struct _vgpu_static_info VGPU_STATIC_INFO;
typedef struct GspStaticConfigInfo_t GspStaticConfigInfo;

// Static info getters
VGPU_STATIC_INFO *gpuGetStaticInfo(struct OBJGPU *pGpu);
#define GPU_GET_STATIC_INFO(pGpu) gpuGetStaticInfo(pGpu)
GspStaticConfigInfo *gpuGetGspStaticInfo(struct OBJGPU *pGpu);
#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu)
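
//
// Illustrative sketch (not part of the original header): the static info
// getters are expected to return NULL when the corresponding data is not
// populated (e.g. not running as a vGPU guest), so callers should check the
// pointer before use.
//
//     VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
//     if (pVSI != NULL)
//     {
//         // consume vGPU static info
//     }
//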

NV_STATUS gpuSimEscapeWrite(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value);
NV_STATUS gpuSimEscapeWriteBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer);
NV_STATUS gpuSimEscapeRead(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value);
NV_STATUS gpuSimEscapeReadBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer);
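
//
// Illustrative sketch (not part of the original header): escape accesses are
// only meaningful on simulation/emulation platforms, so callers typically
// gate them on the platform checks above. The escape key string is made up.
//
//     if (IS_SIMULATION(pGpu))
//     {
//         NvU32 val = 0;
//         NV_ASSERT_OK(gpuSimEscapeRead(pGpu, "example_key", 0, sizeof(val), &val));
//     }
//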

//
// This function needs to be called when OBJGPU is not created. The HAL
// infrastructure can't be used in that case, so this has been added manually.
// It is invoked directly by gpumgrIsDeviceMsixAllowed().
//
NvBool gpuIsMsixAllowed_TU102(RmPhysAddr bar0BaseAddr);

#endif // _OBJGPU_H_

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_GPU_NVOC_H_