#ifndef _G_GPU_MGR_NVOC_H_
#define _G_GPU_MGR_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2005-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_gpu_mgr_nvoc.h"


#ifndef _GPUMGR_H_
#define _GPUMGR_H_

//
// GPU Manager Defines and Structures
//

struct OBJGPU;
#include "core/core.h"
#include "core/system.h"
#include "nvlimits.h"
#include "gpu_mgr/gpu_group.h"
#include "gpu/gpu_uuid.h"
#include "gpu/gpu_device_mapping.h"
#include "gpu/gpu_access.h"
#include "ctrl/ctrl0000/ctrl0000gpu.h"
#include "ctrl/ctrl2080/ctrl2080ce.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"
#include "ctrl/ctrlc637.h"
#include "nvoc/utility.h"

#include "gpu_mgr/gpu_mgr_sli.h"

#include "gpu/perf/kern_perf_gpuboostsync.h"

#include "utils/nvbitvector.h"
TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR);

#define GPUMGR_MAX_GPU_INSTANCES        8
#define GPUMGR_MAX_COMPUTE_INSTANCES    8

//
// Terminology:
//    GPU         -> entity sitting on the bus
//    Device      -> broadcast semantics; maps to one or more GPUs
//    Subdevice   -> unicast semantics; maps to a single GPU
//


////////////////////////////////////////////////////////////////////////////////
//                         DO NOT ADD NEW STUBS HERE                          //
////////////////////////////////////////////////////////////////////////////////
#define gpumgrGetGpuLinkCount(deviceInstance)                   ((NvU32) 0)
#define gpumgrGetSliLinkOutputMaskFromGpu(pGpu)                 ((NvU32) 0)
#define gpumgrGetVidLinkOutputMaskFromGpu(pGpu)                 ((NvU32) 0)
#define gpumgrGetSliLinkOrderCount(pGpu)                        ((NvU32) 0)
#define gpumgrGetSliLinkConnectionCount(pGpu)                   ((NvU32) 0)
#define gpumgrGetSLIConfig(gpuInstance, onlyWithSliLink)        ((NvU32) 0)
#define gpumgrDisableVidLink(pGpu, head, max_dr_port)
#define gpumgrGetBcEnabledStatus(g)                             (NV_FALSE)
#define gpumgrGetBcEnabledStatusEx(g, t)                        (NV_FALSE)
#define gpumgrSetBcEnabledStatus(g, b)                          do { NvBool b2 = b; (void)b2; } while (0)
#define gpumgrSLILoopReentrancy(pGpu, l, r, i, pFuncStr)
#define gpumgrSLILoopReentrancyPop(pGpu)                        ((NvU32)0)
#define gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy)    do { NvU32 x = sliLoopReentrancy; (void)x; } while(0)

typedef struct
{
    NvU32     gpuId;
    NvU64     gpuDomainBusDevice;
    NvBool    bInitAttempted;
    NvBool    bDrainState;  // no new client connections to this GPU
    NvBool    bRemoveIdle;  // remove this GPU once it's idle (detached)
    NvBool    bExcluded;    // this gpu is marked as excluded; do not use
    NvBool    bUuidValid;   // cached uuid is valid
    NvBool    bSkipHwNvlinkDisable; // skip HW register configuration for disabled links
    NvU32     initDisabledNvlinksMask;
    NV_STATUS initStatus;
    NvU8      uuid[RM_SHA1_GID_SIZE];
    OS_RM_CAPS *pOsRmCaps;    // "Opaque" pointer to os-specific capabilities
} PROBEDGPU;

#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_FLIPS                   11:4
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME                   12:12
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_INVALID       0x00000000
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_VALID         0x00000001

/*!
 * Structure for tracking resources allocated for saving primary GPU's VBIOS
 * state.  This is used for TDR/fullchip reset recovery.  The GPU object gets
 * destroyed, so the data belongs here.
 */
typedef struct _def_gpumgr_save_vbios_state
{
    RmPhysAddr vgaWorkspaceVidMemBase;        //!< Base address of the VGA workspace
    struct MEMORY_DESCRIPTOR *pSaveToMemDesc; //!< Where VGA workspace is saved to
    void *pSaveRegsOpaque;                    //!< Saved values of VGA registers
} GPUMGRSAVEVBIOSSTATE, *PGPUMGRSAVEVBIOSSTATE;

typedef struct CONF_COMPUTE_CAPS
{
    NvBool bApmFeatureCapable;
    NvBool bHccFeatureCapable;
    NvBool bCCFeatureEnabled;
    NvBool bDevToolsModeEnabled;
    NvBool bAcceptClientRequest;
    NvBool bMultiGpuProtectedPcieModeEnabled;
    NvBool bFatalFailure;
} CONF_COMPUTE_CAPS;

//
// Types of bridges supported.
// These defines are indices for the types of bridges supported.
// Preference for a given bridge type is determined by the lower value index;
// i.e., video link has the lower value index, so if both NvLink and video
// link are detected, the video link is used.
//
#define SLI_MAX_BRIDGE_TYPES    2
#define SLI_BT_VIDLINK          0
#define SLI_BT_NVLINK           1
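
//
// Illustrative sketch (detectedMask is a hypothetical detection mask with one
// bit per SLI_BT_* index; NVBIT() comes from nvmisc.h): the lowest set index
// is the preferred bridge type.
//
//     NvU8  type;
//     NvU32 detectedMask = NVBIT(SLI_BT_VIDLINK) | NVBIT(SLI_BT_NVLINK);
//
//     for (type = 0; type < SLI_MAX_BRIDGE_TYPES; type++)
//     {
//         if (detectedMask & NVBIT(type))
//             break;  // SLI_BT_VIDLINK wins when both bridges are present
//     }
//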

//
// GPU NVLINK reduced bandwidth mode
//
#define GPU_NVLINK_BW_MODE_FULL     (0x0)
#define GPU_NVLINK_BW_MODE_OFF      (0x1)
#define GPU_NVLINK_BW_MODE_MIN      (0x2)
#define GPU_NVLINK_BW_MODE_HALF     (0x3)
#define GPU_NVLINK_BW_MODE_3QUARTER (0x4)
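
//
// Illustrative sketch (not from the source): reading and requesting an NVLink
// bandwidth mode through gpumgrGetGpuNvlinkBwMode() and
// gpumgrSetGpuNvlinkBwMode(), both declared later in this header.
//
//     if (gpumgrGetGpuNvlinkBwMode() != GPU_NVLINK_BW_MODE_HALF)
//     {
//         NV_STATUS status = gpumgrSetGpuNvlinkBwMode(GPU_NVLINK_BW_MODE_HALF);
//         // status != NV_OK indicates the request was rejected
//     }
//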

typedef struct NVLINK_TOPOLOGY_PARAMS
{
    NvU32   sysmemLinks;
    NvU32   maxLinksPerPeer;
    NvBool  bSymmetric;
    // Pascal only
    NvU32   numLinks;
    // Volta +
    NvU32   numPeers;
    NvBool  bSwitchConfig;
    // Ampere +
    NvU32   pceAvailableMaskPerHshub[NV2080_CTRL_CE_MAX_HSHUBS];
    NvU32   fbhubPceMask;
    NvU32   maxPceLceMap[NV2080_CTRL_MAX_PCES];
    NvU32   maxGrceConfig[NV2080_CTRL_MAX_GRCES];
    NvU32   maxExposeCeMask;
    NvU32   maxTopoIdx;       // For table configs only; not applicable for algorithm
} NVLINK_TOPOLOGY_PARAMS, *PNVLINK_TOPOLOGY_PARAMS;

typedef struct _def_gpu_nvlink_topology_info
{
    NvBool  valid;
    NvU64   DomainBusDevice;
    NVLINK_TOPOLOGY_PARAMS params;
} NVLINK_TOPOLOGY_INFO, *PNVLINK_TOPOLOGY_INFO;

typedef struct
{
    OBJGPU     *pGpu;
    NvU32       gpuInstance;
} GPU_HANDLE_ID;

/*!
 * @brief Persistent compute instance storage
 *        Stores information required to recreate a compute instance which
 *        was created on an attached GPU, which was then detached.
 */
typedef struct GPUMGR_SAVE_COMPUTE_INSTANCE
{
    // NV_TRUE if a compute instance with the below resources should be restored
    NvBool bValid;
    // Exported compute instance information for persistence
    NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO ciInfo;
    // ID of compute instance
    NvU32 id;
    // "Opaque" pointer to os-specific capabilities
    OS_RM_CAPS *pOsRmCaps;
} GPUMGR_SAVE_COMPUTE_INSTANCE;

/*!
 * @brief Persistent GPU instance storage
 *        Stores information required to recreate a GPU instance which
 *        was created on an attached GPU, which was then detached.
 */
typedef struct GPUMGR_SAVE_GPU_INSTANCE
{
    // NV_TRUE if a GPU instance with the below resources should be restored
    NvBool bValid;
    // Exported GPU instance information for persistence
    NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO giInfo;
    // SwizzId of the saved GPU instance to restore to
    NvU32 swizzId;
    // "Opaque" pointer to os-specific capabilities
    OS_RM_CAPS *pOsRmCaps;
    // Saved compute instance information. May or may not have valid entries.
    GPUMGR_SAVE_COMPUTE_INSTANCE saveCI[GPUMGR_MAX_COMPUTE_INSTANCES];
} GPUMGR_SAVE_GPU_INSTANCE;

/*!
 * @brief Persistent MIG instance topology storage
 *        Stores information required to recreate all MIG instances which
 *        were created on an attached GPU, which was then detached.
 */
typedef struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY
{
    // NV_TRUE if a GPU was attached with associated DBDF.
    NvBool bValid;
    // The PCI Domain/Bus/Device/Function of the GPU for which this struct was saved.
    NvU64 domainBusDevice;
    // Flag checking whether we have restored from static info since boot
    NvBool bVgpuRestoredFromStaticInfo;
    // MIG repartitioning mode last registered for the GPU this struct was saved for.
    NvBool bMIGEnabled;
    // Saved instance information. May or may not have any valid entries.
    GPUMGR_SAVE_GPU_INSTANCE saveGI[GPUMGR_MAX_GPU_INSTANCES];
} GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY;

typedef struct GPUMGR_CACHED_MIG_GPU_INSTANCE
{
    NvBool bValid;
    NvU32 swizzId;
    NvBool bValidComputeInstances[GPUMGR_MAX_COMPUTE_INSTANCES];
} GPUMGR_CACHED_MIG_GPU_INSTANCE;

typedef struct GPUMGR_CACHED_MIG_STATE
{
    NvBool bValid;
    NvBool bMIGEnabled;
    NvU32 gpuId;
    GPUMGR_CACHED_MIG_GPU_INSTANCE gpuInstances[GPUMGR_MAX_GPU_INSTANCES];
} GPUMGR_CACHED_MIG_STATE;


#include "containers/list.h"
typedef struct PCIEP2PCAPSINFO
{
    NvU32    gpuId[GPUMGR_MAX_GPU_INSTANCES]; // Group of GPUs
    NvU32    gpuCount;                        // GPU count in gpuId[]
    NvU8     p2pWriteCapsStatus;              // PCIE P2P CAPS status for this group of GPUs
    NvU8     p2pReadCapsStatus;
    ListNode node;                            // For intrusive lists
} PCIEP2PCAPSINFO;
MAKE_INTRUSIVE_LIST(pcieP2PCapsInfoList, PCIEP2PCAPSINFO, node);


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
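
//
// Illustrative sketch (EXAMPLE_CLASS and its fields are hypothetical): how a
// private member is declared through PRIVATE_FIELD. Only the translation unit
// that defines NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED sees the plain field
// name; other source files that reference it get NVOC diagnostics.
//
//     struct EXAMPLE_CLASS {
//         NvU32 publicCount;                  // accessible to all callers
//         NvU32 PRIVATE_FIELD(internalState); // private outside the owning .c file
//     };
//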

struct OBJGPUMGR {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct OBJGPUMGR *__nvoc_pbase_OBJGPUMGR;
    PROBEDGPU probedGpus[32];
    void *probedGpusLock;
    NvU32 gpuAttachCount;
    NvU32 gpuAttachMask;
    NvU32 gpuMonolithicRmMask;
    NvU32 persistentSwStateGpuMask;
    NvU32 deviceCount;
    struct OBJGPUGRP *pGpuGrpTable[32];
    NvU32 gpuInstMaskTable[32];
    NvU8 gpuBridgeType;
    NvU8 gpuSliLinkRoute[2][32][32][2];
    SLI_GPU_BOOST_SYNC sliGpuBoostSync;
    GPUMGRSAVEVBIOSSTATE primaryVbiosState;
    NvU8 powerDisconnectedGpuCount;
    NvU8 powerDisconnectedGpuBus[32];
    NVLINK_TOPOLOGY_INFO nvlinkTopologyInfo[32];
    NvU8 nvlinkBwMode;
    GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY MIGTopologyInfo[32];
    void *cachedMIGInfoLock;
    GPUMGR_CACHED_MIG_STATE cachedMIGInfo[32];
    GPU_HANDLE_ID gpuHandleIDList[32];
    NvU32 numGpuHandles;
    CONF_COMPUTE_CAPS ccCaps;
    NvU64 ccAttackerAdvantage;
    pcieP2PCapsInfoList pcieP2PCapsInfoCache;
    void *pcieP2PCapsInfoLock;
};

#ifndef __NVOC_CLASS_OBJGPUMGR_TYPEDEF__
#define __NVOC_CLASS_OBJGPUMGR_TYPEDEF__
typedef struct OBJGPUMGR OBJGPUMGR;
#endif /* __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ */

#ifndef __nvoc_class_id_OBJGPUMGR
#define __nvoc_class_id_OBJGPUMGR 0xcf1b25
#endif /* __nvoc_class_id_OBJGPUMGR */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR;

#define __staticCast_OBJGPUMGR(pThis) \
    ((pThis)->__nvoc_pbase_OBJGPUMGR)

#ifdef __nvoc_gpu_mgr_h_disabled
#define __dynamicCast_OBJGPUMGR(pThis) ((OBJGPUMGR*)NULL)
#else //__nvoc_gpu_mgr_h_disabled
#define __dynamicCast_OBJGPUMGR(pThis) \
    ((OBJGPUMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUMGR)))
#endif //__nvoc_gpu_mgr_h_disabled


NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32);
#define __objCreate_OBJGPUMGR(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_OBJGPUMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

NV_STATUS gpumgrInitPcieP2PCapsCache_IMPL(struct OBJGPUMGR *pGpuMgr);


#define gpumgrInitPcieP2PCapsCache(pGpuMgr) gpumgrInitPcieP2PCapsCache_IMPL(pGpuMgr)
#define gpumgrInitPcieP2PCapsCache_HAL(pGpuMgr) gpumgrInitPcieP2PCapsCache(pGpuMgr)

void gpumgrDestroyPcieP2PCapsCache_IMPL(struct OBJGPUMGR *pGpuMgr);


#define gpumgrDestroyPcieP2PCapsCache(pGpuMgr) gpumgrDestroyPcieP2PCapsCache_IMPL(pGpuMgr)
#define gpumgrDestroyPcieP2PCapsCache_HAL(pGpuMgr) gpumgrDestroyPcieP2PCapsCache(pGpuMgr)

NV_STATUS gpumgrStorePcieP2PCapsCache_IMPL(NvU32 gpuMask, NvU8 p2pWriteCapStatus, NvU8 p2pReadCapStatus);


#define gpumgrStorePcieP2PCapsCache(gpuMask, p2pWriteCapStatus, p2pReadCapStatus) gpumgrStorePcieP2PCapsCache_IMPL(gpuMask, p2pWriteCapStatus, p2pReadCapStatus)
#define gpumgrStorePcieP2PCapsCache_HAL(gpuMask, p2pWriteCapStatus, p2pReadCapStatus) gpumgrStorePcieP2PCapsCache(gpuMask, p2pWriteCapStatus, p2pReadCapStatus)

void gpumgrRemovePcieP2PCapsFromCache_IMPL(NvU32 gpuId);


#define gpumgrRemovePcieP2PCapsFromCache(gpuId) gpumgrRemovePcieP2PCapsFromCache_IMPL(gpuId)
#define gpumgrRemovePcieP2PCapsFromCache_HAL(gpuId) gpumgrRemovePcieP2PCapsFromCache(gpuId)

NvBool gpumgrGetPcieP2PCapsFromCache_IMPL(NvU32 gpuMask, NvU8 *pP2PWriteCapStatus, NvU8 *pP2PReadCapStatus);


#define gpumgrGetPcieP2PCapsFromCache(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus) gpumgrGetPcieP2PCapsFromCache_IMPL(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus)
#define gpumgrGetPcieP2PCapsFromCache_HAL(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus) gpumgrGetPcieP2PCapsFromCache(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus)
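
//
// Illustrative sketch (not from the source; gpuMask and the cap derivation
// are assumed): querying the PCIe P2P caps cache for a GPU group and
// populating it on a miss.
//
//     NvU8 writeStatus, readStatus;
//
//     if (!gpumgrGetPcieP2PCapsFromCache(gpuMask, &writeStatus, &readStatus))
//     {
//         // Cache miss: derive writeStatus/readStatus for this group
//         // (not shown), then store them so later lookups hit.
//         NV_STATUS status = gpumgrStorePcieP2PCapsCache(gpuMask, writeStatus, readStatus);
//     }
//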

NV_STATUS gpumgrConstruct_IMPL(struct OBJGPUMGR *arg_);

#define __nvoc_gpumgrConstruct(arg_) gpumgrConstruct_IMPL(arg_)
void gpumgrDestruct_IMPL(struct OBJGPUMGR *arg0);

#define __nvoc_gpumgrDestruct(arg0) gpumgrDestruct_IMPL(arg0)
void gpumgrAddSystemNvlinkTopo_IMPL(NvU64 DomainBusDevice);

#define gpumgrAddSystemNvlinkTopo(DomainBusDevice) gpumgrAddSystemNvlinkTopo_IMPL(DomainBusDevice)
NvBool gpumgrGetSystemNvlinkTopo_IMPL(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams);

#define gpumgrGetSystemNvlinkTopo(DomainBusDevice, pTopoParams) gpumgrGetSystemNvlinkTopo_IMPL(DomainBusDevice, pTopoParams)
void gpumgrUpdateSystemNvlinkTopo_IMPL(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams);

#define gpumgrUpdateSystemNvlinkTopo(DomainBusDevice, pTopoParams) gpumgrUpdateSystemNvlinkTopo_IMPL(DomainBusDevice, pTopoParams)
NV_STATUS gpumgrSetGpuInitDisabledNvlinks_IMPL(NvU32 gpuId, NvU32 mask, NvBool bSkipHwNvlinkDisable);

#define gpumgrSetGpuInitDisabledNvlinks(gpuId, mask, bSkipHwNvlinkDisable) gpumgrSetGpuInitDisabledNvlinks_IMPL(gpuId, mask, bSkipHwNvlinkDisable)
NV_STATUS gpumgrGetGpuInitDisabledNvlinks_IMPL(NvU32 gpuId, NvU32 *pMask, NvBool *pbSkipHwNvlinkDisable);

#define gpumgrGetGpuInitDisabledNvlinks(gpuId, pMask, pbSkipHwNvlinkDisable) gpumgrGetGpuInitDisabledNvlinks_IMPL(gpuId, pMask, pbSkipHwNvlinkDisable)
NvU8 gpumgrGetGpuNvlinkBwMode_IMPL(void);

#define gpumgrGetGpuNvlinkBwMode() gpumgrGetGpuNvlinkBwMode_IMPL()
void gpumgrSetGpuNvlinkBwModeFromRegistry_IMPL(struct OBJGPU *pGpu);

#define gpumgrSetGpuNvlinkBwModeFromRegistry(pGpu) gpumgrSetGpuNvlinkBwModeFromRegistry_IMPL(pGpu)
NV_STATUS gpumgrSetGpuNvlinkBwMode_IMPL(NvU8 mode);

#define gpumgrSetGpuNvlinkBwMode(mode) gpumgrSetGpuNvlinkBwMode_IMPL(mode)
NvBool gpumgrCheckIndirectPeer_IMPL(struct OBJGPU *pGpu, struct OBJGPU *pRemoteGpu);

#define gpumgrCheckIndirectPeer(pGpu, pRemoteGpu) gpumgrCheckIndirectPeer_IMPL(pGpu, pRemoteGpu)
void gpumgrAddSystemMIGInstanceTopo_IMPL(NvU64 domainBusDevice);

#define gpumgrAddSystemMIGInstanceTopo(domainBusDevice) gpumgrAddSystemMIGInstanceTopo_IMPL(domainBusDevice)
NvBool gpumgrGetSystemMIGInstanceTopo_IMPL(NvU64 domainBusDevice, struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY **ppTopoParams);

#define gpumgrGetSystemMIGInstanceTopo(domainBusDevice, ppTopoParams) gpumgrGetSystemMIGInstanceTopo_IMPL(domainBusDevice, ppTopoParams)
NvBool gpumgrIsSystemMIGEnabled_IMPL(NvU64 domainBusDevice);

#define gpumgrIsSystemMIGEnabled(domainBusDevice) gpumgrIsSystemMIGEnabled_IMPL(domainBusDevice)
void gpumgrSetSystemMIGEnabled_IMPL(NvU64 domainBusDevice, NvBool bMIGEnabled);

#define gpumgrSetSystemMIGEnabled(domainBusDevice, bMIGEnabled) gpumgrSetSystemMIGEnabled_IMPL(domainBusDevice, bMIGEnabled)
void gpumgrUnregisterRmCapsForMIGGI_IMPL(NvU64 gpuDomainBusDevice);

#define gpumgrUnregisterRmCapsForMIGGI(gpuDomainBusDevice) gpumgrUnregisterRmCapsForMIGGI_IMPL(gpuDomainBusDevice)
void gpumgrCacheCreateGpuInstance_IMPL(struct OBJGPU *pGpu, NvU32 swizzId);

#define gpumgrCacheCreateGpuInstance(pGpu, swizzId) gpumgrCacheCreateGpuInstance_IMPL(pGpu, swizzId)
void gpumgrCacheDestroyGpuInstance_IMPL(struct OBJGPU *pGpu, NvU32 swizzId);

#define gpumgrCacheDestroyGpuInstance(pGpu, swizzId) gpumgrCacheDestroyGpuInstance_IMPL(pGpu, swizzId)
void gpumgrCacheCreateComputeInstance_IMPL(struct OBJGPU *pGpu, NvU32 swizzId, NvU32 ciId);

#define gpumgrCacheCreateComputeInstance(pGpu, swizzId, ciId) gpumgrCacheCreateComputeInstance_IMPL(pGpu, swizzId, ciId)
void gpumgrCacheDestroyComputeInstance_IMPL(struct OBJGPU *pGpu, NvU32 swizzId, NvU32 ciId);

#define gpumgrCacheDestroyComputeInstance(pGpu, swizzId, ciId) gpumgrCacheDestroyComputeInstance_IMPL(pGpu, swizzId, ciId)
void gpumgrCacheSetMIGEnabled_IMPL(struct OBJGPU *pGpu, NvBool bMIGEnabled);

#define gpumgrCacheSetMIGEnabled(pGpu, bMIGEnabled) gpumgrCacheSetMIGEnabled_IMPL(pGpu, bMIGEnabled)
NV_STATUS gpumgrCacheGetActiveDeviceIds_IMPL(NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *pActiveDeviceIdsParams);

#define gpumgrCacheGetActiveDeviceIds(pActiveDeviceIdsParams) gpumgrCacheGetActiveDeviceIds_IMPL(pActiveDeviceIdsParams)
void gpumgrUpdateBoardId_IMPL(struct OBJGPU *arg0);

#define gpumgrUpdateBoardId(arg0) gpumgrUpdateBoardId_IMPL(arg0)
void gpumgrServiceInterrupts_IMPL(NvU32 arg0, MC_ENGINE_BITVECTOR *arg1, NvBool arg2);

#define gpumgrServiceInterrupts(arg0, arg1, arg2) gpumgrServiceInterrupts_IMPL(arg0, arg1, arg2)
#undef PRIVATE_FIELD


typedef struct {
    NvBool         specified;                           // Set this flag when using this struct
    NvBool         bIsIGPU;                             // Set this flag for iGPU

    DEVICE_MAPPING deviceMapping[DEVICE_INDEX_MAX];     // Register Aperture mapping
    NvU32          socChipId0;                          // Chip ID used for HAL binding
    NvU32          iovaspaceId;                         // SMMU client ID
} SOCGPUATTACHARG;

//
// Packages up system/bus state for attach process.
//
typedef struct GPUATTACHARG
{
    GPUHWREG   *regBaseAddr;
    GPUHWREG   *fbBaseAddr;
    GPUHWREG   *instBaseAddr;
    RmPhysAddr  devPhysAddr;
    RmPhysAddr  fbPhysAddr;
    RmPhysAddr  instPhysAddr;
    RmPhysAddr  ioPhysAddr;
    NvU64       nvDomainBusDeviceFunc;
    NvU32       regLength;
    NvU64       fbLength;
    NvU32       instLength;
    NvU32       intLine;
    void        *pOsAttachArg;
    NvBool      bIsSOC;
    NvU32       socDeviceCount;
    DEVICE_MAPPING socDeviceMappings[GPU_MAX_DEVICE_MAPPINGS];
    NvU32       socId;
    NvU32       socSubId;
    NvU32       socChipId0;
    NvU32       iovaspaceId;
    NvBool      bRequestFwClientRm;
    NvS32       cpuNumaNodeId;

    //
    // The SOC-specific fields above are legacy fields that were added for
    // ARCH MODS iGPU verification. There is a plan to deprecate these fields as
    // part of an effort to clean up the existing iGPU code in RM.
    //
    // Starting with T234D+, the SOCGPUATTACHARG field below will be used to
    // pass the required attach info for a single SOC device from the RM OS
    // layer to core RM.
    //
    SOCGPUATTACHARG socDeviceArgs;
} GPUATTACHARG;
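
//
// Illustrative sketch (all right-hand values are placeholders supplied by the
// OS layer, not defined here): populating a GPUATTACHARG for a discrete GPU
// and handing it to gpumgrAttachGpu() for the chosen deviceInstance.
//
//     GPUATTACHARG attachArg = {0};
//
//     attachArg.devPhysAddr           = bar0PhysAddr;       // register BAR physical address
//     attachArg.regLength             = bar0Length;         // register aperture size
//     attachArg.fbPhysAddr            = bar1PhysAddr;       // framebuffer BAR physical address
//     attachArg.fbLength              = bar1Length;
//     attachArg.intLine               = irqLine;            // interrupt line
//     attachArg.nvDomainBusDeviceFunc = domainBusDeviceFunc;
//     attachArg.pOsAttachArg          = pOsState;           // opaque OS-layer context
//
//     NV_STATUS status = gpumgrAttachGpu(deviceInstance, &attachArg);
//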

NV_STATUS   gpumgrThreadEnableExpandedGpuVisibility(void);
void        gpumgrThreadDisableExpandedGpuVisibility(void);
NvBool      gpumgrThreadHasExpandedGpuVisibility(void);

NV_STATUS   gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask);
NV_STATUS   gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *);
NV_STATUS   gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice);
NV_STATUS   gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *);
NV_STATUS   gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *);
NV_STATUS   gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *);
void        gpumgrSetGpuId(OBJGPU*, NvU32 gpuId);
NV_STATUS   gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *);
void        gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status);
OBJGPU*     gpumgrGetGpuFromId(NvU32 gpuId);
OBJGPU*     gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags);
OBJGPU*     gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device);
NvU32       gpumgrGetDefaultPrimaryGpu(NvU32 gpuMask);
NV_STATUS   gpumgrAllocGpuInstance(NvU32 *pDeviceInstance);
NV_STATUS   gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice);
NV_STATUS   gpumgrUnregisterGpuId(NvU32 gpuId);
NV_STATUS   gpumgrExcludeGpuId(NvU32 gpuId);
NV_STATUS   gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid);
NV_STATUS   gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags);
// gpumgrGetRmFirmwarePolicy() and gpumgrGetRmFirmwareLogsEnabled() contain
// all of the logic for deciding the firmware-loading policies, so they need
// to be compiled on all platforms, not just those actually running the
// firmware.
void        gpumgrGetRmFirmwarePolicy(NvU32 chipId, NvU32 pmcBoot42, NvBool bIsSoc,
                                      NvU32 enableFirmwareRegVal, NvBool *pbRequestFirmware,
                                      NvBool *pbAllowFallbackToMonolithicRm);
NvBool      gpumgrGetRmFirmwareLogsEnabled(NvU32 enableFirmwareLogsRegVal);
NvBool      gpumgrIsDeviceRmFirmwareCapable(NvU16 devId, NvU32 pmcBoot42,
                                            NvBool bIsSoc, NvBool *pbEnableByDefault);
NvBool      gpumgrIsVgxRmFirmwareCapableChip(NvU32 pmcBoot42);
NV_STATUS   gpumgrAttachGpu(NvU32 deviceInstance, GPUATTACHARG *);
NV_STATUS   gpumgrDetachGpu(NvU32 deviceInstance);
OBJGPU*     gpumgrGetNextGpu(NvU32 gpuMask, NvU32 *pStartIndex);
NV_STATUS   gpumgrStatePreInitGpu(OBJGPU*);
NV_STATUS   gpumgrStateInitGpu(OBJGPU*);
NV_STATUS   gpumgrStateLoadGpu(OBJGPU*, NvU32);
NV_STATUS   gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance);
NV_STATUS   gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal);
NV_STATUS   gpumgrDestroyDevice(NvU32 deviceInstance);
NvU32       gpumgrGetDeviceInstanceMask(void);
NvU32       gpumgrGetDeviceGpuMask(NvU32 deviceInstance);
NV_STATUS   gpumgrIsDeviceInstanceValid(NvU32 deviceInstance);
NvU32       gpumgrGetPrimaryForDevice(NvU32 deviceInstance);
NvBool      gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance);
NvBool      gpumgrIsDeviceEnabled(NvU32 deviceInstance);
NvU32       gpumgrGetGpuMask(OBJGPU *pGpu);
OBJGPU*     gpumgrGetGpu(NvU32 deviceInstance);
OBJGPU*     gpumgrGetSomeGpu(void);
NvU32       gpumgrGetSubDeviceCount(NvU32 gpuMask);
NvU32       gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu);
NvU32       gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu);
NvU32       gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu);
OBJGPU*     gpumgrGetParentGPU(OBJGPU *pGpu);
void        gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu);
NvBool      gpumgrIsGpuDisplayParent(OBJGPU*);
OBJGPU*     gpumgrGetDisplayParent(OBJGPU*);
NV_STATUS   gpumgrGetGpuLockAndDrPorts(OBJGPU*, OBJGPU*, NvU32 *, NvU32 *);
NV_STATUS   gpumgrGetBootPrimary(OBJGPU **ppGpu);
OBJGPU*     gpumgrGetMGpu(void);
RmPhysAddr  gpumgrGetGpuPhysFbAddr(OBJGPU*);
OBJGPU*     gpumgrGetGpuFromSubDeviceInst(NvU32, NvU32);
NV_STATUS   gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask);
NV_STATUS   gpumgrRemoveDeviceInstanceFromGpus(NvU32 gpuMask);
NV_STATUS   gpumgrConstructGpuGrpObject(struct OBJGPUMGR *pGpuMgr, NvU32 gpuMask, struct OBJGPUGRP **ppGpuGrp);
struct OBJGPUGRP*  gpumgrGetGpuGrpFromGpu(OBJGPU *pGpu);
struct OBJGPUGRP*  gpumgrGetGpuGrpFromInstance(NvU32 gpugrpInstance);
NV_STATUS   gpumgrModifyGpuDrainState(NvU32 gpuId, NvBool bEnable, NvBool bRemove, NvBool bLinkDisable);
NV_STATUS   gpumgrQueryGpuDrainState(NvU32 gpuId, NvBool *pBEnable, NvBool *pBRemove);
NvBool      gpumgrIsGpuPointerValid(OBJGPU *pGpu);
NvBool      gpumgrIsGpuPointerAttached(OBJGPU *pGpu);
NvU32       gpumgrGetGrpMaskFromGpuInst(NvU32 gpuInst);
void        gpumgrAddDeviceMaskToGpuInstTable(NvU32 gpuMask);
void        gpumgrClearDeviceMaskFromGpuInstTable(NvU32 gpuMask);
NvBool      gpumgrSetGpuAcquire(OBJGPU *pGpu);
void        gpumgrSetGpuRelease(void);
NvU8        gpumgrGetGpuBridgeType(void);
NvBool      gpumgrAreAllGpusInOffloadMode(void);
NvBool      gpumgrIsSafeToReadGpuInfo(void);
NvBool      gpumgrIsDeviceMsixAllowed(RmPhysAddr bar0BaseAddr, NvU32 pmcBoot1, NvU32 pmcBoot42);

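//
// Illustrative sketch (not from the source): the usual pattern for visiting
// each attached GPU, combining gpumgrGetGpuAttachInfo() with
// gpumgrGetNextGpu().
//
//     NvU32   gpuCount, gpuMask, gpuIndex = 0;
//     OBJGPU *pGpu;
//
//     if (gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask) == NV_OK)
//     {
//         while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL)
//         {
//             // per-GPU work goes here
//         }
//     }
//
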
//
// gpumgrIsSubDeviceCountOne
//
static NV_INLINE NvBool
gpumgrIsSubDeviceCountOne(NvU32 gpuMask)
{
    //
    // A fast version of gpumgrGetSubDeviceCount(gpuMask) == 1.
    // Make sure it returns 0 for gpuMask==0, just like gpumgrGetSubDeviceCount(0)!!!
    //
    return gpuMask != 0 && (gpuMask & (gpuMask - 1)) == 0;
}

//
// gpumgrIsParentGPU
//
static NV_INLINE NvBool
gpumgrIsParentGPU(OBJGPU *pGpu)
{
    return gpumgrGetParentGPU(pGpu) == pGpu;
}

#endif // _GPUMGR_H_

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_GPU_MGR_NVOC_H_