1 #ifndef _G_GPU_GROUP_NVOC_H_
2 #define _G_GPU_GROUP_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 #include "g_gpu_group_nvoc.h"
32 
33 #ifndef GPU_GROUP_H
34 #define GPU_GROUP_H
35 
36 /**************** Resource Manager Defines and Structures ******************\
37 *                                                                           *
38 *       Defines and structures used for GPUGRP Object.                      *
39 *                                                                           *
40 \***************************************************************************/
41 
42 #include "core/core.h"
43 #include "nvoc/object.h"
44 #include "nvlimits.h"
45 
46 struct OBJVASPACE;
47 struct OBJGPU;
48 
49 /*!
50  * @brief Specialization of @ref FOR_EACH_INDEX_IN_MASK for looping
51  *        over each GPU in an instance bitmask and processing the GPU in
52  *        unicast mode.
53  *
54  * @note This macro is constructed to handle 'continue' and 'break'
55  *       statements but not 'return.' Do NOT return directly from the loop -
56  *       use status variable and 'break' to safely abort.
57  *
58  * @param[in]       maskWidth   bit-width of the mask (allowed: 8, 16, 32, 64)
59  * @param[in,out]   pGpu        Local GPU variable to use.
60  * @param[in]       mask        GPU instance bitmask.
61  */
/*
 * Iteration scheme: on entry, remember the caller's current pGpu and its
 * broadcast (BC) state. For every GPU instance set in 'mask', fetch the GPU,
 * save its own BC state, and force unicast (BC = NV_FALSE) for the loop body.
 * The matching FOR_EACH_GPU_IN_MASK_UC_END restores all saved state.
 * NOTE(review): the pSys argument is not used anywhere in this expansion.
 */
#define FOR_EACH_GPU_IN_MASK_UC(maskWidth, pSys, pGpu, mask)            \
{                                                                       \
    NvU32  gpuInstance;                                                 \
    NvBool bOrigBcState = NV_FALSE;                                     \
    NvBool bEntryBcState = NV_FALSE;                                    \
    OBJGPU *pEntryGpu = pGpu;                                           \
    pGpu = NULL;                                                        \
    if (pEntryGpu != NULL)                                              \
    {                                                                   \
        bEntryBcState = gpumgrGetBcEnabledStatus(pEntryGpu);            \
    }                                                                   \
    FOR_EACH_INDEX_IN_MASK(maskWidth, gpuInstance, mask)                \
    {                                                                   \
        if (NULL != pGpu) /* continue */                                \
        {                                                               \
            gpumgrSetBcEnabledStatus(pGpu, bOrigBcState);               \
        }                                                               \
        pGpu = gpumgrGetGpu(gpuInstance);                               \
        if (pGpu == NULL)                                               \
        {                 /* We should never hit this assert */         \
            NV_ASSERT(0); /* But it occurs very rarely */               \
            continue;     /* It needs to be debugged */                 \
        }                                                               \
        bOrigBcState = gpumgrGetBcEnabledStatus(pGpu);                  \
        gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);                       \

/*
 * Terminates FOR_EACH_GPU_IN_MASK_UC. Restores the BC state of the last
 * iterated GPU (this path is also reached via 'break'), then restores pGpu
 * to the GPU that was current on loop entry, asserting that its BC state
 * was not disturbed by the loop body.
 */
#define FOR_EACH_GPU_IN_MASK_UC_END                                      \
    }                                                                    \
    FOR_EACH_INDEX_IN_MASK_END                                           \
    if (NULL != pGpu) /* break */                                        \
    {                                                                    \
        gpumgrSetBcEnabledStatus(pGpu, bOrigBcState);                    \
        pGpu = NULL;                                                     \
    }                                                                    \
    if (pEntryGpu != NULL)                                               \
    {                                                                    \
        NV_ASSERT(bEntryBcState == gpumgrGetBcEnabledStatus(pEntryGpu)); \
        pGpu = pEntryGpu;                                                \
    }                                                                    \
}
102 
/*!
 * One node of an SLI video-link ordering: identifies a GPU instance and the
 * DrPorts connecting it to its parent and child in the link chain.
 * (Used as the element type of OBJGPUGRP::SliLinkOrder.)
 */
typedef struct _def_vid_link_node
{
    /*!
     * GPU instance for this node
     */
    NvU32 gpuInstance;
    /*!
     * DrPort that receives data from Child GPU
     */
    NvU32 ParentDrPort;
    /*!
     * DrPort that sources data to a Parent GPU
     */
    NvU32 ChildDrPort;
} SLILINKNODE;
118 
119 typedef struct OBJGPUGRP *POBJGPUGRP;
120 
121 #ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
122 #define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
123 typedef struct OBJGPUGRP OBJGPUGRP;
124 #endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */
125 
126 #ifndef __nvoc_class_id_OBJGPUGRP
127 #define __nvoc_class_id_OBJGPUGRP 0xe40531
128 #endif /* __nvoc_class_id_OBJGPUGRP */
129 
130 
131 
132 #ifdef NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED
133 #define PRIVATE_FIELD(x) x
134 #else
135 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
136 #endif
/*!
 * NVOC class OBJGPUGRP: state for a group of GPUs (derives from Object).
 * Field semantics below are inferred from the accessors declared in this
 * header; entries marked NOTE(review) should be confirmed against the
 * implementation.
 */
struct OBJGPUGRP {
    const struct NVOC_RTTI *__nvoc_rtti;        /*! NVOC run-time type info */
    struct Object __nvoc_base_Object;           /*! Base class instance */
    struct Object *__nvoc_pbase_Object;         /*! Cached base-class pointer */
    struct OBJGPUGRP *__nvoc_pbase_OBJGPUGRP;   /*! Cached this-class pointer */
    NvU32 gpuMask;                              /*! Mask of GPU instances in this group (see gpugrpGet/SetGpuMask) */
    NvU32 gpuSliLinkMask;                       /*! NOTE(review): presumably GPUs connected by SLI video link - confirm */
    NvU32 linkingGpuMask;                       /*! NOTE(review): presumably GPUs participating in linking - confirm */
    NvU32 attachedGpuMaskAtLinking;             /*! NOTE(review): presumably attached-GPU mask captured at link time - confirm */
    SLILINKNODE SliLinkOrder[8];                /*! SLI link ordering; NOTE(review): size 8 is hard-coded here - confirm it tracks the device limit in nvlimits.h */
    NvU32 ConnectionCount;                      /*! NOTE(review): presumably count of valid SliLinkOrder entries - confirm */
    NvU32 flags;                                /*! Group flags (semantics defined by the implementation) */
    NvU32 displayFlags;                         /*! Display-related flags (semantics defined by the implementation) */
    NvBool bcEnabled;                           /*! Broadcast-enabled state (see gpugrpGet/SetBcEnabledState) */
    struct OBJGPU *parentGpu;                   /*! Parent GPU of the group (see gpugrpGet/SetParentGpu) */
    struct OBJVASPACE *pGlobalVASpace;          /*! Group-wide VA space (see gpugrp*GlobalVASpace) */
};
154 
155 #ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
156 #define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
157 typedef struct OBJGPUGRP OBJGPUGRP;
158 #endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */
159 
160 #ifndef __nvoc_class_id_OBJGPUGRP
161 #define __nvoc_class_id_OBJGPUGRP 0xe40531
162 #endif /* __nvoc_class_id_OBJGPUGRP */
163 
164 extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP;
165 
166 #define __staticCast_OBJGPUGRP(pThis) \
167     ((pThis)->__nvoc_pbase_OBJGPUGRP)
168 
169 #ifdef __nvoc_gpu_group_h_disabled
170 #define __dynamicCast_OBJGPUGRP(pThis) ((OBJGPUGRP*)NULL)
171 #else //__nvoc_gpu_group_h_disabled
172 #define __dynamicCast_OBJGPUGRP(pThis) \
173     ((OBJGPUGRP*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUGRP)))
174 #endif //__nvoc_gpu_group_h_disabled
175 
176 
177 NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32, va_list);
178 
179 NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32);
180 #define __objCreate_OBJGPUGRP(ppNewObj, pParent, createFlags) \
181     __nvoc_objCreate_OBJGPUGRP((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
182 
/*!
 * @brief Initialize the GPU group for the given GPU instance mask.
 *        Implementation lives outside this generated header.
 */
NV_STATUS gpugrpCreate_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports unsupported. */
static inline NV_STATUS gpugrpCreate(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpCreate(pGpuGrp, gpuMask) gpugrpCreate_IMPL(pGpuGrp, gpuMask)
#endif //__nvoc_gpu_group_h_disabled
193 
/*!
 * @brief Tear down the GPU group. Implementation lives outside this header.
 */
NV_STATUS gpugrpDestroy_IMPL(struct OBJGPUGRP *pGpuGrp);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports unsupported. */
static inline NV_STATUS gpugrpDestroy(struct OBJGPUGRP *pGpuGrp) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpDestroy(pGpuGrp) gpugrpDestroy_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
204 
/*!
 * @brief Return the group's GPU instance mask (OBJGPUGRP::gpuMask accessor).
 */
NvU32 gpugrpGetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and returns an empty mask. */
static inline NvU32 gpugrpGetGpuMask(struct OBJGPUGRP *pGpuGrp) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return 0;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetGpuMask(pGpuGrp) gpugrpGetGpuMask_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
215 
/*!
 * @brief Set the group's GPU instance mask (OBJGPUGRP::gpuMask mutator).
 */
void gpugrpSetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts; the call is a no-op. */
static inline void gpugrpSetGpuMask(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpSetGpuMask(pGpuGrp, gpuMask) gpugrpSetGpuMask_IMPL(pGpuGrp, gpuMask)
#endif //__nvoc_gpu_group_h_disabled
225 
/*!
 * @brief Return the group's broadcast-enabled state
 *        (OBJGPUGRP::bcEnabled accessor).
 */
NvBool gpugrpGetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports BC disabled. */
static inline NvBool gpugrpGetBcEnabledState(struct OBJGPUGRP *pGpuGrp) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_FALSE;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetBcEnabledState(pGpuGrp) gpugrpGetBcEnabledState_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
236 
/*!
 * @brief Set the group's broadcast-enabled state
 *        (OBJGPUGRP::bcEnabled mutator).
 */
void gpugrpSetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp, NvBool bcState);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts; the call is a no-op. */
static inline void gpugrpSetBcEnabledState(struct OBJGPUGRP *pGpuGrp, NvBool bcState) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpSetBcEnabledState(pGpuGrp, bcState) gpugrpSetBcEnabledState_IMPL(pGpuGrp, bcState)
#endif //__nvoc_gpu_group_h_disabled
246 
/*!
 * @brief Set the group's parent GPU (OBJGPUGRP::parentGpu mutator).
 */
void gpugrpSetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts; the call is a no-op. */
static inline void gpugrpSetParentGpu(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpSetParentGpu(pGpuGrp, pParentGpu) gpugrpSetParentGpu_IMPL(pGpuGrp, pParentGpu)
#endif //__nvoc_gpu_group_h_disabled
256 
/*!
 * @brief Return the group's parent GPU (OBJGPUGRP::parentGpu accessor).
 */
struct OBJGPU *gpugrpGetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and returns NULL. */
static inline struct OBJGPU *gpugrpGetParentGpu(struct OBJGPUGRP *pGpuGrp) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NULL;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetParentGpu(pGpuGrp) gpugrpGetParentGpu_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
267 
/*!
 * @brief Create the group-wide VA space spanning [vaStart, vaEnd] of class
 *        vaspaceClass, returning it through ppGlobalVAS.
 *        NOTE(review): exact flag/range semantics are defined by the
 *        implementation outside this header - confirm there.
 */
NV_STATUS gpugrpCreateGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports unsupported. */
static inline NV_STATUS gpugrpCreateGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpCreateGlobalVASpace(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) gpugrpCreateGlobalVASpace_IMPL(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS)
#endif //__nvoc_gpu_group_h_disabled
278 
/*!
 * @brief Destroy the group-wide VA space. Implementation lives outside
 *        this header.
 */
NV_STATUS gpugrpDestroyGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports unsupported. */
static inline NV_STATUS gpugrpDestroyGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpDestroyGlobalVASpace(pGpuGrp, pGpu) gpugrpDestroyGlobalVASpace_IMPL(pGpuGrp, pGpu)
#endif //__nvoc_gpu_group_h_disabled
289 
/*!
 * @brief Retrieve the group-wide VA space through ppGlobalVAS.
 *        Implementation lives outside this header.
 */
NV_STATUS gpugrpGetGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports unsupported. */
static inline NV_STATUS gpugrpGetGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetGlobalVASpace(pGpuGrp, ppGlobalVAS) gpugrpGetGlobalVASpace_IMPL(pGpuGrp, ppGlobalVAS)
#endif //__nvoc_gpu_group_h_disabled
300 
/*!
 * @brief Look up the GPU in this group for subdevice instance
 *        subDeviceInst, returned through ppGpu. Implementation lives
 *        outside this header.
 */
NV_STATUS gpugrpGetGpuFromSubDeviceInstance_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu);

#ifdef __nvoc_gpu_group_h_disabled
/* Stub when OBJGPUGRP is compiled out: asserts and reports unsupported. */
static inline NV_STATUS gpugrpGetGpuFromSubDeviceInstance(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu) {
    NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetGpuFromSubDeviceInstance(pGpuGrp, subDeviceInst, ppGpu) gpugrpGetGpuFromSubDeviceInstance_IMPL(pGpuGrp, subDeviceInst, ppGpu)
#endif //__nvoc_gpu_group_h_disabled
311 
312 #undef PRIVATE_FIELD
313 
314 
315 #endif // GPU_GROUP_H
316 
317 #ifdef __cplusplus
318 } // extern "C"
319 #endif
320 #endif // _G_GPU_GROUP_NVOC_H_
321