1 #ifndef _G_KERNEL_CHANNEL_GROUP_NVOC_H_
2 #define _G_KERNEL_CHANNEL_GROUP_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kernel_channel_group_nvoc.h"
33 
34 #ifndef KERNEL_CHANNEL_GROUP_H
35 #define KERNEL_CHANNEL_GROUP_H 1
36 
37 #include "kernel/mem_mgr/vaspace.h"
38 
39 #include "ctrl/ctrl0080/ctrl0080gr.h" // NV03_DEVICE
40 
41 #include "libraries/containers/btree.h"
42 #include "gpu/mem_mgr/mem_desc.h"
43 #include "nvoc/prelude.h"
44 #include "resserv/resserv.h"
45 #include "gpu/gpu_resource.h"
46 
47 #include "kernel/gpu/fifo/kernel_channel.h"
48 
49 #include "kernel/gpu/fifo/kernel_ctxshare.h"
50 
51 // Forward declaration
52 struct KernelChannelGroupApi;
53 
54 #ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__
55 #define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__
56 typedef struct KernelChannelGroupApi KernelChannelGroupApi;
57 #endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */
58 
59 #ifndef __nvoc_class_id_KernelChannelGroupApi
60 #define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80
61 #endif /* __nvoc_class_id_KernelChannelGroupApi */
62 
63 
64 
65 /*!
66  * Stores the list of all client kctxshareApi objects
67  * that reference the same shared kctxshare object
68  */
69 MAKE_LIST(KernelChannelGroupApiList, KernelChannelGroupApi *);
70 
//
// Per-subdevice states tracked for a channel group; used with
// kchangrpSetState / kchangrpClearState / kchangrpIsStateSet below.
//
typedef enum
{
    CHANNELGROUP_STATE_ENABLE,
    CHANNELGROUP_STATE_SCHED,
    CHANNELGROUP_STATE_COUNT // Should be last
} CHANNELGROUP_STATE;
77 
78 //
79 // Describes the engine context memory for a channel
80 // (Stored in KernelChannelGroup because it's shared by all channels in the group)
81 //
typedef struct ENGINE_CTX_DESCRIPTOR
{
    MEMORY_DESCRIPTOR *pMemDesc; // Context memory
    VA_LIST vaList;              // Map to track the gpu va mapping to the context buffer
    NvU32 engDesc;               // Which engine type
} ENGINE_CTX_DESCRIPTOR;        // See KernelChannelGroup::ppEngCtxDesc for where these are stored
88 
89 
90 //
91 // HW method buffer used by supporting engines to save/restore
92 // faulting methods after corresponding fault is handled.
93 //
typedef struct _HW_ENG_FAULT_METHOD_BUFFER
{
    NvU64              bar2Addr; // BAR2 address of the buffer (see kchangrpMapFaultMethodBuffers)
    MEMORY_DESCRIPTOR *pMemDesc; // Backing memory (see kchangrpAllocFaultMethodBuffers)
} HW_ENG_FAULT_METHOD_BUFFER;
99 
100 //
101 // dword array size used to track the valid subcontext mask.
102 // We use 1 bit per subcontext; so need 2 dwords to store the valid bitmask.
103 //
104 #define SUBCTX_MASK_ARRAY_SIZE 2
105 
106 /**
107  * This class represents data that is shared when a TSG is duped.
108  *
109  * Instances of this class are ref-counted and will be kept alive until
110  * all TSG copies have been freed.
111  */
112 #ifdef NVOC_KERNEL_CHANNEL_GROUP_H_PRIVATE_ACCESS_ALLOWED
113 #define PRIVATE_FIELD(x) x
114 #else
115 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
116 #endif
// Shared, ref-counted TSG state (see class comment above). Layout is
// NVOC-generated; do not reorder fields.
struct KernelChannelGroup {
    // NVOC boilerplate: RTTI plus base-class/self pointers.
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RsShared __nvoc_base_RsShared;
    struct Object *__nvoc_pbase_Object;
    struct RsShared *__nvoc_pbase_RsShared;
    struct KernelChannelGroup *__nvoc_pbase_KernelChannelGroup;
    NODE node;                     // btree linkage (libraries/containers/btree.h)
    NvU32 grpID;                   // Channel group (TSG) identifier
    NvU32 runlistId;               // Runlist this group is on; see bRunlistAssigned
    NvU32 chanCount;               // Number of channels in pChanList
    RM_ENGINE_TYPE engineType;
    struct OBJVASPACE *pVAS;       // VA space for the group (set via kchangrpInit)
    NvU32 gfid;                    // GPU function id (set via kchangrpInit)
    struct OBJEHEAP *pSubctxIdHeap;
    CHANNEL_LIST *pChanList;       // Channels in this group (kchangrpAddChannel/RemoveChannel)
    NvU64 timesliceUs;             // Timeslice; name suggests microseconds -- confirm units
    ENGINE_CTX_DESCRIPTOR **ppEngCtxDesc; // Engine context descriptors, shared by all channels
    NvBool bAllocatedByRm;
    NvBool bLegacyMode;
    HW_ENG_FAULT_METHOD_BUFFER *pMthdBuffers; // Fault method buffers (kchangrpAllocFaultMethodBuffers)
    NvU32 (*ppSubctxMask)[2];      // Valid subctx bitmask; 2 == SUBCTX_MASK_ARRAY_SIZE above
    NvU32 (*ppZombieSubctxMask)[2];// Zombie subctx bitmask; 2 == SUBCTX_MASK_ARRAY_SIZE above
    NvU32 *pStateMask;             // Presumably CHANNELGROUP_STATE bits per subdevice (kchangrpSetState)
    NvU32 *pInterleaveLevel;
    NvBool bRunlistAssigned;
    struct CTX_BUF_POOL_INFO *pCtxBufPool;
    struct CTX_BUF_POOL_INFO *pChannelBufPool;
    struct MapNode mapNode;        // Linkage for KernelChannelGroupMap (intrusive map, see below)
    KernelChannelGroupApiList apiObjList; // Client KernelChannelGroupApi objects referencing this TSG
    NvBool bIsCallingContextVgpuPlugin;
};
148 
// Public typedef and NVOC class id for KernelChannelGroup.
#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
typedef struct KernelChannelGroup KernelChannelGroup;
#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelChannelGroup
#define __nvoc_class_id_KernelChannelGroup 0xec6de1
#endif /* __nvoc_class_id_KernelChannelGroup */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroup;

// Up-cast: returns the KernelChannelGroup view of an NVOC object.
#define __staticCast_KernelChannelGroup(pThis) \
    ((pThis)->__nvoc_pbase_KernelChannelGroup)

// Down-cast: RTTI-checked when the class is built in, constant NULL when
// the class is compiled out.
#ifdef __nvoc_kernel_channel_group_h_disabled
#define __dynamicCast_KernelChannelGroup(pThis) ((KernelChannelGroup*)NULL)
#else //__nvoc_kernel_channel_group_h_disabled
#define __dynamicCast_KernelChannelGroup(pThis) \
    ((KernelChannelGroup*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelChannelGroup)))
#endif //__nvoc_kernel_channel_group_h_disabled


// NVOC object-creation entry points for this class.
NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32);
#define __objCreate_KernelChannelGroup(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelChannelGroup((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
176 
177 static inline NV_STATUS kchangrpSetInterleaveLevelSched_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
178     return NV_OK;
179 }
180 
NV_STATUS kchangrpSetInterleaveLevelSched_GM107(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value);


// NOTE(review): the enabled-path macro binds to the _56cd7a no-op rather
// than the _GM107 implementation declared above. NVOC emits this binding
// per chip configuration -- confirm it is the intended one for this build.
// The disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpSetInterleaveLevelSched(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched_56cd7a(pGpu, pKernelChannelGroup, value)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpSetInterleaveLevelSched_HAL(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value)
194 
195 static inline void kchangrpUpdateSubcontextMask_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
196     return;
197 }
198 
void kchangrpUpdateSubcontextMask_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2);


// Dispatch: enabled path binds to the _b3696a no-op (a _GV100 implementation
// is declared above for configs that use it); the disabled-class stub only
// asserts. Prefer the _HAL alias below at call sites.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline void kchangrpUpdateSubcontextMask(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpUpdateSubcontextMask(pGpu, arg0, arg1, arg2) kchangrpUpdateSubcontextMask_b3696a(pGpu, arg0, arg1, arg2)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpUpdateSubcontextMask_HAL(pGpu, arg0, arg1, arg2) kchangrpUpdateSubcontextMask(pGpu, arg0, arg1, arg2)
211 
212 static inline void kchangrpSetSubcontextZombieState_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
213     return;
214 }
215 
216 
// Dispatch: enabled path binds to the _b3696a no-op; disabled-class stub
// only asserts. Prefer the _HAL alias below at call sites.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline void kchangrpSetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpSetSubcontextZombieState(pGpu, arg0, arg1, arg2) kchangrpSetSubcontextZombieState_b3696a(pGpu, arg0, arg1, arg2)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpSetSubcontextZombieState_HAL(pGpu, arg0, arg1, arg2) kchangrpSetSubcontextZombieState(pGpu, arg0, arg1, arg2)
226 
// Unsupported HAL variant (NVOC-generated): fires a precompiled assert
// unconditionally and returns NV_FALSE (spelled as (NvBool)(0 != 0)).
static inline NvBool kchangrpGetSubcontextZombieState_ceaee8(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1) {
    NV_ASSERT_PRECOMP(0);
    return ((NvBool)(0 != 0));
}
231 
232 
// Dispatch: enabled path binds to the assert-and-return-false _ceaee8
// variant; disabled-class stub asserts and returns NV_FALSE.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NvBool kchangrpGetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpGetSubcontextZombieState(pGpu, arg0, arg1) kchangrpGetSubcontextZombieState_ceaee8(pGpu, arg0, arg1)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpGetSubcontextZombieState_HAL(pGpu, arg0, arg1) kchangrpGetSubcontextZombieState(pGpu, arg0, arg1)
243 
NV_STATUS kchangrpAllocFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);


// Dispatch: enabled path binds to the _GV100 HAL implementation; the
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpAllocFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpAllocFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup)
257 
NV_STATUS kchangrpFreeFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);


// Dispatch: enabled path binds to the _GV100 HAL implementation; the
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpFreeFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup)
271 
NV_STATUS kchangrpMapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue);


// Dispatch: enabled path binds to the _GV100 HAL implementation; the
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpMapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpMapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue)
285 
NV_STATUS kchangrpUnmapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue);


// Dispatch: enabled path binds to the _GV100 HAL implementation; the
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpUnmapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpUnmapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue)
299 
300 static inline NV_STATUS kchangrpSetRealtime_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) {
301     return NV_OK;
302 }
303 
304 
// Dispatch: enabled path binds to the _56cd7a no-op (returns NV_OK); the
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpSetRealtime(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime_56cd7a(pGpu, pKernelChannelGroup, bRealtime)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpSetRealtime_HAL(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime)
315 
// NVOC constructor/destructor hooks: the generated object create/teardown
// code calls the __nvoc_ aliases, which resolve to the _IMPL functions.
NV_STATUS kchangrpConstruct_IMPL(struct KernelChannelGroup *arg_pKernelChannelGroup);

#define __nvoc_kchangrpConstruct(arg_pKernelChannelGroup) kchangrpConstruct_IMPL(arg_pKernelChannelGroup)
void kchangrpDestruct_IMPL(struct KernelChannelGroup *pKernelChannelGroup);

#define __nvoc_kchangrpDestruct(pKernelChannelGroup) kchangrpDestruct_IMPL(pKernelChannelGroup)
void kchangrpSetState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);

// Dispatch: kchangrpSetState() maps to the _IMPL above; disabled-class
// stub only asserts.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline void kchangrpSetState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpSetState(pKernelChannelGroup, subdevice, state) kchangrpSetState_IMPL(pKernelChannelGroup, subdevice, state)
#endif //__nvoc_kernel_channel_group_h_disabled
331 
void kchangrpClearState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);

// Dispatch: kchangrpClearState() maps to the _IMPL above; disabled-class
// stub only asserts.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline void kchangrpClearState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpClearState(pKernelChannelGroup, subdevice, state) kchangrpClearState_IMPL(pKernelChannelGroup, subdevice, state)
#endif //__nvoc_kernel_channel_group_h_disabled
341 
NvBool kchangrpIsStateSet_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);

// Dispatch: kchangrpIsStateSet() maps to the _IMPL above; disabled-class
// stub asserts and returns NV_FALSE.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NvBool kchangrpIsStateSet(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpIsStateSet(pKernelChannelGroup, subdevice, state) kchangrpIsStateSet_IMPL(pKernelChannelGroup, subdevice, state)
#endif //__nvoc_kernel_channel_group_h_disabled
352 
NV_STATUS kchangrpAddChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel);

// Dispatch: kchangrpAddChannel() maps to the _IMPL above; disabled-class
// stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpAddChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpAddChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel)
#endif //__nvoc_kernel_channel_group_h_disabled
363 
NV_STATUS kchangrpRemoveChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel);

// Dispatch: kchangrpRemoveChannel() maps to the _IMPL above; disabled-class
// stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpRemoveChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpRemoveChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel)
#endif //__nvoc_kernel_channel_group_h_disabled
374 
NV_STATUS kchangrpInit_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid);

// Dispatch: kchangrpInit() maps to the _IMPL above; disabled-class stub
// asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpInit(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid) kchangrpInit_IMPL(pGpu, pKernelChannelGroup, pVAS, gfid)
#endif //__nvoc_kernel_channel_group_h_disabled
385 
NV_STATUS kchangrpDestroy_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);

// Dispatch: kchangrpDestroy() maps to the _IMPL above; disabled-class stub
// asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpDestroy(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpDestroy(pGpu, pKernelChannelGroup) kchangrpDestroy_IMPL(pGpu, pKernelChannelGroup)
#endif //__nvoc_kernel_channel_group_h_disabled
396 
NV_STATUS kchangrpAllocEngineContextDescriptor_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);

// Dispatch: kchangrpAllocEngineContextDescriptor() maps to the _IMPL above;
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpAllocEngineContextDescriptor(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup) kchangrpAllocEngineContextDescriptor_IMPL(pGpu, pKernelChannelGroup)
#endif //__nvoc_kernel_channel_group_h_disabled
407 
NV_STATUS kchangrpGetEngineContextMemDesc_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, MEMORY_DESCRIPTOR **arg1);

// Dispatch: kchangrpGetEngineContextMemDesc() maps to the _IMPL above
// (arg1 is an out parameter); disabled-class stub asserts and returns
// NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpGetEngineContextMemDesc(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, MEMORY_DESCRIPTOR **arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpGetEngineContextMemDesc(pGpu, arg0, arg1) kchangrpGetEngineContextMemDesc_IMPL(pGpu, arg0, arg1)
#endif //__nvoc_kernel_channel_group_h_disabled
418 
NV_STATUS kchangrpSetInterleaveLevel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value);

// Dispatch: kchangrpSetInterleaveLevel() maps to the _IMPL above;
// disabled-class stub asserts and returns NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_channel_group_h_disabled
static inline NV_STATUS kchangrpSetInterleaveLevel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevel_IMPL(pGpu, pKernelChannelGroup, value)
#endif //__nvoc_kernel_channel_group_h_disabled
429 
430 #undef PRIVATE_FIELD
431 
432 
// Intrusive map of KernelChannelGroup objects, linked via the embedded mapNode.
MAKE_INTRUSIVE_MAP(KernelChannelGroupMap, KernelChannelGroup, mapNode);
434 
435 #endif // KERNEL_CHANNEL_GROUP_H
436 
437 #ifdef __cplusplus
438 } // extern "C"
439 #endif
440 #endif // _G_KERNEL_CHANNEL_GROUP_NVOC_H_
441