1 
2 #ifndef _G_KERNEL_CHANNEL_GROUP_NVOC_H_
3 #define _G_KERNEL_CHANNEL_GROUP_NVOC_H_
4 #include "nvoc/runtime.h"
5 
6 // Version of generated metadata structures
7 #ifdef NVOC_METADATA_VERSION
8 #undef NVOC_METADATA_VERSION
9 #endif
10 #define NVOC_METADATA_VERSION 0
11 
12 #ifdef __cplusplus
13 extern "C" {
14 #endif
15 
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
38 
39 #pragma once
40 #include "g_kernel_channel_group_nvoc.h"
41 
42 #ifndef KERNEL_CHANNEL_GROUP_H
43 #define KERNEL_CHANNEL_GROUP_H 1
44 
45 #include "kernel/mem_mgr/vaspace.h"
46 
47 #include "ctrl/ctrl0080/ctrl0080gr.h" // NV03_DEVICE
48 
49 #include "libraries/containers/btree.h"
50 #include "gpu/mem_mgr/mem_desc.h"
51 #include "nvoc/prelude.h"
52 #include "resserv/resserv.h"
53 #include "gpu/gpu_resource.h"
54 
55 #include "kernel/gpu/fifo/kernel_channel.h"
56 
57 #include "kernel/gpu/fifo/kernel_ctxshare.h"
58 
59 // Forward declaration
60 
61 struct KernelChannelGroupApi;
62 
63 #ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__
64 #define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__
65 typedef struct KernelChannelGroupApi KernelChannelGroupApi;
66 #endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */
67 
68 #ifndef __nvoc_class_id_KernelChannelGroupApi
69 #define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80
70 #endif /* __nvoc_class_id_KernelChannelGroupApi */
71 
72 
73 
74 /*!
75  * Stores the list of all client kctxshareApi objects
76  * that reference the same shared kctxshare object
77  */
78 MAKE_LIST(KernelChannelGroupApiList, KernelChannelGroupApi *);
79 
//
// Per-subdevice state bits tracked for a channel group; manipulated through
// kchangrpSetState / kchangrpClearState / kchangrpIsStateSet below.
//
typedef enum
{
    CHANNELGROUP_STATE_ENABLE,
    CHANNELGROUP_STATE_SCHED,
    CHANNELGROUP_STATE_COUNT // Should be last
} CHANNELGROUP_STATE;
86 
87 //
88 // Describes the engine context memory for a channel
89 // (Stored in KernelChannelGroup because it's shared by all channels in the group)
90 //
91 typedef struct ENGINE_CTX_DESCRIPTOR
92 {
93     MEMORY_DESCRIPTOR *pMemDesc; // Context memory
94     VA_LIST vaList;              // Map to track the gpu va mapping to the context buffer
95     NvU32 engDesc;               // Which engine type
96 } ENGINE_CTX_DESCRIPTOR;
97 
98 
99 //
100 // HW method buffer used by supporting engines to save/restore
101 // faulting methods after corresponding fault is handled.
102 //
103 typedef struct _HW_ENG_FAULT_METHOD_BUFFER
104 {
105     NvU64              bar2Addr;
106     MEMORY_DESCRIPTOR *pMemDesc;
107 } HW_ENG_FAULT_METHOD_BUFFER;
108 
//
// dword array size used to track the valid subcontext mask.
// We use 1 bit per subcontext; so need 2 dwords to store the valid bitmask.
//
#define SUBCTX_MASK_ARRAY_SIZE 2
114 
/**
 * This class represents data that is shared when a TSG is duped.
 *
 * Instances of this class are ref-counted and will be kept alive until
 * all TSG copies have been freed.
 */
121 
// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERNEL_CHANNEL_GROUP_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
130 
131 
132 struct KernelChannelGroup {
133 
134     // Metadata
135     const struct NVOC_RTTI *__nvoc_rtti;
136 
137     // Parent (i.e. superclass or base class) object pointers
138     struct RsShared __nvoc_base_RsShared;
139 
140     // Ancestor object pointers for `staticCast` feature
141     struct Object *__nvoc_pbase_Object;    // obj super^2
142     struct RsShared *__nvoc_pbase_RsShared;    // shr super
143     struct KernelChannelGroup *__nvoc_pbase_KernelChannelGroup;    // kchangrp
144 
145     // Data members
146     NODE node;
147     NvU32 grpID;
148     NvU32 runlistId;
149     NvU32 chanCount;
150     RM_ENGINE_TYPE engineType;
151     struct OBJVASPACE *pVAS;
152     NvU32 gfid;
153     OBJEHEAP *pSubctxIdHeap;
154     CHANNEL_LIST *pChanList;
155     NvU64 timesliceUs;
156     ENGINE_CTX_DESCRIPTOR **ppEngCtxDesc;
157     NvBool bAllocatedByRm;
158     NvBool bLegacyMode;
159     HW_ENG_FAULT_METHOD_BUFFER *pMthdBuffers;
160     NvU32 (*ppSubctxMask)[2];
161     NvU32 (*ppZombieSubctxMask)[2];
162     NvU32 *pStateMask;
163     NvU32 *pInterleaveLevel;
164     NvBool bRunlistAssigned;
165     struct CTX_BUF_POOL_INFO *pCtxBufPool;
166     struct CTX_BUF_POOL_INFO *pChannelBufPool;
167     struct MapNode mapNode;
168     KernelChannelGroupApiList apiObjList;
169     NvBool bIsCallingContextVgpuPlugin;
170 };
171 
172 #ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
173 #define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
174 typedef struct KernelChannelGroup KernelChannelGroup;
175 #endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */
176 
177 #ifndef __nvoc_class_id_KernelChannelGroup
178 #define __nvoc_class_id_KernelChannelGroup 0xec6de1
179 #endif /* __nvoc_class_id_KernelChannelGroup */
180 
181 // Casting support
182 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroup;
183 
184 #define __staticCast_KernelChannelGroup(pThis) \
185     ((pThis)->__nvoc_pbase_KernelChannelGroup)
186 
187 #ifdef __nvoc_kernel_channel_group_h_disabled
188 #define __dynamicCast_KernelChannelGroup(pThis) ((KernelChannelGroup*)NULL)
189 #else //__nvoc_kernel_channel_group_h_disabled
190 #define __dynamicCast_KernelChannelGroup(pThis) \
191     ((KernelChannelGroup*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelChannelGroup)))
192 #endif //__nvoc_kernel_channel_group_h_disabled
193 
194 NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32, va_list);
195 
196 NV_STATUS __nvoc_objCreate_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32);
197 #define __objCreate_KernelChannelGroup(ppNewObj, pParent, createFlags) \
198     __nvoc_objCreate_KernelChannelGroup((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
199 
200 
201 // Wrapper macros
202 
203 // Dispatch functions
kchangrpSetInterleaveLevelSched_56cd7a(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 value)204 static inline NV_STATUS kchangrpSetInterleaveLevelSched_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
205     return NV_OK;
206 }
207 
208 NV_STATUS kchangrpSetInterleaveLevelSched_GM107(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value);
209 
210 
211 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetInterleaveLevelSched(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 value)212 static inline NV_STATUS kchangrpSetInterleaveLevelSched(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
213     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
214     return NV_ERR_NOT_SUPPORTED;
215 }
216 #else //__nvoc_kernel_channel_group_h_disabled
217 #define kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched_56cd7a(pGpu, pKernelChannelGroup, value)
218 #endif //__nvoc_kernel_channel_group_h_disabled
219 
220 #define kchangrpSetInterleaveLevelSched_HAL(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value)
221 
// NVOC stub variant (_b3696a): subcontext mask update is a no-op.
static inline void kchangrpUpdateSubcontextMask_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3, NvBool arg4) {
    return;
}

void kchangrpUpdateSubcontextMask_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3, NvBool arg4);


#ifdef __nvoc_kernel_channel_group_h_disabled
static inline void kchangrpUpdateSubcontextMask(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3, NvBool arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpUpdateSubcontextMask(pGpu, arg2, arg3, arg4) kchangrpUpdateSubcontextMask_b3696a(pGpu, arg2, arg3, arg4)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpUpdateSubcontextMask_HAL(pGpu, arg2, arg3, arg4) kchangrpUpdateSubcontextMask(pGpu, arg2, arg3, arg4)
238 
// NVOC stub variant (_b3696a): zombie-state tracking is a no-op.
static inline void kchangrpSetSubcontextZombieState_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3, NvBool arg4) {
    return;
}


#ifdef __nvoc_kernel_channel_group_h_disabled
static inline void kchangrpSetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3, NvBool arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
#else //__nvoc_kernel_channel_group_h_disabled
#define kchangrpSetSubcontextZombieState(pGpu, arg2, arg3, arg4) kchangrpSetSubcontextZombieState_b3696a(pGpu, arg2, arg3, arg4)
#endif //__nvoc_kernel_channel_group_h_disabled

#define kchangrpSetSubcontextZombieState_HAL(pGpu, arg2, arg3, arg4) kchangrpSetSubcontextZombieState(pGpu, arg2, arg3, arg4)
253 
kchangrpGetSubcontextZombieState_ceaee8(struct OBJGPU * pGpu,struct KernelChannelGroup * arg2,NvU32 arg3)254 static inline NvBool kchangrpGetSubcontextZombieState_ceaee8(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3) {
255     NV_ASSERT_PRECOMP(0);
256     return ((NvBool)(0 != 0));
257 }
258 
259 
260 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpGetSubcontextZombieState(struct OBJGPU * pGpu,struct KernelChannelGroup * arg2,NvU32 arg3)261 static inline NvBool kchangrpGetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, NvU32 arg3) {
262     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
263     return NV_FALSE;
264 }
265 #else //__nvoc_kernel_channel_group_h_disabled
266 #define kchangrpGetSubcontextZombieState(pGpu, arg2, arg3) kchangrpGetSubcontextZombieState_ceaee8(pGpu, arg2, arg3)
267 #endif //__nvoc_kernel_channel_group_h_disabled
268 
269 #define kchangrpGetSubcontextZombieState_HAL(pGpu, arg2, arg3) kchangrpGetSubcontextZombieState(pGpu, arg2, arg3)
270 
271 NV_STATUS kchangrpAllocFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
272 
273 
274 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpAllocFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)275 static inline NV_STATUS kchangrpAllocFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
276     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
277     return NV_ERR_NOT_SUPPORTED;
278 }
279 #else //__nvoc_kernel_channel_group_h_disabled
280 #define kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup)
281 #endif //__nvoc_kernel_channel_group_h_disabled
282 
283 #define kchangrpAllocFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup)
284 
285 NV_STATUS kchangrpFreeFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
286 
287 
288 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpFreeFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)289 static inline NV_STATUS kchangrpFreeFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
290     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
291     return NV_ERR_NOT_SUPPORTED;
292 }
293 #else //__nvoc_kernel_channel_group_h_disabled
294 #define kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup)
295 #endif //__nvoc_kernel_channel_group_h_disabled
296 
297 #define kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup)
298 
299 NV_STATUS kchangrpMapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue);
300 
301 
302 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpMapFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 runqueue)303 static inline NV_STATUS kchangrpMapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) {
304     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
305     return NV_ERR_NOT_SUPPORTED;
306 }
307 #else //__nvoc_kernel_channel_group_h_disabled
308 #define kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue)
309 #endif //__nvoc_kernel_channel_group_h_disabled
310 
311 #define kchangrpMapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue)
312 
313 NV_STATUS kchangrpUnmapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue);
314 
315 
316 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpUnmapFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 runqueue)317 static inline NV_STATUS kchangrpUnmapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) {
318     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
319     return NV_ERR_NOT_SUPPORTED;
320 }
321 #else //__nvoc_kernel_channel_group_h_disabled
322 #define kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue)
323 #endif //__nvoc_kernel_channel_group_h_disabled
324 
325 #define kchangrpUnmapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue)
326 
kchangrpSetRealtime_56cd7a(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvBool bRealtime)327 static inline NV_STATUS kchangrpSetRealtime_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) {
328     return NV_OK;
329 }
330 
331 
332 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetRealtime(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvBool bRealtime)333 static inline NV_STATUS kchangrpSetRealtime(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) {
334     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
335     return NV_ERR_NOT_SUPPORTED;
336 }
337 #else //__nvoc_kernel_channel_group_h_disabled
338 #define kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime_56cd7a(pGpu, pKernelChannelGroup, bRealtime)
339 #endif //__nvoc_kernel_channel_group_h_disabled
340 
341 #define kchangrpSetRealtime_HAL(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime)
342 
343 NV_STATUS kchangrpConstruct_IMPL(struct KernelChannelGroup *arg_pKernelChannelGroup);
344 
345 #define __nvoc_kchangrpConstruct(arg_pKernelChannelGroup) kchangrpConstruct_IMPL(arg_pKernelChannelGroup)
346 void kchangrpDestruct_IMPL(struct KernelChannelGroup *pKernelChannelGroup);
347 
348 #define __nvoc_kchangrpDestruct(pKernelChannelGroup) kchangrpDestruct_IMPL(pKernelChannelGroup)
349 void kchangrpSetState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);
350 
351 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetState(struct KernelChannelGroup * pKernelChannelGroup,NvU32 subdevice,CHANNELGROUP_STATE state)352 static inline void kchangrpSetState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
353     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
354 }
355 #else //__nvoc_kernel_channel_group_h_disabled
356 #define kchangrpSetState(pKernelChannelGroup, subdevice, state) kchangrpSetState_IMPL(pKernelChannelGroup, subdevice, state)
357 #endif //__nvoc_kernel_channel_group_h_disabled
358 
359 void kchangrpClearState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);
360 
361 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpClearState(struct KernelChannelGroup * pKernelChannelGroup,NvU32 subdevice,CHANNELGROUP_STATE state)362 static inline void kchangrpClearState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
363     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
364 }
365 #else //__nvoc_kernel_channel_group_h_disabled
366 #define kchangrpClearState(pKernelChannelGroup, subdevice, state) kchangrpClearState_IMPL(pKernelChannelGroup, subdevice, state)
367 #endif //__nvoc_kernel_channel_group_h_disabled
368 
369 NvBool kchangrpIsStateSet_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);
370 
371 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpIsStateSet(struct KernelChannelGroup * pKernelChannelGroup,NvU32 subdevice,CHANNELGROUP_STATE state)372 static inline NvBool kchangrpIsStateSet(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
373     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
374     return NV_FALSE;
375 }
376 #else //__nvoc_kernel_channel_group_h_disabled
377 #define kchangrpIsStateSet(pKernelChannelGroup, subdevice, state) kchangrpIsStateSet_IMPL(pKernelChannelGroup, subdevice, state)
378 #endif //__nvoc_kernel_channel_group_h_disabled
379 
380 NV_STATUS kchangrpAddChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel);
381 
382 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpAddChannel(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,struct KernelChannel * pKernelChannel)383 static inline NV_STATUS kchangrpAddChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) {
384     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
385     return NV_ERR_NOT_SUPPORTED;
386 }
387 #else //__nvoc_kernel_channel_group_h_disabled
388 #define kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpAddChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel)
389 #endif //__nvoc_kernel_channel_group_h_disabled
390 
391 NV_STATUS kchangrpRemoveChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel);
392 
393 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpRemoveChannel(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,struct KernelChannel * pKernelChannel)394 static inline NV_STATUS kchangrpRemoveChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) {
395     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
396     return NV_ERR_NOT_SUPPORTED;
397 }
398 #else //__nvoc_kernel_channel_group_h_disabled
399 #define kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpRemoveChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel)
400 #endif //__nvoc_kernel_channel_group_h_disabled
401 
402 NV_STATUS kchangrpInit_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid);
403 
404 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpInit(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,struct OBJVASPACE * pVAS,NvU32 gfid)405 static inline NV_STATUS kchangrpInit(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid) {
406     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
407     return NV_ERR_NOT_SUPPORTED;
408 }
409 #else //__nvoc_kernel_channel_group_h_disabled
410 #define kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid) kchangrpInit_IMPL(pGpu, pKernelChannelGroup, pVAS, gfid)
411 #endif //__nvoc_kernel_channel_group_h_disabled
412 
413 NV_STATUS kchangrpDestroy_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
414 
415 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpDestroy(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)416 static inline NV_STATUS kchangrpDestroy(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
417     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
418     return NV_ERR_NOT_SUPPORTED;
419 }
420 #else //__nvoc_kernel_channel_group_h_disabled
421 #define kchangrpDestroy(pGpu, pKernelChannelGroup) kchangrpDestroy_IMPL(pGpu, pKernelChannelGroup)
422 #endif //__nvoc_kernel_channel_group_h_disabled
423 
424 NV_STATUS kchangrpAllocEngineContextDescriptor_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
425 
426 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpAllocEngineContextDescriptor(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)427 static inline NV_STATUS kchangrpAllocEngineContextDescriptor(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
428     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
429     return NV_ERR_NOT_SUPPORTED;
430 }
431 #else //__nvoc_kernel_channel_group_h_disabled
432 #define kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup) kchangrpAllocEngineContextDescriptor_IMPL(pGpu, pKernelChannelGroup)
433 #endif //__nvoc_kernel_channel_group_h_disabled
434 
435 NV_STATUS kchangrpGetEngineContextMemDesc_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, MEMORY_DESCRIPTOR **arg3);
436 
437 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpGetEngineContextMemDesc(struct OBJGPU * pGpu,struct KernelChannelGroup * arg2,MEMORY_DESCRIPTOR ** arg3)438 static inline NV_STATUS kchangrpGetEngineContextMemDesc(struct OBJGPU *pGpu, struct KernelChannelGroup *arg2, MEMORY_DESCRIPTOR **arg3) {
439     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
440     return NV_ERR_NOT_SUPPORTED;
441 }
442 #else //__nvoc_kernel_channel_group_h_disabled
443 #define kchangrpGetEngineContextMemDesc(pGpu, arg2, arg3) kchangrpGetEngineContextMemDesc_IMPL(pGpu, arg2, arg3)
444 #endif //__nvoc_kernel_channel_group_h_disabled
445 
446 NV_STATUS kchangrpSetInterleaveLevel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value);
447 
448 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetInterleaveLevel(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 value)449 static inline NV_STATUS kchangrpSetInterleaveLevel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
450     NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
451     return NV_ERR_NOT_SUPPORTED;
452 }
453 #else //__nvoc_kernel_channel_group_h_disabled
454 #define kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevel_IMPL(pGpu, pKernelChannelGroup, value)
455 #endif //__nvoc_kernel_channel_group_h_disabled
456 
#undef PRIVATE_FIELD


// Intrusive map of KernelChannelGroup objects, linked through the embedded mapNode member.
MAKE_INTRUSIVE_MAP(KernelChannelGroupMap, KernelChannelGroup, mapNode);
461 
462 #endif // KERNEL_CHANNEL_GROUP_H
463 
464 #ifdef __cplusplus
465 } // extern "C"
466 #endif
467 
468 #endif // _G_KERNEL_CHANNEL_GROUP_NVOC_H_
469