1 #ifndef _G_KERNEL_CHANNEL_GROUP_NVOC_H_
2 #define _G_KERNEL_CHANNEL_GROUP_NVOC_H_
3 #include "nvoc/runtime.h"
4
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8
9 /*
10 * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11 * SPDX-License-Identifier: MIT
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
30 */
31
32 #include "g_kernel_channel_group_nvoc.h"
33
34 #ifndef KERNEL_CHANNEL_GROUP_H
35 #define KERNEL_CHANNEL_GROUP_H 1
36
37 #include "kernel/mem_mgr/vaspace.h"
38
39 #include "ctrl/ctrl0080/ctrl0080gr.h" // NV03_DEVICE
40
41 #include "libraries/containers/btree.h"
42 #include "gpu/mem_mgr/mem_desc.h"
43 #include "nvoc/prelude.h"
44 #include "resserv/resserv.h"
45 #include "gpu/gpu_resource.h"
46
47 #include "kernel/gpu/fifo/kernel_channel.h"
48
49 #include "kernel/gpu/fifo/kernel_ctxshare.h"
50
51 // Forward declaration
52 struct KernelChannelGroupApi;
53
54 #ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__
55 #define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__
56 typedef struct KernelChannelGroupApi KernelChannelGroupApi;
57 #endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */
58
59 #ifndef __nvoc_class_id_KernelChannelGroupApi
60 #define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80
61 #endif /* __nvoc_class_id_KernelChannelGroupApi */
62
63
64
/*!
 * Stores the list of all client KernelChannelGroupApi objects
 * that reference the same shared KernelChannelGroup object
 */
69 MAKE_LIST(KernelChannelGroupApiList, KernelChannelGroupApi *);
70
// Per-subdevice state bits for a channel group; set/cleared/tested via
// kchangrpSetState/kchangrpClearState/kchangrpIsStateSet (declared below).
typedef enum
{
    CHANNELGROUP_STATE_ENABLE,  // presumably: group is enabled for scheduling -- TODO confirm
    CHANNELGROUP_STATE_SCHED,   // presumably: group has been scheduled -- TODO confirm
    CHANNELGROUP_STATE_COUNT // Should be last
} CHANNELGROUP_STATE;
77
78 //
79 // Describes the engine context memory for a channel
80 // (Stored in KernelChannelGroup because it's shared by all channels in the group)
81 //
typedef struct ENGINE_CTX_DESCRIPTOR
{
    MEMORY_DESCRIPTOR *pMemDesc; // Context memory
    VA_LIST vaList;              // Map to track the gpu va mapping to the context buffer
    NvU32 engDesc;               // Which engine type
} ENGINE_CTX_DESCRIPTOR;
88
89
90 //
91 // HW method buffer used by supporting engines to save/restore
92 // faulting methods after corresponding fault is handled.
93 //
typedef struct _HW_ENG_FAULT_METHOD_BUFFER
{
    NvU64 bar2Addr;              // BAR2 address of the buffer -- assumed from the name; TODO confirm CPU vs GPU view
    MEMORY_DESCRIPTOR *pMemDesc; // Backing memory descriptor for the method buffer
} HW_ENG_FAULT_METHOD_BUFFER;
99
100 //
101 // dword array size used to track the valid subcontext mask.
102 // We use 1 bit per subcontext; so need 2 dwords to store the valid bitmask.
103 //
104 #define SUBCTX_MASK_ARRAY_SIZE 2
105
106 /**
107 * This class represents data that is shared when a TSG is duped.
108 *
109 * Instances of this class are ref-counted and will be kept alive until
110 * all TSG copies have been freed.
111 */
112
113 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
114 // the matching C source file, but causes diagnostics to be issued if another
115 // source file references the field.
116 #ifdef NVOC_KERNEL_CHANNEL_GROUP_H_PRIVATE_ACCESS_ALLOWED
117 #define PRIVATE_FIELD(x) x
118 #else
119 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
120 #endif
121
struct KernelChannelGroup {
    // NVOC class boilerplate: RTTI pointer, embedded base-class instance,
    // and cached ancestor pointers. Layout is generated -- do not reorder.
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RsShared __nvoc_base_RsShared;
    struct Object *__nvoc_pbase_Object;
    struct RsShared *__nvoc_pbase_RsShared;
    struct KernelChannelGroup *__nvoc_pbase_KernelChannelGroup;
    NODE node;                                  // Btree linkage (see btree.h) -- presumably keyed by grpID; verify against users
    NvU32 grpID;                                // Channel group identifier
    NvU32 runlistId;                            // Runlist this group is associated with
    NvU32 chanCount;                            // Number of channels in the group
    RM_ENGINE_TYPE engineType;
    struct OBJVASPACE *pVAS;                    // VA space used by the group (set via kchangrpInit)
    NvU32 gfid;                                 // GPU function id (set via kchangrpInit)
    struct OBJEHEAP *pSubctxIdHeap;             // Heap allocator -- presumably for subcontext ids; TODO confirm
    CHANNEL_LIST *pChanList;                    // List of member channels
    NvU64 timesliceUs;                          // Timeslice, in microseconds per the Us suffix
    ENGINE_CTX_DESCRIPTOR **ppEngCtxDesc;       // Per-engine context descriptors (see ENGINE_CTX_DESCRIPTOR above)
    NvBool bAllocatedByRm;
    NvBool bLegacyMode;
    HW_ENG_FAULT_METHOD_BUFFER *pMthdBuffers;   // Fault method buffers (see HW_ENG_FAULT_METHOD_BUFFER above)
    // Two-dword bitmask arrays; size matches SUBCTX_MASK_ARRAY_SIZE (1 bit per subcontext).
    NvU32 (*ppSubctxMask)[2];
    NvU32 (*ppZombieSubctxMask)[2];
    NvU32 *pStateMask;                          // Presumably per-subdevice CHANNELGROUP_STATE bits -- TODO confirm indexing
    NvU32 *pInterleaveLevel;                    // Presumably per-subdevice interleave level -- TODO confirm indexing
    NvBool bRunlistAssigned;
    struct CTX_BUF_POOL_INFO *pCtxBufPool;
    struct CTX_BUF_POOL_INFO *pChannelBufPool;
    struct MapNode mapNode;                     // Intrusive-map linkage used by KernelChannelGroupMap (bottom of file)
    KernelChannelGroupApiList apiObjList;       // Client KernelChannelGroupApi objects sharing this group
    NvBool bIsCallingContextVgpuPlugin;
};
153
154 #ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
155 #define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
156 typedef struct KernelChannelGroup KernelChannelGroup;
157 #endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */
158
159 #ifndef __nvoc_class_id_KernelChannelGroup
160 #define __nvoc_class_id_KernelChannelGroup 0xec6de1
161 #endif /* __nvoc_class_id_KernelChannelGroup */
162
163 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroup;
164
165 #define __staticCast_KernelChannelGroup(pThis) \
166 ((pThis)->__nvoc_pbase_KernelChannelGroup)
167
168 #ifdef __nvoc_kernel_channel_group_h_disabled
169 #define __dynamicCast_KernelChannelGroup(pThis) ((KernelChannelGroup*)NULL)
170 #else //__nvoc_kernel_channel_group_h_disabled
171 #define __dynamicCast_KernelChannelGroup(pThis) \
172 ((KernelChannelGroup*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelChannelGroup)))
173 #endif //__nvoc_kernel_channel_group_h_disabled
174
175
176 NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32, va_list);
177
178 NV_STATUS __nvoc_objCreate_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32);
179 #define __objCreate_KernelChannelGroup(ppNewObj, pParent, createFlags) \
180 __nvoc_objCreate_KernelChannelGroup((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
181
kchangrpSetInterleaveLevelSched_56cd7a(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 value)182 static inline NV_STATUS kchangrpSetInterleaveLevelSched_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
183 return NV_OK;
184 }
185
186 NV_STATUS kchangrpSetInterleaveLevelSched_GM107(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value);
187
188
189 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetInterleaveLevelSched(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 value)190 static inline NV_STATUS kchangrpSetInterleaveLevelSched(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
191 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
192 return NV_ERR_NOT_SUPPORTED;
193 }
194 #else //__nvoc_kernel_channel_group_h_disabled
195 #define kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched_56cd7a(pGpu, pKernelChannelGroup, value)
196 #endif //__nvoc_kernel_channel_group_h_disabled
197
198 #define kchangrpSetInterleaveLevelSched_HAL(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value)
199
// No-op HAL variant: subcontext mask tracking is not needed on this
// configuration, so the call returns immediately.
static inline void kchangrpUpdateSubcontextMask_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
    return;
}
203
204 void kchangrpUpdateSubcontextMask_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2);
205
206
207 #ifdef __nvoc_kernel_channel_group_h_disabled
// Stub used when KernelChannelGroup is compile-time disabled: asserts only
// (void return, so no error code to propagate).
static inline void kchangrpUpdateSubcontextMask(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
211 #else //__nvoc_kernel_channel_group_h_disabled
212 #define kchangrpUpdateSubcontextMask(pGpu, arg0, arg1, arg2) kchangrpUpdateSubcontextMask_b3696a(pGpu, arg0, arg1, arg2)
213 #endif //__nvoc_kernel_channel_group_h_disabled
214
215 #define kchangrpUpdateSubcontextMask_HAL(pGpu, arg0, arg1, arg2) kchangrpUpdateSubcontextMask(pGpu, arg0, arg1, arg2)
216
// No-op HAL variant: zombie-subcontext tracking is not needed on this
// configuration, so the call returns immediately.
static inline void kchangrpSetSubcontextZombieState_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
    return;
}
220
221
222 #ifdef __nvoc_kernel_channel_group_h_disabled
// Stub used when KernelChannelGroup is compile-time disabled: asserts only.
static inline void kchangrpSetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
226 #else //__nvoc_kernel_channel_group_h_disabled
227 #define kchangrpSetSubcontextZombieState(pGpu, arg0, arg1, arg2) kchangrpSetSubcontextZombieState_b3696a(pGpu, arg0, arg1, arg2)
228 #endif //__nvoc_kernel_channel_group_h_disabled
229
230 #define kchangrpSetSubcontextZombieState_HAL(pGpu, arg0, arg1, arg2) kchangrpSetSubcontextZombieState(pGpu, arg0, arg1, arg2)
231
kchangrpGetSubcontextZombieState_ceaee8(struct OBJGPU * pGpu,struct KernelChannelGroup * arg0,NvU32 arg1)232 static inline NvBool kchangrpGetSubcontextZombieState_ceaee8(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1) {
233 NV_ASSERT_PRECOMP(0);
234 return ((NvBool)(0 != 0));
235 }
236
237
238 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpGetSubcontextZombieState(struct OBJGPU * pGpu,struct KernelChannelGroup * arg0,NvU32 arg1)239 static inline NvBool kchangrpGetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1) {
240 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
241 return NV_FALSE;
242 }
243 #else //__nvoc_kernel_channel_group_h_disabled
244 #define kchangrpGetSubcontextZombieState(pGpu, arg0, arg1) kchangrpGetSubcontextZombieState_ceaee8(pGpu, arg0, arg1)
245 #endif //__nvoc_kernel_channel_group_h_disabled
246
247 #define kchangrpGetSubcontextZombieState_HAL(pGpu, arg0, arg1) kchangrpGetSubcontextZombieState(pGpu, arg0, arg1)
248
249 NV_STATUS kchangrpAllocFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
250
251
252 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpAllocFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)253 static inline NV_STATUS kchangrpAllocFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
254 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
255 return NV_ERR_NOT_SUPPORTED;
256 }
257 #else //__nvoc_kernel_channel_group_h_disabled
258 #define kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup)
259 #endif //__nvoc_kernel_channel_group_h_disabled
260
261 #define kchangrpAllocFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup)
262
263 NV_STATUS kchangrpFreeFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
264
265
266 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpFreeFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)267 static inline NV_STATUS kchangrpFreeFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
268 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
269 return NV_ERR_NOT_SUPPORTED;
270 }
271 #else //__nvoc_kernel_channel_group_h_disabled
272 #define kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup)
273 #endif //__nvoc_kernel_channel_group_h_disabled
274
275 #define kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup)
276
277 NV_STATUS kchangrpMapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue);
278
279
280 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpMapFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 runqueue)281 static inline NV_STATUS kchangrpMapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) {
282 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
283 return NV_ERR_NOT_SUPPORTED;
284 }
285 #else //__nvoc_kernel_channel_group_h_disabled
286 #define kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue)
287 #endif //__nvoc_kernel_channel_group_h_disabled
288
289 #define kchangrpMapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue)
290
291 NV_STATUS kchangrpUnmapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue);
292
293
294 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpUnmapFaultMethodBuffers(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 runqueue)295 static inline NV_STATUS kchangrpUnmapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) {
296 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
297 return NV_ERR_NOT_SUPPORTED;
298 }
299 #else //__nvoc_kernel_channel_group_h_disabled
300 #define kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue)
301 #endif //__nvoc_kernel_channel_group_h_disabled
302
303 #define kchangrpUnmapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue)
304
kchangrpSetRealtime_56cd7a(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvBool bRealtime)305 static inline NV_STATUS kchangrpSetRealtime_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) {
306 return NV_OK;
307 }
308
309
310 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetRealtime(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvBool bRealtime)311 static inline NV_STATUS kchangrpSetRealtime(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) {
312 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
313 return NV_ERR_NOT_SUPPORTED;
314 }
315 #else //__nvoc_kernel_channel_group_h_disabled
316 #define kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime_56cd7a(pGpu, pKernelChannelGroup, bRealtime)
317 #endif //__nvoc_kernel_channel_group_h_disabled
318
319 #define kchangrpSetRealtime_HAL(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime)
320
321 NV_STATUS kchangrpConstruct_IMPL(struct KernelChannelGroup *arg_pKernelChannelGroup);
322
323 #define __nvoc_kchangrpConstruct(arg_pKernelChannelGroup) kchangrpConstruct_IMPL(arg_pKernelChannelGroup)
324 void kchangrpDestruct_IMPL(struct KernelChannelGroup *pKernelChannelGroup);
325
326 #define __nvoc_kchangrpDestruct(pKernelChannelGroup) kchangrpDestruct_IMPL(pKernelChannelGroup)
327 void kchangrpSetState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);
328
329 #ifdef __nvoc_kernel_channel_group_h_disabled
// Stub used when KernelChannelGroup is compile-time disabled: asserts only.
static inline void kchangrpSetState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
333 #else //__nvoc_kernel_channel_group_h_disabled
334 #define kchangrpSetState(pKernelChannelGroup, subdevice, state) kchangrpSetState_IMPL(pKernelChannelGroup, subdevice, state)
335 #endif //__nvoc_kernel_channel_group_h_disabled
336
337 void kchangrpClearState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);
338
339 #ifdef __nvoc_kernel_channel_group_h_disabled
// Stub used when KernelChannelGroup is compile-time disabled: asserts only.
static inline void kchangrpClearState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
    NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
}
343 #else //__nvoc_kernel_channel_group_h_disabled
344 #define kchangrpClearState(pKernelChannelGroup, subdevice, state) kchangrpClearState_IMPL(pKernelChannelGroup, subdevice, state)
345 #endif //__nvoc_kernel_channel_group_h_disabled
346
347 NvBool kchangrpIsStateSet_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state);
348
349 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpIsStateSet(struct KernelChannelGroup * pKernelChannelGroup,NvU32 subdevice,CHANNELGROUP_STATE state)350 static inline NvBool kchangrpIsStateSet(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) {
351 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
352 return NV_FALSE;
353 }
354 #else //__nvoc_kernel_channel_group_h_disabled
355 #define kchangrpIsStateSet(pKernelChannelGroup, subdevice, state) kchangrpIsStateSet_IMPL(pKernelChannelGroup, subdevice, state)
356 #endif //__nvoc_kernel_channel_group_h_disabled
357
358 NV_STATUS kchangrpAddChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel);
359
360 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpAddChannel(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,struct KernelChannel * pKernelChannel)361 static inline NV_STATUS kchangrpAddChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) {
362 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
363 return NV_ERR_NOT_SUPPORTED;
364 }
365 #else //__nvoc_kernel_channel_group_h_disabled
366 #define kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpAddChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel)
367 #endif //__nvoc_kernel_channel_group_h_disabled
368
369 NV_STATUS kchangrpRemoveChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel);
370
371 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpRemoveChannel(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,struct KernelChannel * pKernelChannel)372 static inline NV_STATUS kchangrpRemoveChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) {
373 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
374 return NV_ERR_NOT_SUPPORTED;
375 }
376 #else //__nvoc_kernel_channel_group_h_disabled
377 #define kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpRemoveChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel)
378 #endif //__nvoc_kernel_channel_group_h_disabled
379
380 NV_STATUS kchangrpInit_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid);
381
382 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpInit(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,struct OBJVASPACE * pVAS,NvU32 gfid)383 static inline NV_STATUS kchangrpInit(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid) {
384 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
385 return NV_ERR_NOT_SUPPORTED;
386 }
387 #else //__nvoc_kernel_channel_group_h_disabled
388 #define kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid) kchangrpInit_IMPL(pGpu, pKernelChannelGroup, pVAS, gfid)
389 #endif //__nvoc_kernel_channel_group_h_disabled
390
391 NV_STATUS kchangrpDestroy_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
392
393 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpDestroy(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)394 static inline NV_STATUS kchangrpDestroy(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
395 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
396 return NV_ERR_NOT_SUPPORTED;
397 }
398 #else //__nvoc_kernel_channel_group_h_disabled
399 #define kchangrpDestroy(pGpu, pKernelChannelGroup) kchangrpDestroy_IMPL(pGpu, pKernelChannelGroup)
400 #endif //__nvoc_kernel_channel_group_h_disabled
401
402 NV_STATUS kchangrpAllocEngineContextDescriptor_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup);
403
404 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpAllocEngineContextDescriptor(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup)405 static inline NV_STATUS kchangrpAllocEngineContextDescriptor(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) {
406 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
407 return NV_ERR_NOT_SUPPORTED;
408 }
409 #else //__nvoc_kernel_channel_group_h_disabled
410 #define kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup) kchangrpAllocEngineContextDescriptor_IMPL(pGpu, pKernelChannelGroup)
411 #endif //__nvoc_kernel_channel_group_h_disabled
412
413 NV_STATUS kchangrpGetEngineContextMemDesc_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, MEMORY_DESCRIPTOR **arg1);
414
415 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpGetEngineContextMemDesc(struct OBJGPU * pGpu,struct KernelChannelGroup * arg0,MEMORY_DESCRIPTOR ** arg1)416 static inline NV_STATUS kchangrpGetEngineContextMemDesc(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, MEMORY_DESCRIPTOR **arg1) {
417 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
418 return NV_ERR_NOT_SUPPORTED;
419 }
420 #else //__nvoc_kernel_channel_group_h_disabled
421 #define kchangrpGetEngineContextMemDesc(pGpu, arg0, arg1) kchangrpGetEngineContextMemDesc_IMPL(pGpu, arg0, arg1)
422 #endif //__nvoc_kernel_channel_group_h_disabled
423
424 NV_STATUS kchangrpSetInterleaveLevel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value);
425
426 #ifdef __nvoc_kernel_channel_group_h_disabled
kchangrpSetInterleaveLevel(struct OBJGPU * pGpu,struct KernelChannelGroup * pKernelChannelGroup,NvU32 value)427 static inline NV_STATUS kchangrpSetInterleaveLevel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) {
428 NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!");
429 return NV_ERR_NOT_SUPPORTED;
430 }
431 #else //__nvoc_kernel_channel_group_h_disabled
432 #define kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevel_IMPL(pGpu, pKernelChannelGroup, value)
433 #endif //__nvoc_kernel_channel_group_h_disabled
434
435 #undef PRIVATE_FIELD
436
437
438 MAKE_INTRUSIVE_MAP(KernelChannelGroupMap, KernelChannelGroup, mapNode);
439
440 #endif // KERNEL_CHANNEL_GROUP_H
441
442 #ifdef __cplusplus
443 } // extern "C"
444 #endif
445
446 #endif // _G_KERNEL_CHANNEL_GROUP_NVOC_H_
447