1 #ifndef _G_KERNEL_CE_NVOC_H_
2 #define _G_KERNEL_CE_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kernel_ce_nvoc.h"
33 
34 #ifndef KERNEL_CE_H
35 #define KERNEL_CE_H
36 
37 #include "core/core.h"
38 #include "core/info_block.h"
39 #include "ctrl/ctrl2080/ctrl2080ce.h"
40 #include "gpu/eng_state.h"
41 #include "gpu/gpu_halspec.h"
42 #include "gpu/gpu.h"
43 #include "kernel/gpu/intr/intr_service.h"
44 #include "gpu/ce/kernel_ce_shared.h"
45 
46 #define MAX_CE_CNT 18
47 
48 /*
49  * sysmemLinks
50  *     Represents the number of sysmem links detected
51  *     This affects how many PCEs LCE0(sysmem read CE)
52  *     and LCE1(sysmem write CE) should be mapped to
53  * maxLinksPerPeer
54  *     Represents the maximum number of peer links
55  *     between this GPU and all its peers. This affects
56  *     how many PCEs LCE3(P2P CE) should be mapped to
57  * numPeers
58  *     Represents the number of Peer GPUs discovered so far
59  * bSymmetric
60  *     Represents whether the topology detected so far
61  *     is symmetric i.e. has same number of links to all
62  *     peers connected through nvlink. This affects how
63  *     many PCEs to assign to LCEs3-5 (nvlink P2P CEs)
64  * bSwitchConfig
65  *     Represents whether the config listed is intended
66  *     for use with nvswitch systems
 * pceLceMap
 *     Value of NV_CE_PCE2LCE_CONFIG0 register with the
 *     above values for sysmemLinks, maxLinksPerPeer,
 *     numPeers and bSymmetric
 * grceConfig
 *     Value of NV_CE_GRCE_CONFIG register with the
 *     above values for sysmemLinks, maxLinksPerPeer,
 *     numPeers and bSymmetric
 * exposeCeMask
 *     Mask of CEs to expose to clients for the above
 *     values of sysmemLinks, maxLinksPerPeer,
 *     numPeers and bSymmetric
79  */
80 typedef struct NVLINK_CE_AUTO_CONFIG_TABLE
81 {
82     NvU32  sysmemLinks;
83     NvU32  maxLinksPerPeer;
84     NvU32  numPeers;
85     NvBool bSymmetric;
86     NvBool bSwitchConfig;
87     NvU32  pceLceMap[MAX_CE_CNT];
88     NvU32  grceConfig[MAX_CE_CNT];
89     NvU32  exposeCeMask;
90 } NVLINK_CE_AUTO_CONFIG_TABLE;
91 
92 //
93 // Kernel Copy Engine
94 // This class provides Kernel-RM interface and state tracking for Copy Engine.
95 //
96 
97 #ifdef NVOC_KERNEL_CE_H_PRIVATE_ACCESS_ALLOWED
98 #define PRIVATE_FIELD(x) x
99 #else
100 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
101 #endif
102 struct KernelCE {
103     const struct NVOC_RTTI *__nvoc_rtti;
104     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
105     struct IntrService __nvoc_base_IntrService;
106     struct Object *__nvoc_pbase_Object;
107     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
108     struct IntrService *__nvoc_pbase_IntrService;
109     struct KernelCE *__nvoc_pbase_KernelCE;
110     NV_STATUS (*__kceConstructEngine__)(OBJGPU *, struct KernelCE *, ENGDESCRIPTOR);
111     NvBool (*__kceIsPresent__)(OBJGPU *, struct KernelCE *);
112     NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32);
113     NV_STATUS (*__kceStateUnload__)(OBJGPU *, struct KernelCE *, NvU32);
114     void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE *, IntrServiceRecord *);
115     NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceNotificationInterruptArguments *);
116     NV_STATUS (*__kceGetNvlinkAutoConfigCeValues__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
117     NvBool (*__kceGetNvlinkMaxTopoForTable__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, void *, NvU32, NvU32 *);
118     NvBool (*__kceIsCurrentMaxTopology__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *);
119     NvU32 (*__kceGetPce2lceConfigSize1__)(struct KernelCE *);
120     NV_STATUS (*__kceGetMappings__)(OBJGPU *, struct KernelCE *, NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *, NvU32 *);
121     NV_STATUS (*__kceMapPceLceForC2C__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
122     void (*__kceMapPceLceForGRCE__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU32);
123     NV_STATUS (*__kceMapPceLceForSysmemLinks__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32);
124     NV_STATUS (*__kceMapPceLceForNvlinkPeers__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
125     NvU32 (*__kceGetSysmemSupportedLceMask__)(OBJGPU *, struct KernelCE *);
126     NV_STATUS (*__kceMapAsyncLceDefault__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32);
127     NvU32 (*__kceGetNvlinkPeerSupportedLceMask__)(OBJGPU *, struct KernelCE *, NvU32);
128     NvU32 (*__kceGetGrceSupportedLceMask__)(OBJGPU *, struct KernelCE *);
129     NvBool (*__kceIsGenXorHigherSupported__)(OBJGPU *, struct KernelCE *, NvU32);
130     void (*__kceApplyGen4orHigherMapping__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32, NvU32);
131     NV_STATUS (*__kceStateInitLocked__)(POBJGPU, struct KernelCE *);
132     NV_STATUS (*__kceStatePreLoad__)(POBJGPU, struct KernelCE *, NvU32);
133     NV_STATUS (*__kceStatePostUnload__)(POBJGPU, struct KernelCE *, NvU32);
134     void (*__kceStateDestroy__)(POBJGPU, struct KernelCE *);
135     NV_STATUS (*__kceStatePreUnload__)(POBJGPU, struct KernelCE *, NvU32);
136     NV_STATUS (*__kceStateInitUnlocked__)(POBJGPU, struct KernelCE *);
137     void (*__kceInitMissing__)(POBJGPU, struct KernelCE *);
138     NV_STATUS (*__kceStatePreInitLocked__)(POBJGPU, struct KernelCE *);
139     NV_STATUS (*__kceStatePreInitUnlocked__)(POBJGPU, struct KernelCE *);
140     NvBool (*__kceClearInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceClearInterruptArguments *);
141     NV_STATUS (*__kceStatePostLoad__)(POBJGPU, struct KernelCE *, NvU32);
142     NvU32 (*__kceServiceInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceInterruptArguments *);
143     NvU32 publicID;
144     NvBool bShimOwner;
145     NvBool bStubbed;
146     NvU32 nvlinkPeerMask;
147     NvU32 nvlinkNumPeers;
148     NvBool bIsAutoConfigEnabled;
149     NvBool bUseGen4Mapping;
150     struct IoAperture aperture;
151 };
152 
153 #ifndef __NVOC_CLASS_KernelCE_TYPEDEF__
154 #define __NVOC_CLASS_KernelCE_TYPEDEF__
155 typedef struct KernelCE KernelCE;
156 #endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */
157 
158 #ifndef __nvoc_class_id_KernelCE
159 #define __nvoc_class_id_KernelCE 0x242aca
160 #endif /* __nvoc_class_id_KernelCE */
161 
162 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCE;
163 
164 #define __staticCast_KernelCE(pThis) \
165     ((pThis)->__nvoc_pbase_KernelCE)
166 
167 #ifdef __nvoc_kernel_ce_h_disabled
168 #define __dynamicCast_KernelCE(pThis) ((KernelCE*)NULL)
169 #else //__nvoc_kernel_ce_h_disabled
170 #define __dynamicCast_KernelCE(pThis) \
171     ((KernelCE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCE)))
172 #endif //__nvoc_kernel_ce_h_disabled
173 
174 #define PDB_PROP_KCE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
175 #define PDB_PROP_KCE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
176 
177 NV_STATUS __nvoc_objCreateDynamic_KernelCE(KernelCE**, Dynamic*, NvU32, va_list);
178 
179 NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
180 #define __objCreate_KernelCE(ppNewObj, pParent, createFlags) \
181     __nvoc_objCreate_KernelCE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
182 
183 #define kceConstructEngine(pGpu, pKCe, arg0) kceConstructEngine_DISPATCH(pGpu, pKCe, arg0)
184 #define kceIsPresent(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
185 #define kceIsPresent_HAL(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
186 #define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
187 #define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
188 #define kceStateUnload(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
189 #define kceStateUnload_HAL(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
190 #define kceRegisterIntrService(arg0, arg1, arg2) kceRegisterIntrService_DISPATCH(arg0, arg1, arg2)
191 #define kceServiceNotificationInterrupt(arg0, arg1, arg2) kceServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
192 #define kceGetNvlinkAutoConfigCeValues(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
193 #define kceGetNvlinkAutoConfigCeValues_HAL(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
194 #define kceGetNvlinkMaxTopoForTable(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3)
195 #define kceGetNvlinkMaxTopoForTable_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3)
196 #define kceIsCurrentMaxTopology(pGpu, arg0, arg1, arg2, arg3) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg0, arg1, arg2, arg3)
197 #define kceIsCurrentMaxTopology_HAL(pGpu, arg0, arg1, arg2, arg3) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg0, arg1, arg2, arg3)
198 #define kceGetPce2lceConfigSize1(arg0) kceGetPce2lceConfigSize1_DISPATCH(arg0)
199 #define kceGetPce2lceConfigSize1_HAL(arg0) kceGetPce2lceConfigSize1_DISPATCH(arg0)
200 #define kceGetMappings(pGpu, pCe, arg0, arg1, arg2, arg3) kceGetMappings_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
201 #define kceGetMappings_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceGetMappings_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
202 #define kceMapPceLceForC2C(pGpu, pKCe, arg0, arg1, arg2) kceMapPceLceForC2C_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
203 #define kceMapPceLceForC2C_HAL(pGpu, pKCe, arg0, arg1, arg2) kceMapPceLceForC2C_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
204 #define kceMapPceLceForGRCE(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceMapPceLceForGRCE_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
205 #define kceMapPceLceForGRCE_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceMapPceLceForGRCE_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
206 #define kceMapPceLceForSysmemLinks(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
207 #define kceMapPceLceForSysmemLinks_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
208 #define kceMapPceLceForNvlinkPeers(pGpu, pCe, arg0, arg1, arg2) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg0, arg1, arg2)
209 #define kceMapPceLceForNvlinkPeers_HAL(pGpu, pCe, arg0, arg1, arg2) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg0, arg1, arg2)
210 #define kceGetSysmemSupportedLceMask(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe)
211 #define kceGetSysmemSupportedLceMask_HAL(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe)
212 #define kceMapAsyncLceDefault(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
213 #define kceMapAsyncLceDefault_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
214 #define kceGetNvlinkPeerSupportedLceMask(pGpu, pCe, arg0) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg0)
215 #define kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pCe, arg0) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg0)
216 #define kceGetGrceSupportedLceMask(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe)
217 #define kceGetGrceSupportedLceMask_HAL(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe)
218 #define kceIsGenXorHigherSupported(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
219 #define kceIsGenXorHigherSupported_HAL(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
220 #define kceApplyGen4orHigherMapping(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
221 #define kceApplyGen4orHigherMapping_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
222 #define kceStateInitLocked(pGpu, pEngstate) kceStateInitLocked_DISPATCH(pGpu, pEngstate)
223 #define kceStatePreLoad(pGpu, pEngstate, arg0) kceStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
224 #define kceStatePostUnload(pGpu, pEngstate, arg0) kceStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
225 #define kceStateDestroy(pGpu, pEngstate) kceStateDestroy_DISPATCH(pGpu, pEngstate)
226 #define kceStatePreUnload(pGpu, pEngstate, arg0) kceStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
227 #define kceStateInitUnlocked(pGpu, pEngstate) kceStateInitUnlocked_DISPATCH(pGpu, pEngstate)
228 #define kceInitMissing(pGpu, pEngstate) kceInitMissing_DISPATCH(pGpu, pEngstate)
229 #define kceStatePreInitLocked(pGpu, pEngstate) kceStatePreInitLocked_DISPATCH(pGpu, pEngstate)
230 #define kceStatePreInitUnlocked(pGpu, pEngstate) kceStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
231 #define kceClearInterrupt(pGpu, pIntrService, pParams) kceClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
232 #define kceStatePostLoad(pGpu, pEngstate, arg0) kceStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
233 #define kceServiceInterrupt(pGpu, pIntrService, pParams) kceServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
234 static inline void kceNonstallIntrCheckAndClear_b3696a(OBJGPU *arg0, struct KernelCE *arg1, struct THREAD_STATE_NODE *arg2) {
235     return;
236 }
237 
238 
239 #ifdef __nvoc_kernel_ce_h_disabled
240 static inline void kceNonstallIntrCheckAndClear(OBJGPU *arg0, struct KernelCE *arg1, struct THREAD_STATE_NODE *arg2) {
241     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
242 }
243 #else //__nvoc_kernel_ce_h_disabled
244 #define kceNonstallIntrCheckAndClear(arg0, arg1, arg2) kceNonstallIntrCheckAndClear_b3696a(arg0, arg1, arg2)
245 #endif //__nvoc_kernel_ce_h_disabled
246 
247 #define kceNonstallIntrCheckAndClear_HAL(arg0, arg1, arg2) kceNonstallIntrCheckAndClear(arg0, arg1, arg2)
248 
249 NV_STATUS kceUpdateClassDB_KERNEL(OBJGPU *pGpu, struct KernelCE *pKCe);
250 
251 
252 #ifdef __nvoc_kernel_ce_h_disabled
253 static inline NV_STATUS kceUpdateClassDB(OBJGPU *pGpu, struct KernelCE *pKCe) {
254     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
255     return NV_ERR_NOT_SUPPORTED;
256 }
257 #else //__nvoc_kernel_ce_h_disabled
258 #define kceUpdateClassDB(pGpu, pKCe) kceUpdateClassDB_KERNEL(pGpu, pKCe)
259 #endif //__nvoc_kernel_ce_h_disabled
260 
261 #define kceUpdateClassDB_HAL(pGpu, pKCe) kceUpdateClassDB(pGpu, pKCe)
262 
263 NvBool kceIsCeSysmemRead_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);
264 
265 
266 #ifdef __nvoc_kernel_ce_h_disabled
267 static inline NvBool kceIsCeSysmemRead(OBJGPU *pGpu, struct KernelCE *pKCe) {
268     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
269     return NV_FALSE;
270 }
271 #else //__nvoc_kernel_ce_h_disabled
272 #define kceIsCeSysmemRead(pGpu, pKCe) kceIsCeSysmemRead_GP100(pGpu, pKCe)
273 #endif //__nvoc_kernel_ce_h_disabled
274 
275 #define kceIsCeSysmemRead_HAL(pGpu, pKCe) kceIsCeSysmemRead(pGpu, pKCe)
276 
277 NvBool kceIsCeSysmemWrite_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);
278 
279 
280 #ifdef __nvoc_kernel_ce_h_disabled
281 static inline NvBool kceIsCeSysmemWrite(OBJGPU *pGpu, struct KernelCE *pKCe) {
282     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
283     return NV_FALSE;
284 }
285 #else //__nvoc_kernel_ce_h_disabled
286 #define kceIsCeSysmemWrite(pGpu, pKCe) kceIsCeSysmemWrite_GP100(pGpu, pKCe)
287 #endif //__nvoc_kernel_ce_h_disabled
288 
289 #define kceIsCeSysmemWrite_HAL(pGpu, pKCe) kceIsCeSysmemWrite(pGpu, pKCe)
290 
291 NvBool kceIsCeNvlinkP2P_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);
292 
293 
294 #ifdef __nvoc_kernel_ce_h_disabled
295 static inline NvBool kceIsCeNvlinkP2P(OBJGPU *pGpu, struct KernelCE *pKCe) {
296     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
297     return NV_FALSE;
298 }
299 #else //__nvoc_kernel_ce_h_disabled
300 #define kceIsCeNvlinkP2P(pGpu, pKCe) kceIsCeNvlinkP2P_GP100(pGpu, pKCe)
301 #endif //__nvoc_kernel_ce_h_disabled
302 
303 #define kceIsCeNvlinkP2P_HAL(pGpu, pKCe) kceIsCeNvlinkP2P(pGpu, pKCe)
304 
305 NV_STATUS kceGetP2PCes_GV100(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);
306 
307 
308 #ifdef __nvoc_kernel_ce_h_disabled
309 static inline NV_STATUS kceGetP2PCes(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask) {
310     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
311     return NV_ERR_NOT_SUPPORTED;
312 }
313 #else //__nvoc_kernel_ce_h_disabled
314 #define kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_GV100(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
315 #endif //__nvoc_kernel_ce_h_disabled
316 
317 #define kceGetP2PCes_HAL(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
318 
319 void kceGetSysmemRWLCEs_GV100(struct KernelCE *arg0, NvU32 *rd, NvU32 *wr);
320 
321 
322 #ifdef __nvoc_kernel_ce_h_disabled
323 static inline void kceGetSysmemRWLCEs(struct KernelCE *arg0, NvU32 *rd, NvU32 *wr) {
324     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
325 }
326 #else //__nvoc_kernel_ce_h_disabled
327 #define kceGetSysmemRWLCEs(arg0, rd, wr) kceGetSysmemRWLCEs_GV100(arg0, rd, wr)
328 #endif //__nvoc_kernel_ce_h_disabled
329 
330 #define kceGetSysmemRWLCEs_HAL(arg0, rd, wr) kceGetSysmemRWLCEs(arg0, rd, wr)
331 
332 void kceClearAssignedNvlinkPeerMasks_GV100(OBJGPU *pGpu, struct KernelCE *pKCe);
333 
334 
335 #ifdef __nvoc_kernel_ce_h_disabled
336 static inline void kceClearAssignedNvlinkPeerMasks(OBJGPU *pGpu, struct KernelCE *pKCe) {
337     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
338 }
339 #else //__nvoc_kernel_ce_h_disabled
340 #define kceClearAssignedNvlinkPeerMasks(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks_GV100(pGpu, pKCe)
341 #endif //__nvoc_kernel_ce_h_disabled
342 
343 #define kceClearAssignedNvlinkPeerMasks_HAL(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks(pGpu, pKCe)
344 
345 NvBool kceGetAutoConfigTableEntry_GV100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4);
346 
347 
348 #ifdef __nvoc_kernel_ce_h_disabled
349 static inline NvBool kceGetAutoConfigTableEntry(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4) {
350     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
351     return NV_FALSE;
352 }
353 #else //__nvoc_kernel_ce_h_disabled
354 #define kceGetAutoConfigTableEntry(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceGetAutoConfigTableEntry_GV100(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
355 #endif //__nvoc_kernel_ce_h_disabled
356 
357 #define kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceGetAutoConfigTableEntry(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
358 
359 NvU32 kceGetGrceConfigSize1_TU102(struct KernelCE *arg0);
360 
361 
362 #ifdef __nvoc_kernel_ce_h_disabled
363 static inline NvU32 kceGetGrceConfigSize1(struct KernelCE *arg0) {
364     NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
365     return 0;
366 }
367 #else //__nvoc_kernel_ce_h_disabled
368 #define kceGetGrceConfigSize1(arg0) kceGetGrceConfigSize1_TU102(arg0)
369 #endif //__nvoc_kernel_ce_h_disabled
370 
371 #define kceGetGrceConfigSize1_HAL(arg0) kceGetGrceConfigSize1(arg0)
372 
373 NV_STATUS kceConstructEngine_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg0);
374 
375 static inline NV_STATUS kceConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg0) {
376     return pKCe->__kceConstructEngine__(pGpu, pKCe, arg0);
377 }
378 
379 NvBool kceIsPresent_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe);
380 
381 static inline NvBool kceIsPresent_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe) {
382     return pKCe->__kceIsPresent__(pGpu, pKCe);
383 }
384 
385 NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2);
386 
387 static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) {
388     return arg1->__kceStateLoad__(arg0, arg1, arg2);
389 }
390 
391 static inline NV_STATUS kceStateUnload_56cd7a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
392     return NV_OK;
393 }
394 
395 static inline NV_STATUS kceStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
396     return pKCe->__kceStateUnload__(pGpu, pKCe, flags);
397 }
398 
399 void kceRegisterIntrService_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[166]);
400 
401 static inline void kceRegisterIntrService_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[166]) {
402     arg1->__kceRegisterIntrService__(arg0, arg1, arg2);
403 }
404 
405 NV_STATUS kceServiceNotificationInterrupt_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceServiceNotificationInterruptArguments *arg2);
406 
407 static inline NV_STATUS kceServiceNotificationInterrupt_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) {
408     return arg1->__kceServiceNotificationInterrupt__(arg0, arg1, arg2);
409 }
410 
411 NV_STATUS kceGetNvlinkAutoConfigCeValues_TU102(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);
412 
413 NV_STATUS kceGetNvlinkAutoConfigCeValues_GA100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);
414 
415 static inline NV_STATUS kceGetNvlinkAutoConfigCeValues_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
416     return pKCe->__kceGetNvlinkAutoConfigCeValues__(pGpu, pKCe, arg0, arg1, arg2);
417 }
418 
419 NvBool kceGetNvlinkMaxTopoForTable_GP100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3);
420 
421 static inline NvBool kceGetNvlinkMaxTopoForTable_491d52(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3) {
422     return ((NvBool)(0 != 0));
423 }
424 
425 static inline NvBool kceGetNvlinkMaxTopoForTable_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3) {
426     return pKCe->__kceGetNvlinkMaxTopoForTable__(pGpu, pKCe, arg0, arg1, arg2, arg3);
427 }
428 
429 NvBool kceIsCurrentMaxTopology_GA100(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3);
430 
431 static inline NvBool kceIsCurrentMaxTopology_491d52(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3) {
432     return ((NvBool)(0 != 0));
433 }
434 
435 static inline NvBool kceIsCurrentMaxTopology_DISPATCH(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3) {
436     return arg0->__kceIsCurrentMaxTopology__(pGpu, arg0, arg1, arg2, arg3);
437 }
438 
439 NvU32 kceGetPce2lceConfigSize1_TU102(struct KernelCE *arg0);
440 
441 NvU32 kceGetPce2lceConfigSize1_GA100(struct KernelCE *arg0);
442 
443 NvU32 kceGetPce2lceConfigSize1_GA102(struct KernelCE *arg0);
444 
445 NvU32 kceGetPce2lceConfigSize1_GH100(struct KernelCE *arg0);
446 
447 static inline NvU32 kceGetPce2lceConfigSize1_DISPATCH(struct KernelCE *arg0) {
448     return arg0->__kceGetPce2lceConfigSize1__(arg0);
449 }
450 
451 NV_STATUS kceGetMappings_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3);
452 
453 NV_STATUS kceGetMappings_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3);
454 
455 static inline NV_STATUS kceGetMappings_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) {
456     return NV_ERR_NOT_SUPPORTED;
457 }
458 
459 static inline NV_STATUS kceGetMappings_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) {
460     return pCe->__kceGetMappings__(pGpu, pCe, arg0, arg1, arg2, arg3);
461 }
462 
463 NV_STATUS kceMapPceLceForC2C_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);
464 
465 static inline NV_STATUS kceMapPceLceForC2C_46f6a7(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
466     return NV_ERR_NOT_SUPPORTED;
467 }
468 
469 static inline NV_STATUS kceMapPceLceForC2C_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
470     return pKCe->__kceMapPceLceForC2C__(pGpu, pKCe, arg0, arg1, arg2);
471 }
472 
473 void kceMapPceLceForGRCE_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 arg4);
474 
475 static inline void kceMapPceLceForGRCE_b3696a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 arg4) {
476     return;
477 }
478 
479 static inline void kceMapPceLceForGRCE_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 arg4) {
480     pKCe->__kceMapPceLceForGRCE__(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4);
481 }
482 
483 NV_STATUS kceMapPceLceForSysmemLinks_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);
484 
485 NV_STATUS kceMapPceLceForSysmemLinks_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);
486 
487 static inline NV_STATUS kceMapPceLceForSysmemLinks_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
488     return NV_ERR_NOT_SUPPORTED;
489 }
490 
491 static inline NV_STATUS kceMapPceLceForSysmemLinks_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
492     return pCe->__kceMapPceLceForSysmemLinks__(pGpu, pCe, arg0, arg1, arg2, arg3);
493 }
494 
495 NV_STATUS kceMapPceLceForNvlinkPeers_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);
496 
497 NV_STATUS kceMapPceLceForNvlinkPeers_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);
498 
499 static inline NV_STATUS kceMapPceLceForNvlinkPeers_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
500     return NV_ERR_NOT_SUPPORTED;
501 }
502 
503 static inline NV_STATUS kceMapPceLceForNvlinkPeers_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
504     return pCe->__kceMapPceLceForNvlinkPeers__(pGpu, pCe, arg0, arg1, arg2);
505 }
506 
507 NvU32 kceGetSysmemSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe);
508 
509 NvU32 kceGetSysmemSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe);
510 
511 static inline NvU32 kceGetSysmemSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) {
512     return 0;
513 }
514 
515 static inline NvU32 kceGetSysmemSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) {
516     return pCe->__kceGetSysmemSupportedLceMask__(pGpu, pCe);
517 }
518 
519 NV_STATUS kceMapAsyncLceDefault_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);
520 
521 NV_STATUS kceMapAsyncLceDefault_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);
522 
523 static inline NV_STATUS kceMapAsyncLceDefault_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
524     return NV_ERR_NOT_SUPPORTED;
525 }
526 
527 static inline NV_STATUS kceMapAsyncLceDefault_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
528     return pCe->__kceMapAsyncLceDefault__(pGpu, pCe, arg0, arg1, arg2, arg3);
529 }
530 
531 NvU32 kceGetNvlinkPeerSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0);
532 
533 NvU32 kceGetNvlinkPeerSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0);
534 
535 static inline NvU32 kceGetNvlinkPeerSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0) {
536     return 0;
537 }
538 
539 static inline NvU32 kceGetNvlinkPeerSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0) {
540     return pCe->__kceGetNvlinkPeerSupportedLceMask__(pGpu, pCe, arg0);
541 }
542 
543 NvU32 kceGetGrceSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe);
544 
545 NvU32 kceGetGrceSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe);
546 
547 static inline NvU32 kceGetGrceSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) {
548     return 0;
549 }
550 
551 static inline NvU32 kceGetGrceSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) {
552     return pCe->__kceGetGrceSupportedLceMask__(pGpu, pCe);
553 }
554 
555 NvBool kceIsGenXorHigherSupported_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen);
556 
557 NvBool kceIsGenXorHigherSupported_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen);
558 
559 static inline NvBool kceIsGenXorHigherSupported_cbe027(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen) {
560     return ((NvBool)(0 == 0));
561 }
562 
563 static inline NvBool kceIsGenXorHigherSupported_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen) {
564     return pCe->__kceIsGenXorHigherSupported__(pGpu, pCe, checkGen);
565 }
566 
567 void kceApplyGen4orHigherMapping_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3);
568 
569 static inline void kceApplyGen4orHigherMapping_b3696a(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3) {
570     return;
571 }
572 
573 static inline void kceApplyGen4orHigherMapping_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3) {
574     pCe->__kceApplyGen4orHigherMapping__(pGpu, pCe, arg0, arg1, arg2, arg3);
575 }
576 
//
// OBJENGSTATE / IntrService virtual dispatch thunks. Each forwards to the
// implementation installed in the KernelCE vtable; the NvU32 arg0 carried by
// the state-transition hooks is passed through opaquely (flags — semantics
// defined by OBJENGSTATE, not visible from this header).
//

// Engine state init, GPU lock held.
static inline NV_STATUS kceStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStateInitLocked__(pGpu, pEngstate);
}

// Hook run before engine load.
static inline NV_STATUS kceStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePreLoad__(pGpu, pEngstate, arg0);
}

// Hook run after engine unload.
static inline NV_STATUS kceStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePostUnload__(pGpu, pEngstate, arg0);
}

// Engine state teardown (no status to report).
static inline void kceStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    pEngstate->__kceStateDestroy__(pGpu, pEngstate);
}

// Hook run before engine unload.
static inline NV_STATUS kceStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePreUnload__(pGpu, pEngstate, arg0);
}

// Engine state init, GPU lock not held.
static inline NV_STATUS kceStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStateInitUnlocked__(pGpu, pEngstate);
}

// Hook invoked when the engine is absent/missing on this GPU.
static inline void kceInitMissing_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    pEngstate->__kceInitMissing__(pGpu, pEngstate);
}

// Pre-init hook, GPU lock held.
static inline NV_STATUS kceStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStatePreInitLocked__(pGpu, pEngstate);
}

// Pre-init hook, GPU lock not held.
static inline NV_STATUS kceStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStatePreInitUnlocked__(pGpu, pEngstate);
}

// IntrService hook: clear a pending CE interrupt described by pParams.
static inline NvBool kceClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__kceClearInterrupt__(pGpu, pIntrService, pParams);
}

// Hook run after engine load.
static inline NV_STATUS kceStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePostLoad__(pGpu, pEngstate, arg0);
}

// IntrService hook: service a CE interrupt described by pParams.
static inline NvU32 kceServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return pIntrService->__kceServiceInterrupt__(pGpu, pIntrService, pParams);
}
624 
// Return the first KernelCE instance present on pGpu via *ppKCe
// (implementation in kernel_ce.c; always compiled, no disabled-stub variant).
NV_STATUS kceFindFirstInstance_IMPL(OBJGPU *pGpu, struct KernelCE **ppKCe);

#define kceFindFirstInstance(pGpu, ppKCe) kceFindFirstInstance_IMPL(pGpu, ppKCe)
// NOTE(review): per the name, presumably recomputes and applies the top-level
// PCE->LCE mappings for the GPU — confirm in kernel_ce.c.
NV_STATUS kceTopLevelPceLceMappingsUpdate_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe);
629 
#ifdef __nvoc_kernel_ce_h_disabled
// Stub used when the KernelCE engine is compiled out: asserts (pre-compiled
// builds) and reports the call as unsupported.
static inline NV_STATUS kceTopLevelPceLceMappingsUpdate(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceTopLevelPceLceMappingsUpdate(pGpu, pKCe) kceTopLevelPceLceMappingsUpdate_IMPL(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled
638 
// Report the fault-method buffer size via *size. Takes no KernelCE instance,
// so it has no disabled-stub variant.
NV_STATUS kceGetFaultMethodBufferSize_IMPL(OBJGPU *pGpu, NvU32 *size);

#define kceGetFaultMethodBufferSize(pGpu, size) kceGetFaultMethodBufferSize_IMPL(pGpu, size)
// Fill pTopoParams with the available HSHUB PCE masks for this GPU.
NV_STATUS kceGetAvailableHubPceMask_IMPL(OBJGPU *pGpu, NVLINK_TOPOLOGY_PARAMS *pTopoParams);

#define kceGetAvailableHubPceMask(pGpu, pTopoParams) kceGetAvailableHubPceMask_IMPL(pGpu, pTopoParams)
// Write NVLink-related CE capability bits into the caller's pKCeCaps array.
void kceGetNvlinkCaps_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps);
646 
#ifdef __nvoc_kernel_ce_h_disabled
// Stub used when the KernelCE engine is compiled out: asserts and returns
// without touching pKCeCaps (void return, so no error can be reported).
static inline void kceGetNvlinkCaps(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetNvlinkCaps(pGpu, pKCe, pKCeCaps) kceGetNvlinkCaps_IMPL(pGpu, pKCe, pKCeCaps)
#endif //__nvoc_kernel_ce_h_disabled
654 
// Query per-device CE capability bits for the CE identified by rmEngineType,
// written into the caller's ceCaps array.
NV_STATUS kceGetDeviceCaps_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps);

#ifdef __nvoc_kernel_ce_h_disabled
// Stub used when the KernelCE engine is compiled out: asserts and reports
// the call as unsupported.
static inline NV_STATUS kceGetDeviceCaps(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetDeviceCaps(gpu, pKCe, rmEngineType, ceCaps) kceGetDeviceCaps_IMPL(gpu, pKCe, rmEngineType, ceCaps)
#endif //__nvoc_kernel_ce_h_disabled
665 
// NOTE(review): per the name, presumably locates the KernelCE instance that
// owns the shim for pKCe and returns it via *ppKCe — confirm in kernel_ce.c.
NV_STATUS kceFindShimOwner_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, struct KernelCE **ppKCe);

#ifdef __nvoc_kernel_ce_h_disabled
// Stub used when the KernelCE engine is compiled out: asserts and reports
// the call as unsupported.
static inline NV_STATUS kceFindShimOwner(OBJGPU *gpu, struct KernelCE *pKCe, struct KernelCE **ppKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceFindShimOwner(gpu, pKCe, ppKCe) kceFindShimOwner_IMPL(gpu, pKCe, ppKCe)
#endif //__nvoc_kernel_ce_h_disabled
676 
// NOTE(review): arg0..arg3 semantics are not visible from this header —
// consult kernel_ce.c before relying on them (generated prototype keeps the
// generator's positional names).
NV_STATUS kceGetCeFromNvlinkConfig_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3);

#ifdef __nvoc_kernel_ce_h_disabled
// Stub used when the KernelCE engine is compiled out: asserts and reports
// the call as unsupported.
static inline NV_STATUS kceGetCeFromNvlinkConfig(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetCeFromNvlinkConfig(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetCeFromNvlinkConfig_IMPL(pGpu, pKCe, arg0, arg1, arg2, arg3)
#endif //__nvoc_kernel_ce_h_disabled
687 
688 #undef PRIVATE_FIELD
689 
690 
// Iterate over all KCE objects, starting at CE index `si`.
//
// Opens two scopes (an outer block and a for loop) that MUST be closed with
// KCE_ITER_END or KCE_ITER_END_OR_RETURN_ERROR. Instances not present on
// this GPU (GPU_GET_KCE returns NULL) are skipped. `pKCeIter` is assigned
// unparenthesized — pass a simple lvalue. (Comments are kept outside the
// macro body: line splicing precedes comment removal, so a // comment on a
// backslash-continued line would swallow the continuation.)
#define KCE_ITER_ALL_BEGIN(pGpu, pKCeIter, si)               \
    {                                                        \
        NvU32 maxCe = gpuGetNumCEs(pGpu);                    \
        NvU32 kceInst;                                       \
        for (kceInst = (si); kceInst < maxCe; kceInst++)     \
        {                                                    \
             pKCeIter = GPU_GET_KCE(pGpu, kceInst);          \
             if (pKCeIter == NULL)                           \
             {                                               \
                 continue;                                   \
             }
703 
// Iterate over all CEs visible to hClient.
//
// Each candidate RM_ENGINE_TYPE_COPY(kceInst) is translated through
// ceIndexFromType() for the client; engines the client cannot see (non-NV_OK
// status) are skipped, as are indices with no KCE object. Opens two scopes
// that MUST be closed with KCE_ITER_END or KCE_ITER_END_OR_RETURN_ERROR.
#define KCE_ITER_CLIENT_BEGIN(pGpu, pKCeIter, hClient)       \
    {                                                        \
        NvU32 maxCe = ENG_CE__SIZE_1;                        \
        NV_STATUS kceStatus;                                 \
        NvU32 kceInst;                                       \
        NvU32 kceIdx;                                        \
        for (kceInst = 0; kceInst < maxCe; kceInst++)        \
        {                                                    \
            kceStatus = ceIndexFromType(pGpu, hClient, RM_ENGINE_TYPE_COPY(kceInst), &kceIdx); \
            if (kceStatus != NV_OK)                          \
            {                                                \
                continue;                                    \
            }                                                \
            pKCeIter = GPU_GET_KCE(pGpu, kceIdx);            \
            if (pKCeIter == NULL)                            \
            {                                                \
                continue;                                    \
            }
723 
// Close the loop and block scopes opened by a KCE_ITER_*_BEGIN macro.
#define KCE_ITER_END                                         \
        }                                                    \
    }
727 
// Close a KCE_ITER_*_BEGIN scope; if the loop ran to completion (i.e. the
// iteration body never executed `break`), return
// NV_ERR_INSUFFICIENT_RESOURCES from the enclosing function.
#define KCE_ITER_END_OR_RETURN_ERROR                         \
        }                                                    \
        if (kceInst == maxCe)                                \
        {                                                    \
            return NV_ERR_INSUFFICIENT_RESOURCES;            \
        }                                                    \
    }
735 
736 #endif // KERNEL_CE_H
737 
738 #ifdef __cplusplus
739 } // extern "C"
740 #endif
741 #endif // _G_KERNEL_CE_NVOC_H_
742