#ifndef _G_KERNEL_CE_NVOC_H_
#define _G_KERNEL_CE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_kernel_ce_nvoc.h"

#ifndef KERNEL_CE_H
#define KERNEL_CE_H

#include "core/core.h"
#include "ctrl/ctrl2080/ctrl2080ce.h"
#include "gpu/eng_state.h"
#include "gpu/gpu_halspec.h"
#include "gpu/gpu.h"
#include "kernel/gpu/intr/intr_service.h"
#include "gpu/ce/kernel_ce_shared.h"

typedef struct NVLINK_TOPOLOGY_PARAMS NVLINK_TOPOLOGY_PARAMS;

#define MAX_CE_CNT 18
/*
 * sysmemLinks
 *     The number of sysmem links detected. This affects how many PCEs
 *     LCE0 (sysmem read CE) and LCE1 (sysmem write CE) should be mapped to.
 * maxLinksPerPeer
 *     The maximum number of peer links between this GPU and all of its
 *     peers. This affects how many PCEs LCE3 (P2P CE) should be mapped to.
 * numPeers
 *     The number of peer GPUs discovered so far.
 * bSymmetric
 *     Whether the topology detected so far is symmetric, i.e. has the same
 *     number of links to all peers connected through NVLink. This affects
 *     how many PCEs to assign to LCEs 3-5 (NVLink P2P CEs).
 * bSwitchConfig
 *     Whether the config listed is intended for use with NVSwitch systems.
 * pceLceMap
 *     Value of the NV_CE_PCE2LCE_CONFIG0 register for the above values of
 *     sysmemLinks, maxLinksPerPeer, numPeers and bSymmetric.
 * grceConfig
 *     Value of the NV_CE_GRCE_CONFIG register for the above values of
 *     sysmemLinks, maxLinksPerPeer, numPeers and bSymmetric.
 * exposeCeMask
 *     Mask of CEs to expose to clients for the above values of sysmemLinks,
 *     maxLinksPerPeer, numPeers and bSymmetric.
 */
typedef struct NVLINK_CE_AUTO_CONFIG_TABLE
{
    NvU32  sysmemLinks;
    NvU32  maxLinksPerPeer;
    NvU32  numPeers;
    NvBool bSymmetric;
    NvBool bSwitchConfig;
    NvU32  pceLceMap[MAX_CE_CNT];
    NvU32  grceConfig[MAX_CE_CNT];
    NvU32  exposeCeMask;
} NVLINK_CE_AUTO_CONFIG_TABLE;
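
//
// Illustrative sketch of one auto-config table entry (hypothetical values,
// not taken from any shipped table):
//
// static const NVLINK_CE_AUTO_CONFIG_TABLE ceAutoConfigExample =
// {
//     .sysmemLinks     = 2,        // two sysmem links detected
//     .maxLinksPerPeer = 4,        // up to four NVLink links to any peer
//     .numPeers        = 1,
//     .bSymmetric      = NV_TRUE,
//     .bSwitchConfig   = NV_FALSE,
//     .pceLceMap       = { 0 },    // per-CE NV_CE_PCE2LCE_CONFIG0 values
//     .grceConfig      = { 0 },    // per-CE NV_CE_GRCE_CONFIG values
//     .exposeCeMask    = 0x3,      // expose CE0 and CE1
// };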

//
// Kernel Copy Engine
// This class provides the Kernel-RM interface and state tracking for the
// Copy Engine.
//


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERNEL_CE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct KernelCE {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct IntrService __nvoc_base_IntrService;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct IntrService *__nvoc_pbase_IntrService;
    struct KernelCE *__nvoc_pbase_KernelCE;
    NV_STATUS (*__kceConstructEngine__)(OBJGPU *, struct KernelCE *, ENGDESCRIPTOR);
    NvBool (*__kceIsPresent__)(OBJGPU *, struct KernelCE *);
    NV_STATUS (*__kceStateInitLocked__)(OBJGPU *, struct KernelCE *);
    NV_STATUS (*__kceStateUnload__)(OBJGPU *, struct KernelCE *, NvU32);
    NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32);
    void (*__kceStateDestroy__)(OBJGPU *, struct KernelCE *);
    void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE *, IntrServiceRecord *);
    NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceNotificationInterruptArguments *);
    NV_STATUS (*__kceGetP2PCes__)(struct KernelCE *, OBJGPU *, NvU32, NvU32 *);
    NV_STATUS (*__kceGetNvlinkAutoConfigCeValues__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
    NvBool (*__kceGetNvlinkMaxTopoForTable__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, void *, NvU32, NvU32 *);
    NvBool (*__kceIsCurrentMaxTopology__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *);
    NvBool (*__kceGetAutoConfigTableEntry__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, struct NVLINK_CE_AUTO_CONFIG_TABLE *, NvU32, NvU32 *, NvU32 *);
    NvU32 (*__kceGetPce2lceConfigSize1__)(struct KernelCE *);
    NV_STATUS (*__kceGetMappings__)(OBJGPU *, struct KernelCE *, NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *, NvU32 *);
    NV_STATUS (*__kceMapPceLceForC2C__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
    void (*__kceMapPceLceForGRCE__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU32);
    NV_STATUS (*__kceMapPceLceForSysmemLinks__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32);
    NV_STATUS (*__kceMapPceLceForNvlinkPeers__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
    NvU32 (*__kceGetSysmemSupportedLceMask__)(OBJGPU *, struct KernelCE *);
    NV_STATUS (*__kceMapAsyncLceDefault__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32);
    NvU32 (*__kceGetNvlinkPeerSupportedLceMask__)(OBJGPU *, struct KernelCE *, NvU32);
    NvU32 (*__kceGetGrceSupportedLceMask__)(OBJGPU *, struct KernelCE *);
    NvBool (*__kceIsGenXorHigherSupported__)(OBJGPU *, struct KernelCE *, NvU32);
    void (*__kceApplyGen4orHigherMapping__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32, NvU32);
    NV_STATUS (*__kceStatePreLoad__)(POBJGPU, struct KernelCE *, NvU32);
    NV_STATUS (*__kceStatePostUnload__)(POBJGPU, struct KernelCE *, NvU32);
    NV_STATUS (*__kceStatePreUnload__)(POBJGPU, struct KernelCE *, NvU32);
    NV_STATUS (*__kceStateInitUnlocked__)(POBJGPU, struct KernelCE *);
    void (*__kceInitMissing__)(POBJGPU, struct KernelCE *);
    NV_STATUS (*__kceStatePreInitLocked__)(POBJGPU, struct KernelCE *);
    NV_STATUS (*__kceStatePreInitUnlocked__)(POBJGPU, struct KernelCE *);
    NvBool (*__kceClearInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceClearInterruptArguments *);
    NV_STATUS (*__kceStatePostLoad__)(POBJGPU, struct KernelCE *, NvU32);
    NvU32 (*__kceServiceInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceInterruptArguments *);
    NvU32 publicID;
    NvBool bShimOwner;
    NvBool bStubbed;
    NvU32 nvlinkPeerMask;
    NvU32 nvlinkNumPeers;
    NvBool bIsAutoConfigEnabled;
    NvBool bUseGen4Mapping;
    struct IoAperture aperture;
    NvBool bCcFipsSelfTestRequired;
};

#ifndef __NVOC_CLASS_KernelCE_TYPEDEF__
#define __NVOC_CLASS_KernelCE_TYPEDEF__
typedef struct KernelCE KernelCE;
#endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelCE
#define __nvoc_class_id_KernelCE 0x242aca
#endif /* __nvoc_class_id_KernelCE */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCE;

#define __staticCast_KernelCE(pThis) \
    ((pThis)->__nvoc_pbase_KernelCE)

#ifdef __nvoc_kernel_ce_h_disabled
#define __dynamicCast_KernelCE(pThis) ((KernelCE*)NULL)
#else //__nvoc_kernel_ce_h_disabled
#define __dynamicCast_KernelCE(pThis) \
    ((KernelCE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCE)))
#endif //__nvoc_kernel_ce_h_disabled
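
//
// Illustrative sketch of the cast helpers above (assumed call site;
// pDynamic stands for any NVOC object pointer):
//
//     KernelCE *pKCe = __dynamicCast_KernelCE(pDynamic);
//     if (pKCe == NULL)
//         return NV_ERR_INVALID_OBJECT;   // pDynamic is not a KernelCE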

#define PDB_PROP_KCE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KCE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_KernelCE(KernelCE**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
#define __objCreate_KernelCE(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelCE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
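
//
// Illustrative sketch of constructing a KernelCE through the macro above
// (assumed call site; error handling elided):
//
//     KernelCE *pKCe = NULL;
//     NV_STATUS status = __objCreate_KernelCE(&pKCe, pParent, 0);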

#define kceConstructEngine(pGpu, pKCe, arg0) kceConstructEngine_DISPATCH(pGpu, pKCe, arg0)
#define kceIsPresent(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
#define kceIsPresent_HAL(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
#define kceStateInitLocked(arg0, arg1) kceStateInitLocked_DISPATCH(arg0, arg1)
#define kceStateUnload(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
#define kceStateUnload_HAL(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
#define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
#define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
#define kceStateDestroy(arg0, arg1) kceStateDestroy_DISPATCH(arg0, arg1)
#define kceRegisterIntrService(arg0, arg1, arg2) kceRegisterIntrService_DISPATCH(arg0, arg1, arg2)
#define kceServiceNotificationInterrupt(arg0, arg1, arg2) kceServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
#define kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
#define kceGetP2PCes_HAL(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
#define kceGetNvlinkAutoConfigCeValues(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
#define kceGetNvlinkAutoConfigCeValues_HAL(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
#define kceGetNvlinkMaxTopoForTable(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3)
#define kceGetNvlinkMaxTopoForTable_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3)
#define kceIsCurrentMaxTopology(pGpu, arg0, arg1, arg2, arg3) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg0, arg1, arg2, arg3)
#define kceIsCurrentMaxTopology_HAL(pGpu, arg0, arg1, arg2, arg3) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg0, arg1, arg2, arg3)
#define kceGetAutoConfigTableEntry(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceGetAutoConfigTableEntry_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
#define kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceGetAutoConfigTableEntry_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
#define kceGetPce2lceConfigSize1(arg0) kceGetPce2lceConfigSize1_DISPATCH(arg0)
#define kceGetPce2lceConfigSize1_HAL(arg0) kceGetPce2lceConfigSize1_DISPATCH(arg0)
#define kceGetMappings(pGpu, pCe, arg0, arg1, arg2, arg3) kceGetMappings_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceGetMappings_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceGetMappings_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceMapPceLceForC2C(pGpu, pKCe, arg0, arg1, arg2) kceMapPceLceForC2C_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
#define kceMapPceLceForC2C_HAL(pGpu, pKCe, arg0, arg1, arg2) kceMapPceLceForC2C_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
#define kceMapPceLceForGRCE(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceMapPceLceForGRCE_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
#define kceMapPceLceForGRCE_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceMapPceLceForGRCE_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4)
#define kceMapPceLceForSysmemLinks(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceMapPceLceForSysmemLinks_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceMapPceLceForNvlinkPeers(pGpu, pCe, arg0, arg1, arg2) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg0, arg1, arg2)
#define kceMapPceLceForNvlinkPeers_HAL(pGpu, pCe, arg0, arg1, arg2) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg0, arg1, arg2)
#define kceGetSysmemSupportedLceMask(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe)
#define kceGetSysmemSupportedLceMask_HAL(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe)
#define kceMapAsyncLceDefault(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceMapAsyncLceDefault_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceGetNvlinkPeerSupportedLceMask(pGpu, pCe, arg0) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg0)
#define kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pCe, arg0) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg0)
#define kceGetGrceSupportedLceMask(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe)
#define kceGetGrceSupportedLceMask_HAL(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe)
#define kceIsGenXorHigherSupported(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
#define kceIsGenXorHigherSupported_HAL(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
#define kceApplyGen4orHigherMapping(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceApplyGen4orHigherMapping_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
#define kceStatePreLoad(pGpu, pEngstate, arg0) kceStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kceStatePostUnload(pGpu, pEngstate, arg0) kceStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kceStatePreUnload(pGpu, pEngstate, arg0) kceStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kceStateInitUnlocked(pGpu, pEngstate) kceStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kceInitMissing(pGpu, pEngstate) kceInitMissing_DISPATCH(pGpu, pEngstate)
#define kceStatePreInitLocked(pGpu, pEngstate) kceStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kceStatePreInitUnlocked(pGpu, pEngstate) kceStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kceClearInterrupt(pGpu, pIntrService, pParams) kceClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kceStatePostLoad(pGpu, pEngstate, arg0) kceStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kceServiceInterrupt(pGpu, pIntrService, pParams) kceServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
static inline void kceNonstallIntrCheckAndClear_b3696a(OBJGPU *arg0, struct KernelCE *arg1, struct THREAD_STATE_NODE *arg2) {
    return;
}


#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceNonstallIntrCheckAndClear(OBJGPU *arg0, struct KernelCE *arg1, struct THREAD_STATE_NODE *arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceNonstallIntrCheckAndClear(arg0, arg1, arg2) kceNonstallIntrCheckAndClear_b3696a(arg0, arg1, arg2)
#endif //__nvoc_kernel_ce_h_disabled

#define kceNonstallIntrCheckAndClear_HAL(arg0, arg1, arg2) kceNonstallIntrCheckAndClear(arg0, arg1, arg2)

NV_STATUS kceUpdateClassDB_KERNEL(OBJGPU *pGpu, struct KernelCE *pKCe);


#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceUpdateClassDB(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceUpdateClassDB(pGpu, pKCe) kceUpdateClassDB_KERNEL(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceUpdateClassDB_HAL(pGpu, pKCe) kceUpdateClassDB(pGpu, pKCe)

NvBool kceIsCeSysmemRead_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);


#ifdef __nvoc_kernel_ce_h_disabled
static inline NvBool kceIsCeSysmemRead(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceIsCeSysmemRead(pGpu, pKCe) kceIsCeSysmemRead_GP100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceIsCeSysmemRead_HAL(pGpu, pKCe) kceIsCeSysmemRead(pGpu, pKCe)

NvBool kceIsCeSysmemWrite_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);


#ifdef __nvoc_kernel_ce_h_disabled
static inline NvBool kceIsCeSysmemWrite(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceIsCeSysmemWrite(pGpu, pKCe) kceIsCeSysmemWrite_GP100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceIsCeSysmemWrite_HAL(pGpu, pKCe) kceIsCeSysmemWrite(pGpu, pKCe)

NvBool kceIsCeNvlinkP2P_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);


#ifdef __nvoc_kernel_ce_h_disabled
static inline NvBool kceIsCeNvlinkP2P(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceIsCeNvlinkP2P(pGpu, pKCe) kceIsCeNvlinkP2P_GP100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceIsCeNvlinkP2P_HAL(pGpu, pKCe) kceIsCeNvlinkP2P(pGpu, pKCe)

void kceGetSysmemRWLCEs_GV100(struct KernelCE *arg0, NvU32 *rd, NvU32 *wr);


#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceGetSysmemRWLCEs(struct KernelCE *arg0, NvU32 *rd, NvU32 *wr) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetSysmemRWLCEs(arg0, rd, wr) kceGetSysmemRWLCEs_GV100(arg0, rd, wr)
#endif //__nvoc_kernel_ce_h_disabled

#define kceGetSysmemRWLCEs_HAL(arg0, rd, wr) kceGetSysmemRWLCEs(arg0, rd, wr)

void kceClearAssignedNvlinkPeerMasks_GV100(OBJGPU *pGpu, struct KernelCE *pKCe);


#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceClearAssignedNvlinkPeerMasks(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceClearAssignedNvlinkPeerMasks(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks_GV100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceClearAssignedNvlinkPeerMasks_HAL(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks(pGpu, pKCe)

NvU32 kceGetGrceConfigSize1_TU102(struct KernelCE *arg0);


#ifdef __nvoc_kernel_ce_h_disabled
static inline NvU32 kceGetGrceConfigSize1(struct KernelCE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return 0;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetGrceConfigSize1(arg0) kceGetGrceConfigSize1_TU102(arg0)
#endif //__nvoc_kernel_ce_h_disabled

#define kceGetGrceConfigSize1_HAL(arg0) kceGetGrceConfigSize1(arg0)

NV_STATUS kceConstructEngine_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg0);

static inline NV_STATUS kceConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg0) {
    return pKCe->__kceConstructEngine__(pGpu, pKCe, arg0);
}
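
//
// Illustrative sketch of the NVOC dispatch pattern used throughout this
// header: the kceConstructEngine() macro above expands to the _DISPATCH
// helper, which jumps through the per-object function pointer wired up at
// construction time:
//
//     status = kceConstructEngine(pGpu, pKCe, engDesc);
//     //  -> kceConstructEngine_DISPATCH(pGpu, pKCe, engDesc)
//     //  -> pKCe->__kceConstructEngine__(pGpu, pKCe, engDesc)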

NvBool kceIsPresent_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe);

static inline NvBool kceIsPresent_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe) {
    return pKCe->__kceIsPresent__(pGpu, pKCe);
}

NV_STATUS kceStateInitLocked_IMPL(OBJGPU *arg0, struct KernelCE *arg1);

static inline NV_STATUS kceStateInitLocked_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1) {
    return arg1->__kceStateInitLocked__(arg0, arg1);
}

static inline NV_STATUS kceStateUnload_56cd7a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
    return NV_OK;
}

static inline NV_STATUS kceStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
    return pKCe->__kceStateUnload__(pGpu, pKCe, flags);
}

NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2);

static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) {
    return arg1->__kceStateLoad__(arg0, arg1, arg2);
}

void kceStateDestroy_IMPL(OBJGPU *arg0, struct KernelCE *arg1);

static inline void kceStateDestroy_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1) {
    arg1->__kceStateDestroy__(arg0, arg1);
}

void kceRegisterIntrService_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[171]);

static inline void kceRegisterIntrService_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[171]) {
    arg1->__kceRegisterIntrService__(arg0, arg1, arg2);
}

NV_STATUS kceServiceNotificationInterrupt_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceServiceNotificationInterruptArguments *arg2);

static inline NV_STATUS kceServiceNotificationInterrupt_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) {
    return arg1->__kceServiceNotificationInterrupt__(arg0, arg1, arg2);
}

NV_STATUS kceGetP2PCes_GV100(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);

NV_STATUS kceGetP2PCes_GH100(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);

static inline NV_STATUS kceGetP2PCes_DISPATCH(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask) {
    return arg0->__kceGetP2PCes__(arg0, pGpu, gpuMask, nvlinkP2PCeMask);
}

NV_STATUS kceGetNvlinkAutoConfigCeValues_TU102(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);

NV_STATUS kceGetNvlinkAutoConfigCeValues_GA100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);

static inline NV_STATUS kceGetNvlinkAutoConfigCeValues_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
    return pKCe->__kceGetNvlinkAutoConfigCeValues__(pGpu, pKCe, arg0, arg1, arg2);
}

NvBool kceGetNvlinkMaxTopoForTable_GP100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3);

static inline NvBool kceGetNvlinkMaxTopoForTable_491d52(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3) {
    return ((NvBool)(0 != 0));
}

static inline NvBool kceGetNvlinkMaxTopoForTable_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3) {
    return pKCe->__kceGetNvlinkMaxTopoForTable__(pGpu, pKCe, arg0, arg1, arg2, arg3);
}

NvBool kceIsCurrentMaxTopology_GA100(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3);

static inline NvBool kceIsCurrentMaxTopology_491d52(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3) {
    return ((NvBool)(0 != 0));
}

static inline NvBool kceIsCurrentMaxTopology_DISPATCH(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3) {
    return arg0->__kceIsCurrentMaxTopology__(pGpu, arg0, arg1, arg2, arg3);
}

NvBool kceGetAutoConfigTableEntry_GV100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4);

NvBool kceGetAutoConfigTableEntry_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4);

static inline NvBool kceGetAutoConfigTableEntry_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4) {
    return pKCe->__kceGetAutoConfigTableEntry__(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4);
}

NvU32 kceGetPce2lceConfigSize1_TU102(struct KernelCE *arg0);

NvU32 kceGetPce2lceConfigSize1_GA100(struct KernelCE *arg0);

NvU32 kceGetPce2lceConfigSize1_GA102(struct KernelCE *arg0);

NvU32 kceGetPce2lceConfigSize1_GH100(struct KernelCE *arg0);

static inline NvU32 kceGetPce2lceConfigSize1_DISPATCH(struct KernelCE *arg0) {
    return arg0->__kceGetPce2lceConfigSize1__(arg0);
}

NV_STATUS kceGetMappings_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3);

NV_STATUS kceGetMappings_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3);

static inline NV_STATUS kceGetMappings_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kceGetMappings_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) {
    return pCe->__kceGetMappings__(pGpu, pCe, arg0, arg1, arg2, arg3);
}

NV_STATUS kceMapPceLceForC2C_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);

static inline NV_STATUS kceMapPceLceForC2C_46f6a7(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kceMapPceLceForC2C_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
    return pKCe->__kceMapPceLceForC2C__(pGpu, pKCe, arg0, arg1, arg2);
}

void kceMapPceLceForGRCE_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 arg4);

static inline void kceMapPceLceForGRCE_b3696a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 arg4) {
    return;
}

static inline void kceMapPceLceForGRCE_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 arg4) {
    pKCe->__kceMapPceLceForGRCE__(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4);
}

NV_STATUS kceMapPceLceForSysmemLinks_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);

NV_STATUS kceMapPceLceForSysmemLinks_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);

static inline NV_STATUS kceMapPceLceForSysmemLinks_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kceMapPceLceForSysmemLinks_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
    return pCe->__kceMapPceLceForSysmemLinks__(pGpu, pCe, arg0, arg1, arg2, arg3);
}

NV_STATUS kceMapPceLceForNvlinkPeers_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);

NV_STATUS kceMapPceLceForNvlinkPeers_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2);

static inline NV_STATUS kceMapPceLceForNvlinkPeers_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kceMapPceLceForNvlinkPeers_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) {
    return pCe->__kceMapPceLceForNvlinkPeers__(pGpu, pCe, arg0, arg1, arg2);
}

NvU32 kceGetSysmemSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe);

NvU32 kceGetSysmemSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe);

static inline NvU32 kceGetSysmemSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) {
    return 0;
}

static inline NvU32 kceGetSysmemSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) {
    return pCe->__kceGetSysmemSupportedLceMask__(pGpu, pCe);
}

NV_STATUS kceMapAsyncLceDefault_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);

NV_STATUS kceMapAsyncLceDefault_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3);

static inline NV_STATUS kceMapAsyncLceDefault_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kceMapAsyncLceDefault_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) {
    return pCe->__kceMapAsyncLceDefault__(pGpu, pCe, arg0, arg1, arg2, arg3);
}

NvU32 kceGetNvlinkPeerSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0);

NvU32 kceGetNvlinkPeerSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0);

static inline NvU32 kceGetNvlinkPeerSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0) {
    return 0;
}

static inline NvU32 kceGetNvlinkPeerSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0) {
    return pCe->__kceGetNvlinkPeerSupportedLceMask__(pGpu, pCe, arg0);
}

NvU32 kceGetGrceSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe);

NvU32 kceGetGrceSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe);

static inline NvU32 kceGetGrceSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) {
    return 0;
}

static inline NvU32 kceGetGrceSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) {
    return pCe->__kceGetGrceSupportedLceMask__(pGpu, pCe);
}

NvBool kceIsGenXorHigherSupported_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen);

NvBool kceIsGenXorHigherSupported_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen);

static inline NvBool kceIsGenXorHigherSupported_cbe027(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen) {
    return ((NvBool)(0 == 0));
}

static inline NvBool kceIsGenXorHigherSupported_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen) {
    return pCe->__kceIsGenXorHigherSupported__(pGpu, pCe, checkGen);
}

void kceApplyGen4orHigherMapping_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3);

static inline void kceApplyGen4orHigherMapping_b3696a(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3) {
    return;
}

static inline void kceApplyGen4orHigherMapping_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3) {
    pCe->__kceApplyGen4orHigherMapping__(pGpu, pCe, arg0, arg1, arg2, arg3);
}

static inline NV_STATUS kceStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kceStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kceStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePreUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kceStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStateInitUnlocked__(pGpu, pEngstate);
}

static inline void kceInitMissing_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    pEngstate->__kceInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kceStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kceStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NvBool kceClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__kceClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline NV_STATUS kceStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
    return pEngstate->__kceStatePostLoad__(pGpu, pEngstate, arg0);
}

static inline NvU32 kceServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return pIntrService->__kceServiceInterrupt__(pGpu, pIntrService, pParams);
}

NV_STATUS kceFindFirstInstance_IMPL(OBJGPU *pGpu, struct KernelCE **ppKCe);

#define kceFindFirstInstance(pGpu, ppKCe) kceFindFirstInstance_IMPL(pGpu, ppKCe)
NV_STATUS kceTopLevelPceLceMappingsUpdate_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe);

#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceTopLevelPceLceMappingsUpdate(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceTopLevelPceLceMappingsUpdate(pGpu, pKCe) kceTopLevelPceLceMappingsUpdate_IMPL(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

NV_STATUS kceGetFaultMethodBufferSize_IMPL(OBJGPU *pGpu, NvU32 *size);

#define kceGetFaultMethodBufferSize(pGpu, size) kceGetFaultMethodBufferSize_IMPL(pGpu, size)
NV_STATUS kceGetAvailableHubPceMask_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NVLINK_TOPOLOGY_PARAMS *pTopoParams);

#define kceGetAvailableHubPceMask(pGpu, pKCe, pTopoParams) kceGetAvailableHubPceMask_IMPL(pGpu, pKCe, pTopoParams)
void kceGetAvailableGrceLceMask_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *grceLceMask);

#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceGetAvailableGrceLceMask(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *grceLceMask) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetAvailableGrceLceMask(pGpu, pKCe, grceLceMask) kceGetAvailableGrceLceMask_IMPL(pGpu, pKCe, grceLceMask)
#endif //__nvoc_kernel_ce_h_disabled

void kceGetNvlinkCaps_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps);

#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceGetNvlinkCaps(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetNvlinkCaps(pGpu, pKCe, pKCeCaps) kceGetNvlinkCaps_IMPL(pGpu, pKCe, pKCeCaps)
#endif //__nvoc_kernel_ce_h_disabled

NV_STATUS kceGetDeviceCaps_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps);

#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceGetDeviceCaps(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetDeviceCaps(gpu, pKCe, rmEngineType, ceCaps) kceGetDeviceCaps_IMPL(gpu, pKCe, rmEngineType, ceCaps)
#endif //__nvoc_kernel_ce_h_disabled

NV_STATUS kceFindShimOwner_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, struct KernelCE **ppKCe);

#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceFindShimOwner(OBJGPU *gpu, struct KernelCE *pKCe, struct KernelCE **ppKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceFindShimOwner(gpu, pKCe, ppKCe) kceFindShimOwner_IMPL(gpu, pKCe, ppKCe)
#endif //__nvoc_kernel_ce_h_disabled

NV_STATUS kceGetCeFromNvlinkConfig_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3);

#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceGetCeFromNvlinkConfig(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetCeFromNvlinkConfig(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetCeFromNvlinkConfig_IMPL(pGpu, pKCe, arg0, arg1, arg2, arg3)
#endif //__nvoc_kernel_ce_h_disabled

#undef PRIVATE_FIELD


// Iterate over all KCE objects
#define KCE_ITER_ALL_BEGIN(pGpu, pKCeIter, si)               \
    {                                                        \
        NvU32 maxCe = gpuGetNumCEs(pGpu);                    \
        NvU32 kceInst;                                       \
        for (kceInst = (si); kceInst < maxCe; kceInst++)     \
        {                                                    \
             pKCeIter = GPU_GET_KCE(pGpu, kceInst);          \
             if (pKCeIter == NULL)                           \
             {                                               \
                 continue;                                   \
             }
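
//
// Illustrative sketch of the iterator above (assumed call site; iteration
// starts at instance `si` and missing CE instances are skipped):
//
//     struct KernelCE *pKCe = NULL;
//     KCE_ITER_ALL_BEGIN(pGpu, pKCe, 0)
//         if (kceIsPresent_HAL(pGpu, pKCe))
//         {
//             // use pKCe->publicID, etc.
//         }
//     KCE_ITER_END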

// Iterate over all CE visible to Device
#define KCE_ITER_DEVICE_BEGIN(pGpu, pKCeIter, pDevice)                                         \
    {                                                                                          \
        NvU32 maxCe = ENG_CE__SIZE_1;                                                          \
        NV_STATUS kceStatus;                                                                   \
        NvU32 kceInst;                                                                         \
        NvU32 kceIdx;                                                                          \
        for (kceInst = 0; kceInst < maxCe; kceInst++)                                          \
        {                                                                                      \
            kceStatus = ceIndexFromType(pGpu, pDevice, RM_ENGINE_TYPE_COPY(kceInst), &kceIdx); \
            if (kceStatus != NV_OK)                                                            \
            {                                                                                  \
                continue;                                                                      \
            }                                                                                  \
            pKCeIter = GPU_GET_KCE(pGpu, kceIdx);                                              \
            if (pKCeIter == NULL)                                                              \
            {                                                                                  \
                continue;                                                                      \
            }

#define KCE_ITER_END                                         \
        }                                                    \
    }

#define KCE_ITER_END_OR_RETURN_ERROR                         \
        }                                                    \
        if (kceInst == maxCe)                                \
        {                                                    \
            return NV_ERR_INSUFFICIENT_RESOURCES;            \
        }                                                    \
    }
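
//
// Illustrative sketch (assumed call site): scan the CEs visible to a device
// and return NV_ERR_INSUFFICIENT_RESOURCES if the loop runs to completion
// without a break:
//
//     struct KernelCE *pKCe = NULL;
//     KCE_ITER_DEVICE_BEGIN(pGpu, pKCe, pDevice)
//         if (!pKCe->bStubbed)
//         {
//             break;  // found a usable CE
//         }
//     KCE_ITER_END_OR_RETURN_ERROR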

#endif // KERNEL_CE_H

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_KERNEL_CE_NVOC_H_