1
2 #ifndef _G_KERNEL_CE_NVOC_H_
3 #define _G_KERNEL_CE_NVOC_H_
4 #include "nvoc/runtime.h"
5
6 // Version of generated metadata structures
7 #ifdef NVOC_METADATA_VERSION
8 #undef NVOC_METADATA_VERSION
9 #endif
10 #define NVOC_METADATA_VERSION 0
11
12 #ifdef __cplusplus
13 extern "C" {
14 #endif
15
16 /*
17 * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
18 * SPDX-License-Identifier: MIT
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining a
21 * copy of this software and associated documentation files (the "Software"),
22 * to deal in the Software without restriction, including without limitation
23 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 * and/or sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following conditions:
26 *
27 * The above copyright notice and this permission notice shall be included in
28 * all copies or substantial portions of the Software.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
33 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
36 * DEALINGS IN THE SOFTWARE.
37 */
38
39 #pragma once
40 #include "g_kernel_ce_nvoc.h"
41
42 #ifndef KERNEL_CE_H
43 #define KERNEL_CE_H
44
45 #include "core/core.h"
46 #include "ctrl/ctrl2080/ctrl2080ce.h"
47 #include "gpu/eng_state.h"
48 #include "gpu/gpu_halspec.h"
49 #include "gpu/gpu.h"
50 #include "kernel/gpu/intr/intr_service.h"
51 #include "gpu/ce/kernel_ce_shared.h"
52
53 typedef struct NVLINK_TOPOLOGY_PARAMS NVLINK_TOPOLOGY_PARAMS;
54
55 #define MAX_CE_COUNT 24
56
/*
 * sysmemLinks
 *     Represents the number of sysmem links detected
 *     This affects how many PCEs LCE0(sysmem read CE)
 *     and LCE1(sysmem write CE) should be mapped to
 * maxLinksPerPeer
 *     Represents the maximum number of peer links
 *     between this GPU and all its peers. This affects
 *     how many PCEs LCE3(P2P CE) should be mapped to
 * numPeers
 *     Represents the number of Peer GPUs discovered so far
 * bSymmetric
 *     Represents whether the topology detected so far
 *     is symmetric i.e. has same number of links to all
 *     peers connected through nvlink. This affects how
 *     many PCEs to assign to LCEs3-5 (nvlink P2P CEs)
 * bSwitchConfig
 *     Represents whether the config listed is intended
 *     for use with nvswitch systems
 * pceLceMap
 *     Value of NV_CE_PCE2LCE_CONFIG0 register with the
 *     above values for sysmemLinks, maxLinksPerPeer,
 *     numPeers and bSymmetric
 * grceConfig
 *     Value of NV_CE_GRCE_CONFIG register with the
 *     above values for sysmemLinks, maxLinksPerPeer,
 *     numPeers and bSymmetric
 * exposeCeMask
 *     Mask of CEs to expose to clients for the above
 *     values of sysmemLinks, maxLinksPerPeer,
 *     numPeers and bSymmetric
 */
typedef struct NVLINK_CE_AUTO_CONFIG_TABLE
{
    NvU32 sysmemLinks;
    NvU32 maxLinksPerPeer;
    NvU32 numPeers;
    NvBool bSymmetric;
    NvBool bSwitchConfig;
    NvU32 pceLceMap[MAX_CE_COUNT];   // indexed by PCE; one entry per possible CE (MAX_CE_COUNT)
    NvU32 grceConfig[MAX_CE_COUNT];  // indexed by GRCE slot; one entry per possible CE
    NvU32 exposeCeMask;
} NVLINK_CE_AUTO_CONFIG_TABLE;
100
101 //
102 // Kernel Copy Engine
103 // This class provides Kernel-RM interface and state tracking for Copy Engine.
104 //
105
106
107 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
108 // the matching C source file, but causes diagnostics to be issued if another
109 // source file references the field.
110 #ifdef NVOC_KERNEL_CE_H_PRIVATE_ACCESS_ALLOWED
111 #define PRIVATE_FIELD(x) x
112 #else
113 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
114 #endif
115
116
// NVOC-generated class layout for KernelCE (Kernel Copy Engine).
// Derives from OBJENGSTATE and IntrService; per-object virtual dispatch is
// done through the function pointers below (see the *_DISPATCH inlines).
struct KernelCE {

    // Metadata
    const struct NVOC_RTTI *__nvoc_rtti;

    // Parent (i.e. superclass or base class) object pointers
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct IntrService __nvoc_base_IntrService;

    // Ancestor object pointers for `staticCast` feature
    struct Object *__nvoc_pbase_Object;    // obj super^2
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;    // engstate super
    struct IntrService *__nvoc_pbase_IntrService;    // intrserv super
    struct KernelCE *__nvoc_pbase_KernelCE;    // kce

    // Vtable with 43 per-object function pointers
    NV_STATUS (*__kceConstructEngine__)(OBJGPU *, struct KernelCE * /*this*/, ENGDESCRIPTOR);  // virtual override (engstate) base (engstate)
    NvBool (*__kceIsPresent__)(OBJGPU *, struct KernelCE * /*this*/);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
    NV_STATUS (*__kceStateInitLocked__)(OBJGPU *, struct KernelCE * /*this*/);  // virtual override (engstate) base (engstate)
    NV_STATUS (*__kceStateUnload__)(OBJGPU *, struct KernelCE * /*this*/, NvU32);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
    NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE * /*this*/, NvU32);  // virtual halified (singleton optimized) override (engstate) base (engstate)
    void (*__kceStateDestroy__)(OBJGPU *, struct KernelCE * /*this*/);  // virtual override (engstate) base (engstate)
    void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE * /*this*/, IntrServiceRecord *);  // virtual override (intrserv) base (intrserv)
    NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE * /*this*/, IntrServiceServiceNotificationInterruptArguments *);  // virtual override (intrserv) base (intrserv)
    void (*__kceSetShimInstance__)(OBJGPU *, struct KernelCE * /*this*/);  // halified (2 hals) body
    NvBool (*__kceCheckForDecompCapability__)(OBJGPU *, struct KernelCE * /*this*/, NvU32);  // halified (2 hals) body
    NV_STATUS (*__kceGetP2PCes__)(struct KernelCE * /*this*/, OBJGPU *, NvU32, NvU32 *);  // halified (2 hals)
    void (*__kceGetSysmemRWLCEs__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals)
    NV_STATUS (*__kceGetNvlinkAutoConfigCeValues__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32 *);  // halified (3 hals) body
    NvBool (*__kceGetNvlinkMaxTopoForTable__)(OBJGPU *, struct KernelCE * /*this*/, struct NVLINK_TOPOLOGY_PARAMS *, void *, NvU32, NvU32 *);  // halified (2 hals) body
    NvBool (*__kceIsCurrentMaxTopology__)(OBJGPU *, struct KernelCE * /*this*/, struct NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *);  // halified (2 hals)
    NvBool (*__kceGetAutoConfigTableEntry__)(OBJGPU *, struct KernelCE * /*this*/, struct NVLINK_TOPOLOGY_PARAMS *, struct NVLINK_CE_AUTO_CONFIG_TABLE *, NvU32, NvU32 *, NvU32 *);  // halified (2 hals) body
    NvU32 (*__kceGetGrceConfigSize1__)(struct KernelCE * /*this*/);  // halified (2 hals)
    NvU32 (*__kceGetPce2lceConfigSize1__)(struct KernelCE * /*this*/);  // halified (5 hals)
    NV_STATUS (*__kceGetMappings__)(OBJGPU *, struct KernelCE * /*this*/, NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *, NvU32 *);  // halified (4 hals) body
    NV_STATUS (*__kceMapPceLceForC2C__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32 *);  // halified (3 hals) body
    NV_STATUS (*__kceMapPceLceForScrub__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
    void (*__kceMapPceLceForDecomp__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
    void (*__kceMapPceLceForPCIe__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *);  // halified (2 hals) body
    void (*__kceMapPceLceForGRCE__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU32);  // halified (3 hals) body
    NvU32 (*__kceGetLceMaskForShimInstance__)(OBJGPU *, struct KernelCE * /*this*/);  // halified (2 hals) body
    NV_STATUS (*__kceMapPceLceForSysmemLinks__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32 *, NvU32);  // halified (3 hals) body
    NV_STATUS (*__kceMapPceLceForNvlinkPeers__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32 *);  // halified (4 hals) body
    NvU32 (*__kceGetSysmemSupportedLceMask__)(OBJGPU *, struct KernelCE * /*this*/);  // halified (3 hals) body
    NV_STATUS (*__kceMapAsyncLceDefault__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32 *, NvU32);  // halified (4 hals) body
    NvU32 (*__kceGetNvlinkPeerSupportedLceMask__)(OBJGPU *, struct KernelCE * /*this*/, NvU32);  // halified (3 hals) body
    NvU32 (*__kceGetGrceSupportedLceMask__)(OBJGPU *, struct KernelCE * /*this*/);  // halified (3 hals) body
    NvBool (*__kceIsGenXorHigherSupported__)(OBJGPU *, struct KernelCE * /*this*/, NvU32);  // halified (3 hals) body
    void (*__kceApplyGen4orHigherMapping__)(OBJGPU *, struct KernelCE * /*this*/, NvU32 *, NvU32 *, NvU32, NvU32);  // halified (2 hals) body
    void (*__kceInitMissing__)(struct OBJGPU *, struct KernelCE * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStatePreInitLocked__)(struct OBJGPU *, struct KernelCE * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStatePreInitUnlocked__)(struct OBJGPU *, struct KernelCE * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStateInitUnlocked__)(struct OBJGPU *, struct KernelCE * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStatePreLoad__)(struct OBJGPU *, struct KernelCE * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStatePostLoad__)(struct OBJGPU *, struct KernelCE * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStatePreUnload__)(struct OBJGPU *, struct KernelCE * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__kceStatePostUnload__)(struct OBJGPU *, struct KernelCE * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NvBool (*__kceClearInterrupt__)(OBJGPU *, struct KernelCE * /*this*/, IntrServiceClearInterruptArguments *);  // virtual inherited (intrserv) base (intrserv)
    NvU32 (*__kceServiceInterrupt__)(OBJGPU *, struct KernelCE * /*this*/, IntrServiceServiceInterruptArguments *);  // virtual inherited (intrserv) base (intrserv)

    // Data members
    // NOTE(review): member semantics below are inferred from names/usage in
    // this header only — confirm against kernel_ce.c before relying on them.
    NvU32 publicID;                    // externally visible CE identifier (presumably the LCE index) — confirm
    NvU32 shimInstance;                // shim instance for this CE; set via kceSetShimInstance — confirm
    NvU32 *pPceLceMap;                 // PCE->LCE mapping table; size/ownership not visible in this header
    NvU32 shimConnectingHubMask;       // mask of hubs connecting to the shim — TODO confirm semantics
    NvBool bMapComplete;               // presumably NV_TRUE once PCE-LCE mapping is done — confirm
    NvU32 decompPceMask;               // mask of PCEs for decompression (see kceCheckForDecompCapability)
    NvBool bShimOwner;                 // presumably NV_TRUE if this CE owns its shim instance — confirm
    NvBool bStubbed;                   // NV_TRUE if this CE instance is stubbed out
    NvU32 nvlinkPeerMask;              // mask of NVLink peer GPUs — confirm
    NvU32 nvlinkNumPeers;              // count of NVLink peers — confirm
    NvBool bIsAutoConfigEnabled;       // NVLink CE auto-config enable flag (see NVLINK_CE_AUTO_CONFIG_TABLE)
    NvBool bUseGen4Mapping;            // selects the Gen4-or-higher mapping path (kceApplyGen4orHigherMapping)
    struct IoAperture aperture;        // register aperture for this engine
    NvBool bCcFipsSelfTestRequired;    // NV_TRUE if a Confidential-Computing FIPS self-test is required — confirm
};
193
194 #ifndef __NVOC_CLASS_KernelCE_TYPEDEF__
195 #define __NVOC_CLASS_KernelCE_TYPEDEF__
196 typedef struct KernelCE KernelCE;
197 #endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */
198
199 #ifndef __nvoc_class_id_KernelCE
200 #define __nvoc_class_id_KernelCE 0x242aca
201 #endif /* __nvoc_class_id_KernelCE */
202
203 // Casting support
204 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCE;
205
206 #define __staticCast_KernelCE(pThis) \
207 ((pThis)->__nvoc_pbase_KernelCE)
208
209 #ifdef __nvoc_kernel_ce_h_disabled
210 #define __dynamicCast_KernelCE(pThis) ((KernelCE*)NULL)
211 #else //__nvoc_kernel_ce_h_disabled
212 #define __dynamicCast_KernelCE(pThis) \
213 ((KernelCE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCE)))
214 #endif //__nvoc_kernel_ce_h_disabled
215
216 // Property macros
217 #define PDB_PROP_KCE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
218 #define PDB_PROP_KCE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
219
220 NV_STATUS __nvoc_objCreateDynamic_KernelCE(KernelCE**, Dynamic*, NvU32, va_list);
221
222 NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
223 #define __objCreate_KernelCE(ppNewObj, pParent, createFlags) \
224 __nvoc_objCreate_KernelCE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
225
226
227 // Wrapper macros
228 #define kceConstructEngine_FNPTR(pKCe) pKCe->__kceConstructEngine__
229 #define kceConstructEngine(pGpu, pKCe, arg3) kceConstructEngine_DISPATCH(pGpu, pKCe, arg3)
230 #define kceIsPresent_FNPTR(pKCe) pKCe->__kceIsPresent__
231 #define kceIsPresent(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
232 #define kceIsPresent_HAL(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
233 #define kceStateInitLocked_FNPTR(arg_this) arg_this->__kceStateInitLocked__
234 #define kceStateInitLocked(arg1, arg_this) kceStateInitLocked_DISPATCH(arg1, arg_this)
235 #define kceStateUnload_FNPTR(pKCe) pKCe->__kceStateUnload__
236 #define kceStateUnload(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
237 #define kceStateUnload_HAL(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
238 #define kceStateLoad_FNPTR(arg_this) arg_this->__kceStateLoad__
239 #define kceStateLoad(arg1, arg_this, arg3) kceStateLoad_DISPATCH(arg1, arg_this, arg3)
240 #define kceStateLoad_HAL(arg1, arg_this, arg3) kceStateLoad_DISPATCH(arg1, arg_this, arg3)
241 #define kceStateDestroy_FNPTR(arg_this) arg_this->__kceStateDestroy__
242 #define kceStateDestroy(arg1, arg_this) kceStateDestroy_DISPATCH(arg1, arg_this)
243 #define kceRegisterIntrService_FNPTR(arg_this) arg_this->__kceRegisterIntrService__
244 #define kceRegisterIntrService(arg1, arg_this, arg3) kceRegisterIntrService_DISPATCH(arg1, arg_this, arg3)
245 #define kceServiceNotificationInterrupt_FNPTR(arg_this) arg_this->__kceServiceNotificationInterrupt__
246 #define kceServiceNotificationInterrupt(arg1, arg_this, arg3) kceServiceNotificationInterrupt_DISPATCH(arg1, arg_this, arg3)
247 #define kceSetShimInstance_FNPTR(pKCe) pKCe->__kceSetShimInstance__
248 #define kceSetShimInstance(gpu, pKCe) kceSetShimInstance_DISPATCH(gpu, pKCe)
249 #define kceSetShimInstance_HAL(gpu, pKCe) kceSetShimInstance_DISPATCH(gpu, pKCe)
250 #define kceCheckForDecompCapability_FNPTR(pKCe) pKCe->__kceCheckForDecompCapability__
251 #define kceCheckForDecompCapability(pGpu, pKCe, nv2080EngineId) kceCheckForDecompCapability_DISPATCH(pGpu, pKCe, nv2080EngineId)
252 #define kceCheckForDecompCapability_HAL(pGpu, pKCe, nv2080EngineId) kceCheckForDecompCapability_DISPATCH(pGpu, pKCe, nv2080EngineId)
253 #define kceGetP2PCes_FNPTR(arg_this) arg_this->__kceGetP2PCes__
254 #define kceGetP2PCes(arg_this, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg_this, pGpu, gpuMask, nvlinkP2PCeMask)
255 #define kceGetP2PCes_HAL(arg_this, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg_this, pGpu, gpuMask, nvlinkP2PCeMask)
256 #define kceGetSysmemRWLCEs_FNPTR(arg_this) arg_this->__kceGetSysmemRWLCEs__
257 #define kceGetSysmemRWLCEs(pGpu, arg_this, rd, wr) kceGetSysmemRWLCEs_DISPATCH(pGpu, arg_this, rd, wr)
258 #define kceGetSysmemRWLCEs_HAL(pGpu, arg_this, rd, wr) kceGetSysmemRWLCEs_DISPATCH(pGpu, arg_this, rd, wr)
259 #define kceGetNvlinkAutoConfigCeValues_FNPTR(pKCe) pKCe->__kceGetNvlinkAutoConfigCeValues__
260 #define kceGetNvlinkAutoConfigCeValues(pGpu, pKCe, arg3, arg4, arg5) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg3, arg4, arg5)
261 #define kceGetNvlinkAutoConfigCeValues_HAL(pGpu, pKCe, arg3, arg4, arg5) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg3, arg4, arg5)
262 #define kceGetNvlinkMaxTopoForTable_FNPTR(pKCe) pKCe->__kceGetNvlinkMaxTopoForTable__
263 #define kceGetNvlinkMaxTopoForTable(pGpu, pKCe, arg3, arg4, arg5, arg6) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg3, arg4, arg5, arg6)
264 #define kceGetNvlinkMaxTopoForTable_HAL(pGpu, pKCe, arg3, arg4, arg5, arg6) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg3, arg4, arg5, arg6)
265 #define kceIsCurrentMaxTopology_FNPTR(arg_this) arg_this->__kceIsCurrentMaxTopology__
266 #define kceIsCurrentMaxTopology(pGpu, arg_this, arg3, arg4, arg5) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg_this, arg3, arg4, arg5)
267 #define kceIsCurrentMaxTopology_HAL(pGpu, arg_this, arg3, arg4, arg5) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg_this, arg3, arg4, arg5)
268 #define kceGetAutoConfigTableEntry_FNPTR(pKCe) pKCe->__kceGetAutoConfigTableEntry__
269 #define kceGetAutoConfigTableEntry(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7) kceGetAutoConfigTableEntry_DISPATCH(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7)
270 #define kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7) kceGetAutoConfigTableEntry_DISPATCH(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7)
271 #define kceGetGrceConfigSize1_FNPTR(arg_this) arg_this->__kceGetGrceConfigSize1__
272 #define kceGetGrceConfigSize1(arg_this) kceGetGrceConfigSize1_DISPATCH(arg_this)
273 #define kceGetGrceConfigSize1_HAL(arg_this) kceGetGrceConfigSize1_DISPATCH(arg_this)
274 #define kceGetPce2lceConfigSize1_FNPTR(arg_this) arg_this->__kceGetPce2lceConfigSize1__
275 #define kceGetPce2lceConfigSize1(arg_this) kceGetPce2lceConfigSize1_DISPATCH(arg_this)
276 #define kceGetPce2lceConfigSize1_HAL(arg_this) kceGetPce2lceConfigSize1_DISPATCH(arg_this)
277 #define kceGetMappings_FNPTR(pCe) pCe->__kceGetMappings__
278 #define kceGetMappings(pGpu, pCe, arg3, arg4, arg5, arg6) kceGetMappings_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
279 #define kceGetMappings_HAL(pGpu, pCe, arg3, arg4, arg5, arg6) kceGetMappings_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
280 #define kceMapPceLceForC2C_FNPTR(pKCe) pKCe->__kceMapPceLceForC2C__
281 #define kceMapPceLceForC2C(pGpu, pKCe, arg3, arg4, arg5) kceMapPceLceForC2C_DISPATCH(pGpu, pKCe, arg3, arg4, arg5)
282 #define kceMapPceLceForC2C_HAL(pGpu, pKCe, arg3, arg4, arg5) kceMapPceLceForC2C_DISPATCH(pGpu, pKCe, arg3, arg4, arg5)
283 #define kceMapPceLceForScrub_FNPTR(pKCe) pKCe->__kceMapPceLceForScrub__
284 #define kceMapPceLceForScrub(pGpu, pKCe, arg3, arg4) kceMapPceLceForScrub_DISPATCH(pGpu, pKCe, arg3, arg4)
285 #define kceMapPceLceForScrub_HAL(pGpu, pKCe, arg3, arg4) kceMapPceLceForScrub_DISPATCH(pGpu, pKCe, arg3, arg4)
286 #define kceMapPceLceForDecomp_FNPTR(pKCe) pKCe->__kceMapPceLceForDecomp__
287 #define kceMapPceLceForDecomp(pGpu, pKCe, arg3, arg4) kceMapPceLceForDecomp_DISPATCH(pGpu, pKCe, arg3, arg4)
288 #define kceMapPceLceForDecomp_HAL(pGpu, pKCe, arg3, arg4) kceMapPceLceForDecomp_DISPATCH(pGpu, pKCe, arg3, arg4)
289 #define kceMapPceLceForPCIe_FNPTR(pKCe) pKCe->__kceMapPceLceForPCIe__
290 #define kceMapPceLceForPCIe(pGpu, pKCe, arg3, arg4) kceMapPceLceForPCIe_DISPATCH(pGpu, pKCe, arg3, arg4)
291 #define kceMapPceLceForPCIe_HAL(pGpu, pKCe, arg3, arg4) kceMapPceLceForPCIe_DISPATCH(pGpu, pKCe, arg3, arg4)
292 #define kceMapPceLceForGRCE_FNPTR(pKCe) pKCe->__kceMapPceLceForGRCE__
293 #define kceMapPceLceForGRCE(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7) kceMapPceLceForGRCE_DISPATCH(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7)
294 #define kceMapPceLceForGRCE_HAL(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7) kceMapPceLceForGRCE_DISPATCH(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7)
295 #define kceGetLceMaskForShimInstance_FNPTR(pKCe) pKCe->__kceGetLceMaskForShimInstance__
296 #define kceGetLceMaskForShimInstance(pGpu, pKCe) kceGetLceMaskForShimInstance_DISPATCH(pGpu, pKCe)
297 #define kceGetLceMaskForShimInstance_HAL(pGpu, pKCe) kceGetLceMaskForShimInstance_DISPATCH(pGpu, pKCe)
298 #define kceMapPceLceForSysmemLinks_FNPTR(pCe) pCe->__kceMapPceLceForSysmemLinks__
299 #define kceMapPceLceForSysmemLinks(pGpu, pCe, arg3, arg4, arg5, arg6) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
300 #define kceMapPceLceForSysmemLinks_HAL(pGpu, pCe, arg3, arg4, arg5, arg6) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
301 #define kceMapPceLceForNvlinkPeers_FNPTR(pCe) pCe->__kceMapPceLceForNvlinkPeers__
302 #define kceMapPceLceForNvlinkPeers(pGpu, pCe, arg3, arg4, arg5) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg3, arg4, arg5)
303 #define kceMapPceLceForNvlinkPeers_HAL(pGpu, pCe, arg3, arg4, arg5) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg3, arg4, arg5)
304 #define kceGetSysmemSupportedLceMask_FNPTR(pCe) pCe->__kceGetSysmemSupportedLceMask__
305 #define kceGetSysmemSupportedLceMask(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe)
306 #define kceGetSysmemSupportedLceMask_HAL(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe)
307 #define kceMapAsyncLceDefault_FNPTR(pCe) pCe->__kceMapAsyncLceDefault__
308 #define kceMapAsyncLceDefault(pGpu, pCe, arg3, arg4, arg5, arg6) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
309 #define kceMapAsyncLceDefault_HAL(pGpu, pCe, arg3, arg4, arg5, arg6) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
310 #define kceGetNvlinkPeerSupportedLceMask_FNPTR(pCe) pCe->__kceGetNvlinkPeerSupportedLceMask__
311 #define kceGetNvlinkPeerSupportedLceMask(pGpu, pCe, arg3) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg3)
312 #define kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pCe, arg3) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg3)
313 #define kceGetGrceSupportedLceMask_FNPTR(pCe) pCe->__kceGetGrceSupportedLceMask__
314 #define kceGetGrceSupportedLceMask(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe)
315 #define kceGetGrceSupportedLceMask_HAL(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe)
316 #define kceIsGenXorHigherSupported_FNPTR(pCe) pCe->__kceIsGenXorHigherSupported__
317 #define kceIsGenXorHigherSupported(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
318 #define kceIsGenXorHigherSupported_HAL(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
319 #define kceApplyGen4orHigherMapping_FNPTR(pCe) pCe->__kceApplyGen4orHigherMapping__
320 #define kceApplyGen4orHigherMapping(pGpu, pCe, arg3, arg4, arg5, arg6) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
321 #define kceApplyGen4orHigherMapping_HAL(pGpu, pCe, arg3, arg4, arg5, arg6) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg3, arg4, arg5, arg6)
322 #define kceInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateInitMissing__
323 #define kceInitMissing(pGpu, pEngstate) kceInitMissing_DISPATCH(pGpu, pEngstate)
324 #define kceStatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__
325 #define kceStatePreInitLocked(pGpu, pEngstate) kceStatePreInitLocked_DISPATCH(pGpu, pEngstate)
326 #define kceStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreInitUnlocked__
327 #define kceStatePreInitUnlocked(pGpu, pEngstate) kceStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
328 #define kceStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStateInitUnlocked__
329 #define kceStateInitUnlocked(pGpu, pEngstate) kceStateInitUnlocked_DISPATCH(pGpu, pEngstate)
330 #define kceStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreLoad__
331 #define kceStatePreLoad(pGpu, pEngstate, arg3) kceStatePreLoad_DISPATCH(pGpu, pEngstate, arg3)
332 #define kceStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__
333 #define kceStatePostLoad(pGpu, pEngstate, arg3) kceStatePostLoad_DISPATCH(pGpu, pEngstate, arg3)
334 #define kceStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePreUnload__
335 #define kceStatePreUnload(pGpu, pEngstate, arg3) kceStatePreUnload_DISPATCH(pGpu, pEngstate, arg3)
336 #define kceStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__engstateStatePostUnload__
337 #define kceStatePostUnload(pGpu, pEngstate, arg3) kceStatePostUnload_DISPATCH(pGpu, pEngstate, arg3)
338 #define kceClearInterrupt_FNPTR(pIntrService) pIntrService->__nvoc_base_IntrService.__intrservClearInterrupt__
339 #define kceClearInterrupt(pGpu, pIntrService, pParams) kceClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
340 #define kceServiceInterrupt_FNPTR(pIntrService) pIntrService->__nvoc_base_IntrService.__intrservServiceInterrupt__
341 #define kceServiceInterrupt(pGpu, pIntrService, pParams) kceServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
342
343 // Dispatch functions
// NVOC dispatch stub: forwards to this object's __kceConstructEngine__ vtable pointer.
static inline NV_STATUS kceConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg3) {
    return pKCe->__kceConstructEngine__(pGpu, pKCe, arg3);
}
347
// NVOC dispatch stub: forwards to this object's __kceIsPresent__ vtable pointer.
static inline NvBool kceIsPresent_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe) {
    return pKCe->__kceIsPresent__(pGpu, pKCe);
}
351
// NVOC dispatch stub: forwards to this object's __kceStateInitLocked__ vtable pointer.
static inline NV_STATUS kceStateInitLocked_DISPATCH(OBJGPU *arg1, struct KernelCE *arg_this) {
    return arg_this->__kceStateInitLocked__(arg1, arg_this);
}
355
// NVOC dispatch stub: forwards to this object's __kceStateUnload__ vtable pointer.
static inline NV_STATUS kceStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
    return pKCe->__kceStateUnload__(pGpu, pKCe, flags);
}
359
// NVOC dispatch stub: forwards to this object's __kceStateLoad__ vtable pointer.
static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg1, struct KernelCE *arg_this, NvU32 arg3) {
    return arg_this->__kceStateLoad__(arg1, arg_this, arg3);
}
363
// NVOC dispatch stub: forwards to this object's __kceStateDestroy__ vtable pointer.
static inline void kceStateDestroy_DISPATCH(OBJGPU *arg1, struct KernelCE *arg_this) {
    arg_this->__kceStateDestroy__(arg1, arg_this);
}
367
// NVOC dispatch stub: forwards to this object's __kceRegisterIntrService__ vtable pointer.
// Note: the array parameter decays to IntrServiceRecord*; 175 is the generated table size.
static inline void kceRegisterIntrService_DISPATCH(OBJGPU *arg1, struct KernelCE *arg_this, IntrServiceRecord arg3[175]) {
    arg_this->__kceRegisterIntrService__(arg1, arg_this, arg3);
}
371
// NVOC dispatch stub: forwards to this object's __kceServiceNotificationInterrupt__ vtable pointer.
static inline NV_STATUS kceServiceNotificationInterrupt_DISPATCH(OBJGPU *arg1, struct KernelCE *arg_this, IntrServiceServiceNotificationInterruptArguments *arg3) {
    return arg_this->__kceServiceNotificationInterrupt__(arg1, arg_this, arg3);
}
375
// NVOC dispatch stub: forwards to this object's __kceSetShimInstance__ vtable pointer (HAL-selected).
static inline void kceSetShimInstance_DISPATCH(OBJGPU *gpu, struct KernelCE *pKCe) {
    pKCe->__kceSetShimInstance__(gpu, pKCe);
}
379
// NVOC dispatch stub: forwards to this object's __kceCheckForDecompCapability__ vtable pointer (HAL-selected).
static inline NvBool kceCheckForDecompCapability_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 nv2080EngineId) {
    return pKCe->__kceCheckForDecompCapability__(pGpu, pKCe, nv2080EngineId);
}
383
// NVOC dispatch stub: forwards to this object's __kceGetP2PCes__ vtable pointer.
// Note: unlike most dispatchers here, `this` is the FIRST parameter for this method.
static inline NV_STATUS kceGetP2PCes_DISPATCH(struct KernelCE *arg_this, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask) {
    return arg_this->__kceGetP2PCes__(arg_this, pGpu, gpuMask, nvlinkP2PCeMask);
}
387
// NVOC dispatch stub: forwards to this object's __kceGetSysmemRWLCEs__ vtable pointer.
static inline void kceGetSysmemRWLCEs_DISPATCH(OBJGPU *pGpu, struct KernelCE *arg_this, NvU32 *rd, NvU32 *wr) {
    arg_this->__kceGetSysmemRWLCEs__(pGpu, arg_this, rd, wr);
}
391
// NVOC dispatch stub: forwards to this object's __kceGetNvlinkAutoConfigCeValues__ vtable pointer (HAL-selected).
static inline NV_STATUS kceGetNvlinkAutoConfigCeValues_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5) {
    return pKCe->__kceGetNvlinkAutoConfigCeValues__(pGpu, pKCe, arg3, arg4, arg5);
}
395
// NVOC dispatch stub: forwards to this object's __kceGetNvlinkMaxTopoForTable__ vtable pointer (HAL-selected).
static inline NvBool kceGetNvlinkMaxTopoForTable_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg3, void *arg4, NvU32 arg5, NvU32 *arg6) {
    return pKCe->__kceGetNvlinkMaxTopoForTable__(pGpu, pKCe, arg3, arg4, arg5, arg6);
}
399
// NVOC dispatch stub: forwards to this object's __kceIsCurrentMaxTopology__ vtable pointer (HAL-selected).
static inline NvBool kceIsCurrentMaxTopology_DISPATCH(OBJGPU *pGpu, struct KernelCE *arg_this, struct NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5) {
    return arg_this->__kceIsCurrentMaxTopology__(pGpu, arg_this, arg3, arg4, arg5);
}
403
// NVOC dispatch stub: forwards to this object's __kceGetAutoConfigTableEntry__ vtable pointer (HAL-selected).
static inline NvBool kceGetAutoConfigTableEntry_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg3, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg4, NvU32 arg5, NvU32 *arg6, NvU32 *arg7) {
    return pKCe->__kceGetAutoConfigTableEntry__(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7);
}
407
// NVOC dispatch stub: forwards to this object's __kceGetGrceConfigSize1__ vtable pointer (HAL-selected).
static inline NvU32 kceGetGrceConfigSize1_DISPATCH(struct KernelCE *arg_this) {
    return arg_this->__kceGetGrceConfigSize1__(arg_this);
}
411
// NVOC dispatch stub: forwards to this object's __kceGetPce2lceConfigSize1__ vtable pointer (HAL-selected).
static inline NvU32 kceGetPce2lceConfigSize1_DISPATCH(struct KernelCE *arg_this) {
    return arg_this->__kceGetPce2lceConfigSize1__(arg_this);
}
415
// NVOC dispatch stub: forwards to this object's __kceGetMappings__ vtable pointer (HAL-selected).
static inline NV_STATUS kceGetMappings_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6) {
    return pCe->__kceGetMappings__(pGpu, pCe, arg3, arg4, arg5, arg6);
}
419
// NVOC dispatch stub: forwards to this object's __kceMapPceLceForC2C__ vtable pointer (HAL-selected).
static inline NV_STATUS kceMapPceLceForC2C_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5) {
    return pKCe->__kceMapPceLceForC2C__(pGpu, pKCe, arg3, arg4, arg5);
}
423
// NVOC dispatch stub: forwards to this object's __kceMapPceLceForScrub__ vtable pointer (HAL-selected).
static inline NV_STATUS kceMapPceLceForScrub_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4) {
    return pKCe->__kceMapPceLceForScrub__(pGpu, pKCe, arg3, arg4);
}
427
// NVOC dispatch stub: forwards to this object's __kceMapPceLceForDecomp__ vtable pointer (HAL-selected).
static inline void kceMapPceLceForDecomp_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4) {
    pKCe->__kceMapPceLceForDecomp__(pGpu, pKCe, arg3, arg4);
}
431
// NVOC dispatch stub: forwards to this object's __kceMapPceLceForPCIe__ vtable pointer (HAL-selected).
static inline void kceMapPceLceForPCIe_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4) {
    pKCe->__kceMapPceLceForPCIe__(pGpu, pKCe, arg3, arg4);
}
435
// NVOC dispatch stub: forwards to this object's __kceMapPceLceForGRCE__ vtable pointer (HAL-selected).
static inline void kceMapPceLceForGRCE_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6, NvU32 arg7) {
    pKCe->__kceMapPceLceForGRCE__(pGpu, pKCe, arg3, arg4, arg5, arg6, arg7);
}
439
// NVOC dispatch stub: forwards to this object's __kceGetLceMaskForShimInstance__ vtable pointer (HAL-selected).
static inline NvU32 kceGetLceMaskForShimInstance_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe) {
    return pKCe->__kceGetLceMaskForShimInstance__(pGpu, pKCe);
}
443
// NVOC dispatch stub: forwards to this object's __kceMapPceLceForSysmemLinks__ vtable pointer (HAL-selected).
static inline NV_STATUS kceMapPceLceForSysmemLinks_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6) {
    return pCe->__kceMapPceLceForSysmemLinks__(pGpu, pCe, arg3, arg4, arg5, arg6);
}
447
kceMapPceLceForNvlinkPeers_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe,NvU32 * arg3,NvU32 * arg4,NvU32 * arg5)448 static inline NV_STATUS kceMapPceLceForNvlinkPeers_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5) {
449 return pCe->__kceMapPceLceForNvlinkPeers__(pGpu, pCe, arg3, arg4, arg5);
450 }
451
kceGetSysmemSupportedLceMask_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe)452 static inline NvU32 kceGetSysmemSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) {
453 return pCe->__kceGetSysmemSupportedLceMask__(pGpu, pCe);
454 }
455
kceMapAsyncLceDefault_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe,NvU32 * arg3,NvU32 * arg4,NvU32 * arg5,NvU32 arg6)456 static inline NV_STATUS kceMapAsyncLceDefault_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6) {
457 return pCe->__kceMapAsyncLceDefault__(pGpu, pCe, arg3, arg4, arg5, arg6);
458 }
459
kceGetNvlinkPeerSupportedLceMask_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe,NvU32 arg3)460 static inline NvU32 kceGetNvlinkPeerSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg3) {
461 return pCe->__kceGetNvlinkPeerSupportedLceMask__(pGpu, pCe, arg3);
462 }
463
kceGetGrceSupportedLceMask_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe)464 static inline NvU32 kceGetGrceSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) {
465 return pCe->__kceGetGrceSupportedLceMask__(pGpu, pCe);
466 }
467
kceIsGenXorHigherSupported_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe,NvU32 checkGen)468 static inline NvBool kceIsGenXorHigherSupported_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen) {
469 return pCe->__kceIsGenXorHigherSupported__(pGpu, pCe, checkGen);
470 }
471
kceApplyGen4orHigherMapping_DISPATCH(OBJGPU * pGpu,struct KernelCE * pCe,NvU32 * arg3,NvU32 * arg4,NvU32 arg5,NvU32 arg6)472 static inline void kceApplyGen4orHigherMapping_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 arg5, NvU32 arg6) {
473 pCe->__kceApplyGen4orHigherMapping__(pGpu, pCe, arg3, arg4, arg5, arg6);
474 }
475
//
// NVOC dispatch wrappers for the engine-state entry points inherited by
// KernelCE (init-missing plus the state pre-init / init / load / unload
// hooks).  Each forwards to the implementation installed in pEngstate's
// function-pointer table; the NvU32 arg3 on the load/unload hooks is passed
// through unchanged.
//

static inline void kceInitMissing_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate) {
    pEngstate->__kceInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kceStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kceStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS kceStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate) {
    return pEngstate->__kceStateInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS kceStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate, NvU32 arg3) {
    return pEngstate->__kceStatePreLoad__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS kceStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate, NvU32 arg3) {
    return pEngstate->__kceStatePostLoad__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS kceStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate, NvU32 arg3) {
    return pEngstate->__kceStatePreUnload__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS kceStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelCE *pEngstate, NvU32 arg3) {
    return pEngstate->__kceStatePostUnload__(pGpu, pEngstate, arg3);
}
507
//
// NVOC dispatch wrappers for the IntrService interface of KernelCE: clear and
// service entry points, routed through the object's function-pointer table.
//

static inline NvBool kceClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__kceClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline NvU32 kceServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return pIntrService->__kceServiceInterrupt__(pGpu, pIntrService, pParams);
}
515
// HAL stub ("_b3696a"): intentional no-op body.
static inline void kceNonstallIntrCheckAndClear_b3696a(OBJGPU *arg1, struct KernelCE *arg2, struct THREAD_STATE_NODE *arg3) {
    return;
}


// Wrapper: resolves to the _b3696a body above, or to an assert stub when the
// KernelCE module is compiled out of this build.
#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceNonstallIntrCheckAndClear(OBJGPU *arg1, struct KernelCE *arg2, struct THREAD_STATE_NODE *arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceNonstallIntrCheckAndClear(arg1, arg2, arg3) kceNonstallIntrCheckAndClear_b3696a(arg1, arg2, arg3)
#endif //__nvoc_kernel_ce_h_disabled

#define kceNonstallIntrCheckAndClear_HAL(arg1, arg2, arg3) kceNonstallIntrCheckAndClear(arg1, arg2, arg3)
530
NV_STATUS kceUpdateClassDB_KERNEL(OBJGPU *pGpu, struct KernelCE *pKCe);


// Wrapper: resolves to kceUpdateClassDB_KERNEL, or to an assert stub
// returning NV_ERR_NOT_SUPPORTED when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceUpdateClassDB(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceUpdateClassDB(pGpu, pKCe) kceUpdateClassDB_KERNEL(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceUpdateClassDB_HAL(pGpu, pKCe) kceUpdateClassDB(pGpu, pKCe)
544
NvBool kceIsCeSysmemRead_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);


// Wrapper: resolves to the _GP100 implementation, or to an assert stub
// returning NV_FALSE when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NvBool kceIsCeSysmemRead(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceIsCeSysmemRead(pGpu, pKCe) kceIsCeSysmemRead_GP100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceIsCeSysmemRead_HAL(pGpu, pKCe) kceIsCeSysmemRead(pGpu, pKCe)
558
NvBool kceIsCeSysmemWrite_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);


// Wrapper: resolves to the _GP100 implementation, or to an assert stub
// returning NV_FALSE when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NvBool kceIsCeSysmemWrite(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceIsCeSysmemWrite(pGpu, pKCe) kceIsCeSysmemWrite_GP100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceIsCeSysmemWrite_HAL(pGpu, pKCe) kceIsCeSysmemWrite(pGpu, pKCe)
572
NvBool kceIsCeNvlinkP2P_GP100(OBJGPU *pGpu, struct KernelCE *pKCe);


// Wrapper: resolves to the _GP100 implementation, or to an assert stub
// returning NV_FALSE when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NvBool kceIsCeNvlinkP2P(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceIsCeNvlinkP2P(pGpu, pKCe) kceIsCeNvlinkP2P_GP100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceIsCeNvlinkP2P_HAL(pGpu, pKCe) kceIsCeNvlinkP2P(pGpu, pKCe)
586
void kceClearAssignedNvlinkPeerMasks_GV100(OBJGPU *pGpu, struct KernelCE *pKCe);


// Wrapper: resolves to the _GV100 implementation, or to an assert stub when
// the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceClearAssignedNvlinkPeerMasks(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceClearAssignedNvlinkPeerMasks(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks_GV100(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceClearAssignedNvlinkPeerMasks_HAL(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks(pGpu, pKCe)
599
// HAL stub ("_4a4dee"): always returns 0.
static inline NvU32 kceGetGrceMaskReg_4a4dee(OBJGPU *pGpu, struct KernelCE *pKCe) {
    return 0;
}


// Wrapper: resolves to the _4a4dee stub above, or to an assert stub returning
// 0 when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NvU32 kceGetGrceMaskReg(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return 0;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetGrceMaskReg(pGpu, pKCe) kceGetGrceMaskReg_4a4dee(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled

#define kceGetGrceMaskReg_HAL(pGpu, pKCe) kceGetGrceMaskReg(pGpu, pKCe)
615
616 NV_STATUS kceConstructEngine_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg3);
617
618 NvBool kceIsPresent_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe);
619
620 NV_STATUS kceStateInitLocked_IMPL(OBJGPU *arg1, struct KernelCE *arg2);
621
622 NV_STATUS kceStateUnload_GP100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags);
623
624 NV_STATUS kceStateLoad_GP100(OBJGPU *arg1, struct KernelCE *arg2, NvU32 arg3);
625
626 void kceStateDestroy_IMPL(OBJGPU *arg1, struct KernelCE *arg2);
627
628 void kceRegisterIntrService_IMPL(OBJGPU *arg1, struct KernelCE *arg2, IntrServiceRecord arg3[175]);
629
630 NV_STATUS kceServiceNotificationInterrupt_IMPL(OBJGPU *arg1, struct KernelCE *arg2, IntrServiceServiceNotificationInterruptArguments *arg3);
631
632 void kceSetShimInstance_GB100(OBJGPU *gpu, struct KernelCE *pKCe);
633
// HAL stub ("_b3696a"): intentional no-op body.
static inline void kceSetShimInstance_b3696a(OBJGPU *gpu, struct KernelCE *pKCe) {
    return;
}

NvBool kceCheckForDecompCapability_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 nv2080EngineId);

// HAL stub ("_491d52"): constant NV_FALSE, spelled as (NvBool)(0 != 0).
static inline NvBool kceCheckForDecompCapability_491d52(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 nv2080EngineId) {
    return ((NvBool)(0 != 0));
}
643
644 NV_STATUS kceGetP2PCes_GV100(struct KernelCE *arg1, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);
645
646 NV_STATUS kceGetP2PCes_GH100(struct KernelCE *arg1, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);
647
648 void kceGetSysmemRWLCEs_GV100(OBJGPU *pGpu, struct KernelCE *arg2, NvU32 *rd, NvU32 *wr);
649
650 void kceGetSysmemRWLCEs_GB100(OBJGPU *pGpu, struct KernelCE *arg2, NvU32 *rd, NvU32 *wr);
651
652 NV_STATUS kceGetNvlinkAutoConfigCeValues_TU102(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);
653
654 NV_STATUS kceGetNvlinkAutoConfigCeValues_GA100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);
655
656 NV_STATUS kceGetNvlinkAutoConfigCeValues_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);
657
658 NvBool kceGetNvlinkMaxTopoForTable_GP100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg3, void *arg4, NvU32 arg5, NvU32 *arg6);
659
// HAL stub ("_491d52"): constant NV_FALSE, spelled as (NvBool)(0 != 0).
static inline NvBool kceGetNvlinkMaxTopoForTable_491d52(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg3, void *arg4, NvU32 arg5, NvU32 *arg6) {
    return ((NvBool)(0 != 0));
}

NvBool kceIsCurrentMaxTopology_GA100(OBJGPU *pGpu, struct KernelCE *arg2, struct NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5);

// HAL stub ("_491d52"): constant NV_FALSE, spelled as (NvBool)(0 != 0).
static inline NvBool kceIsCurrentMaxTopology_491d52(OBJGPU *pGpu, struct KernelCE *arg2, struct NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5) {
    return ((NvBool)(0 != 0));
}
669
670 NvBool kceGetAutoConfigTableEntry_GV100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg3, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg4, NvU32 arg5, NvU32 *arg6, NvU32 *arg7);
671
672 NvBool kceGetAutoConfigTableEntry_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg3, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg4, NvU32 arg5, NvU32 *arg6, NvU32 *arg7);
673
674 NvU32 kceGetGrceConfigSize1_TU102(struct KernelCE *arg1);
675
676 NvU32 kceGetGrceConfigSize1_GB100(struct KernelCE *arg1);
677
678 NvU32 kceGetPce2lceConfigSize1_TU102(struct KernelCE *arg1);
679
680 NvU32 kceGetPce2lceConfigSize1_GA100(struct KernelCE *arg1);
681
682 NvU32 kceGetPce2lceConfigSize1_GA102(struct KernelCE *arg1);
683
684 NvU32 kceGetPce2lceConfigSize1_GH100(struct KernelCE *arg1);
685
686 NvU32 kceGetPce2lceConfigSize1_GB100(struct KernelCE *arg1);
687
688 NV_STATUS kceGetMappings_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6);
689
690 NV_STATUS kceGetMappings_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6);
691
692 NV_STATUS kceGetMappings_GB100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6);
693
kceGetMappings_46f6a7(OBJGPU * pGpu,struct KernelCE * pCe,NVLINK_TOPOLOGY_PARAMS * arg3,NvU32 * arg4,NvU32 * arg5,NvU32 * arg6)694 static inline NV_STATUS kceGetMappings_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6) {
695 return NV_ERR_NOT_SUPPORTED;
696 }
697
NV_STATUS kceMapPceLceForC2C_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);

NV_STATUS kceMapPceLceForC2C_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);

// HAL stub ("_46f6a7"): always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kceMapPceLceForC2C_46f6a7(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kceMapPceLceForScrub_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4);

// HAL stub ("_46f6a7"): always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kceMapPceLceForScrub_46f6a7(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4) {
    return NV_ERR_NOT_SUPPORTED;
}

void kceMapPceLceForDecomp_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4);

// HAL stub ("_b3696a"): intentional no-op body.
static inline void kceMapPceLceForDecomp_b3696a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4) {
    return;
}

void kceMapPceLceForPCIe_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4);

// HAL stub ("_b3696a"): intentional no-op body.
static inline void kceMapPceLceForPCIe_b3696a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4) {
    return;
}

void kceMapPceLceForGRCE_GH100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6, NvU32 arg7);

void kceMapPceLceForGRCE_GB100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6, NvU32 arg7);

// HAL stub ("_b3696a"): intentional no-op body.
static inline void kceMapPceLceForGRCE_b3696a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6, NvU32 arg7) {
    return;
}
731
732 NvU32 kceGetLceMaskForShimInstance_GB100(OBJGPU *pGpu, struct KernelCE *pKCe);
733
kceGetLceMaskForShimInstance_4a4dee(OBJGPU * pGpu,struct KernelCE * pKCe)734 static inline NvU32 kceGetLceMaskForShimInstance_4a4dee(OBJGPU *pGpu, struct KernelCE *pKCe) {
735 return 0;
736 }
737
NV_STATUS kceMapPceLceForSysmemLinks_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6);

NV_STATUS kceMapPceLceForSysmemLinks_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6);

// HAL stub ("_46f6a7"): always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kceMapPceLceForSysmemLinks_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS kceMapPceLceForNvlinkPeers_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);

NV_STATUS kceMapPceLceForNvlinkPeers_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);

NV_STATUS kceMapPceLceForNvlinkPeers_GB100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5);

// HAL stub ("_46f6a7"): always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kceMapPceLceForNvlinkPeers_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5) {
    return NV_ERR_NOT_SUPPORTED;
}

NvU32 kceGetSysmemSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe);

NvU32 kceGetSysmemSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe);

// HAL stub ("_4a4dee"): always 0 (empty mask).
static inline NvU32 kceGetSysmemSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) {
    return 0;
}
763
NV_STATUS kceMapAsyncLceDefault_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6);

NV_STATUS kceMapAsyncLceDefault_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6);

NV_STATUS kceMapAsyncLceDefault_GB100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6);

// HAL stub ("_46f6a7"): always NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kceMapAsyncLceDefault_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 arg6) {
    return NV_ERR_NOT_SUPPORTED;
}

NvU32 kceGetNvlinkPeerSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg3);

NvU32 kceGetNvlinkPeerSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg3);

// HAL stub ("_4a4dee"): always 0 (empty mask).
static inline NvU32 kceGetNvlinkPeerSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg3) {
    return 0;
}

NvU32 kceGetGrceSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe);

NvU32 kceGetGrceSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe);

// HAL stub ("_4a4dee"): always 0 (empty mask).
static inline NvU32 kceGetGrceSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) {
    return 0;
}

NvBool kceIsGenXorHigherSupported_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen);

NvBool kceIsGenXorHigherSupported_GH100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen);

// HAL stub ("_cbe027"): constant NV_TRUE, spelled as (NvBool)(0 == 0).
static inline NvBool kceIsGenXorHigherSupported_cbe027(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 checkGen) {
    return ((NvBool)(0 == 0));
}

void kceApplyGen4orHigherMapping_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 arg5, NvU32 arg6);

// HAL stub ("_b3696a"): intentional no-op body.
static inline void kceApplyGen4orHigherMapping_b3696a(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg3, NvU32 *arg4, NvU32 arg5, NvU32 arg6) {
    return;
}
803
NV_STATUS kceFindFirstInstance_IMPL(OBJGPU *pGpu, struct KernelCE **ppKCe);

#define kceFindFirstInstance(pGpu, ppKCe) kceFindFirstInstance_IMPL(pGpu, ppKCe)
NV_STATUS kceTopLevelPceLceMappingsUpdate_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe);

// Wrapper: resolves to the _IMPL body, or to an assert stub returning
// NV_ERR_NOT_SUPPORTED when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceTopLevelPceLceMappingsUpdate(OBJGPU *pGpu, struct KernelCE *pKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceTopLevelPceLceMappingsUpdate(pGpu, pKCe) kceTopLevelPceLceMappingsUpdate_IMPL(pGpu, pKCe)
#endif //__nvoc_kernel_ce_h_disabled
817
NV_STATUS kceGetFaultMethodBufferSize_IMPL(OBJGPU *pGpu, NvU32 *size);

#define kceGetFaultMethodBufferSize(pGpu, size) kceGetFaultMethodBufferSize_IMPL(pGpu, size)
NV_STATUS kceGetAvailableHubPceMask_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NVLINK_TOPOLOGY_PARAMS *pTopoParams);

#define kceGetAvailableHubPceMask(pGpu, pKCe, pTopoParams) kceGetAvailableHubPceMask_IMPL(pGpu, pKCe, pTopoParams)
NvU32 kceGetLceMask_IMPL(OBJGPU *pGpu);

#define kceGetLceMask(pGpu) kceGetLceMask_IMPL(pGpu)
void kceGetAvailableGrceLceMask_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *grceLceMask);

// Wrapper: resolves to the _IMPL body, or to an assert stub when the KernelCE
// module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceGetAvailableGrceLceMask(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *grceLceMask) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetAvailableGrceLceMask(pGpu, pKCe, grceLceMask) kceGetAvailableGrceLceMask_IMPL(pGpu, pKCe, grceLceMask)
#endif //__nvoc_kernel_ce_h_disabled
836
void kceGetNvlinkCaps_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps);

// Wrapper: resolves to the _IMPL body, or to an assert stub when the KernelCE
// module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceGetNvlinkCaps(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetNvlinkCaps(pGpu, pKCe, pKCeCaps) kceGetNvlinkCaps_IMPL(pGpu, pKCe, pKCeCaps)
#endif //__nvoc_kernel_ce_h_disabled
846
NV_STATUS kceGetDeviceCaps_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps);

// Wrapper: resolves to the _IMPL body, or to an assert stub returning
// NV_ERR_NOT_SUPPORTED when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceGetDeviceCaps(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetDeviceCaps(gpu, pKCe, rmEngineType, ceCaps) kceGetDeviceCaps_IMPL(gpu, pKCe, rmEngineType, ceCaps)
#endif //__nvoc_kernel_ce_h_disabled
857
NV_STATUS kceGetPceConfigForLceType_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 lceType, NvU32 *pNumPcesPerLce, NvU32 *pNumLces, NvU32 *pSupportedPceMask, NvU32 *pSupportedLceMask, NvU32 *pPcesPerHshub);

// Wrapper: resolves to the _IMPL body, or to an assert stub returning
// NV_ERR_NOT_SUPPORTED when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceGetPceConfigForLceType(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 lceType, NvU32 *pNumPcesPerLce, NvU32 *pNumLces, NvU32 *pSupportedPceMask, NvU32 *pSupportedLceMask, NvU32 *pPcesPerHshub) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetPceConfigForLceType(pGpu, pKCe, lceType, pNumPcesPerLce, pNumLces, pSupportedPceMask, pSupportedLceMask, pPcesPerHshub) kceGetPceConfigForLceType_IMPL(pGpu, pKCe, lceType, pNumPcesPerLce, pNumLces, pSupportedPceMask, pSupportedLceMask, pPcesPerHshub)
#endif //__nvoc_kernel_ce_h_disabled
868
NV_STATUS kceFindShimOwner_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, struct KernelCE **ppKCe);

// Wrapper: resolves to the _IMPL body, or to an assert stub returning
// NV_ERR_NOT_SUPPORTED when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceFindShimOwner(OBJGPU *gpu, struct KernelCE *pKCe, struct KernelCE **ppKCe) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceFindShimOwner(gpu, pKCe, ppKCe) kceFindShimOwner_IMPL(gpu, pKCe, ppKCe)
#endif //__nvoc_kernel_ce_h_disabled

NV_STATUS kceGetCeFromNvlinkConfig_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6);

// Wrapper: resolves to the _IMPL body, or to an assert stub returning
// NV_ERR_NOT_SUPPORTED when the KernelCE module is compiled out.
#ifdef __nvoc_kernel_ce_h_disabled
static inline NV_STATUS kceGetCeFromNvlinkConfig(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6) {
    NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetCeFromNvlinkConfig(pGpu, pKCe, arg3, arg4, arg5, arg6) kceGetCeFromNvlinkConfig_IMPL(pGpu, pKCe, arg3, arg4, arg5, arg6)
#endif //__nvoc_kernel_ce_h_disabled
890
891 #undef PRIVATE_FIELD
892
893
894 /* KCE PublicId stride for each device info Group ID. */
895 #define NV_KCE_GROUP_ID_STRIDE 10
896
// Iterate over all KCE objects, starting at engine instance `si`.
// Opens two scopes and declares `maxCe` and `kceInst`; every use must be
// closed with KCE_ITER_END or KCE_ITER_END_OR_RETURN_ERROR.  Instances with
// no KCE object (GPU_GET_KCE returns NULL) are skipped.
#define KCE_ITER_ALL_BEGIN(pGpu, pKCeIter, si) \
    { \
        NvU32 maxCe = gpuGetNumCEs(pGpu); \
        NvU32 kceInst; \
        for (kceInst = (si); kceInst < maxCe; kceInst++) \
        { \
            pKCeIter = GPU_GET_KCE(pGpu, kceInst); \
            if (pKCeIter == NULL) \
            { \
                continue; \
            }
909
// Iterate over all KCE objects on the same shim instance as pKCeConfig.
// Builds on KCE_ITER_ALL_BEGIN; entries whose shimInstance differs from
// pKCeConfig's are skipped.  Close with KCE_ITER_END.
#define KCE_ITER_BEGIN(pGpu, pKCeConfig, pKCeIter, si) \
    KCE_ITER_ALL_BEGIN(pGpu, pKCeIter, si) \
        if (pKCeIter->shimInstance != pKCeConfig->shimInstance) \
        { \
            continue; \
        }

// Iterate over all KCE objects that own a shim (bShimOwner set), starting at
// instance 0.  Close with KCE_ITER_END.
#define KCE_ITER_SHIM_BEGIN(pGpu, pKCeIter) \
    KCE_ITER_ALL_BEGIN(pGpu, pKCeIter, 0) \
        if (!pKCeIter->bShimOwner) \
        { \
            continue; \
        }
925
// Iterate over all copy engines visible to `pDevice`.  Each logical index is
// translated through ceIndexFromType() before the KCE object is fetched;
// indices that fail translation, or that have no KCE object, are skipped.
// Declares `maxCe`, `kceStatus`, `kceInst`, and `kceIdx`; close with
// KCE_ITER_END or KCE_ITER_END_OR_RETURN_ERROR.
#define KCE_ITER_DEVICE_BEGIN(pGpu, pKCeIter, pDevice) \
    { \
        NvU32 maxCe = ENG_CE__SIZE_1; \
        NV_STATUS kceStatus; \
        NvU32 kceInst; \
        NvU32 kceIdx; \
        for (kceInst = 0; kceInst < maxCe; kceInst++) \
        { \
            kceStatus = ceIndexFromType(pGpu, pDevice, RM_ENGINE_TYPE_COPY(kceInst), &kceIdx); \
            if (kceStatus != NV_OK) \
            { \
                continue; \
            } \
            pKCeIter = GPU_GET_KCE(pGpu, kceIdx); \
            if (pKCeIter == NULL) \
            { \
                continue; \
            }
945
// Closes a KCE_ITER_*_BEGIN iteration scope.
#define KCE_ITER_END \
        } \
    }

// Closes a KCE_ITER_*_BEGIN iteration scope; additionally returns
// NV_ERR_INSUFFICIENT_RESOURCES from the enclosing function when the loop ran
// to completion (`kceInst` reached `maxCe`) without an early `break`.
#define KCE_ITER_END_OR_RETURN_ERROR \
        } \
        if (kceInst == maxCe) \
        { \
            return NV_ERR_INSUFFICIENT_RESOURCES; \
        } \
    }
957
958 #endif // KERNEL_CE_H
959
960 #ifdef __cplusplus
961 } // extern "C"
962 #endif
963
964 #endif // _G_KERNEL_CE_NVOC_H_
965