#ifndef _G_KERNEL_FALCON_NVOC_H_
#define _G_KERNEL_FALCON_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*!
 * Provides definitions for all KernelFalcon data structures and
 * interfaces.
 */

#include "g_kernel_falcon_nvoc.h"

#ifndef KERNEL_FALCON_H
#define KERNEL_FALCON_H

#include "core/core.h"
#include "gpu/falcon/falcon_common.h"
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/intr/intr_service.h"

struct KernelChannel;

#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__
#define __NVOC_CLASS_KernelChannel_TYPEDEF__
typedef struct KernelChannel KernelChannel;
#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelChannel
#define __nvoc_class_id_KernelChannel 0x5d8d70
#endif /* __nvoc_class_id_KernelChannel */


typedef struct KernelFalconEngineConfig {
    NvU32 registerBase;        // e.g. NV_P{GSP,SEC,NVDEC}
    NvU32 riscvRegisterBase;   // e.g. NV_FALCON2_{GSP,SEC,NVDEC}_BASE
    NvU32 fbifBase;            // e.g. NV_P{GSP,SEC,NVDEC}_FBIF_BASE
    NvBool bBootFromHs;        // whether the engine has Boot-from-HS (NV_TRUE for HS-capable engines, GA10X+)
    NvU32 pmcEnableMask;       // engine's enable bitmask in PMC (or 0 if engine reset is not in PMC)
    NvU32 bIsPmcDeviceEngine;  // whether the engine's enable bit is in NV_PMC_DEVICE_ENABLE (vs. NV_PMC_ENABLE)
    ENGDESCRIPTOR physEngDesc; // The engine descriptor for the falcon (e.g. ENG_SEC2)
    NvU32 ctxAttr;             // Memory attributes used for context buffers
    NvU32 ctxBufferSize;       // Context buffer size in bytes
    NvU32 addrSpaceList;       // index into the ADDRLIST array in mem_desc.h

    KernelCrashCatEngineConfig crashcatEngConfig;
} KernelFalconEngineConfig;
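
/*
 * Example (illustrative sketch, not part of the generated interface):
 * a typical caller zero-initializes the config, fills in the bases for
 * the engine being described, and hands it to kflcnConfigureEngine()
 * or __objCreate_GenericKernelFalcon() below. The base constants here
 * are hypothetical placeholders, not names defined in this header.
 *
 *   KernelFalconEngineConfig falconConfig;
 *
 *   portMemSet(&falconConfig, 0, sizeof(falconConfig));
 *   falconConfig.registerBase      = NV_FALCON_SEC_BASE;   // hypothetical
 *   falconConfig.riscvRegisterBase = NV_FALCON2_SEC_BASE;  // hypothetical
 *   falconConfig.fbifBase          = NV_PSEC_FBIF_BASE;    // hypothetical
 *   falconConfig.bBootFromHs       = NV_TRUE;
 *   falconConfig.physEngDesc       = ENG_SEC2;
 *
 *   kflcnConfigureEngine(pGpu, pKernelFlcn, &falconConfig);
 */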

/*!
 * Base class for booting Falcon cores (including RISC-V)
 */
#ifdef NVOC_KERNEL_FALCON_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct KernelFalcon {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct KernelCrashCatEngine __nvoc_base_KernelCrashCatEngine;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;
    NvU32 (*__kflcnRegRead__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
    void (*__kflcnRegWrite__)(struct OBJGPU *, struct KernelFalcon *, NvU32, NvU32);
    NvBool (*__kflcnIsRiscvActive__)(struct OBJGPU *, struct KernelFalcon *);
    void (*__kflcnRiscvProgramBcr__)(struct OBJGPU *, struct KernelFalcon *, NvBool);
    void (*__kflcnSwitchToFalcon__)(struct OBJGPU *, struct KernelFalcon *);
    NV_STATUS (*__kflcnResetHw__)(struct OBJGPU *, struct KernelFalcon *);
    NV_STATUS (*__kflcnPreResetWait__)(struct OBJGPU *, struct KernelFalcon *);
    NV_STATUS (*__kflcnWaitForResetToFinish__)(struct OBJGPU *, struct KernelFalcon *);
    NvU32 (*__kflcnReadIntrStatus__)(struct OBJGPU *, struct KernelFalcon *);
    void (*__kflcnIntrRetrigger__)(struct OBJGPU *, struct KernelFalcon *);
    NvU32 (*__kflcnMaskImemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
    NvU32 (*__kflcnMaskDmemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
    void (*__kflcnReadEmem__)(struct KernelFalcon *, NvU64, NvU64, void *);
    NvU32 (*__kflcnGetWFL0Offset__)(struct KernelFalcon *);
    const NvU32 *(*__kflcnGetScratchOffsets__)(struct KernelFalcon *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    void (*__kflcnUnload__)(struct KernelFalcon *);
    NvBool (*__kflcnConfigured__)(struct KernelFalcon *);
    NvU32 (*__kflcnPriRead__)(struct KernelFalcon *, NvU32);
    void (*__kflcnVprintf__)(struct KernelFalcon *, NvBool, const char *, va_list);
    void (*__kflcnPriWrite__)(struct KernelFalcon *, NvU32, NvU32);
    void (*__kflcnSyncBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *, NvU32, NvU32);
    void *(*__kflcnMapBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *);
    void (*__kflcnUnmapBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *);
    void (*__kflcnReadDmem__)(struct KernelFalcon *, NvU32, NvU32, void *);
    NvU32 registerBase;
    NvU32 riscvRegisterBase;
    NvU32 fbifBase;
    NvBool bBootFromHs;
    NvU32 pmcEnableMask;
    NvU32 bIsPmcDeviceEngine;
    ENGDESCRIPTOR physEngDesc;
    NvU32 ctxAttr;
    NvU32 ctxBufferSize;
    NvU32 addrSpaceList;
};

#ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__
#define __NVOC_CLASS_KernelFalcon_TYPEDEF__
typedef struct KernelFalcon KernelFalcon;
#endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelFalcon
#define __nvoc_class_id_KernelFalcon 0xb6b1af
#endif /* __nvoc_class_id_KernelFalcon */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;

#define __staticCast_KernelFalcon(pThis) \
    ((pThis)->__nvoc_pbase_KernelFalcon)

#ifdef __nvoc_kernel_falcon_h_disabled
#define __dynamicCast_KernelFalcon(pThis) ((KernelFalcon*)NULL)
#else //__nvoc_kernel_falcon_h_disabled
#define __dynamicCast_KernelFalcon(pThis) \
    ((KernelFalcon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelFalcon)))
#endif //__nvoc_kernel_falcon_h_disabled


NV_STATUS __nvoc_objCreateDynamic_KernelFalcon(KernelFalcon**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelFalcon(KernelFalcon**, Dynamic*, NvU32);
#define __objCreate_KernelFalcon(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelFalcon((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

#define kflcnRegRead(pGpu, pKernelFlcn, offset) kflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kflcnRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kflcnRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kflcnRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kflcnIsRiscvActive(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn)
#define kflcnIsRiscvActive_HAL(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn)
#define kflcnRiscvProgramBcr(pGpu, pKernelFlcn, bBRFetch) kflcnRiscvProgramBcr_DISPATCH(pGpu, pKernelFlcn, bBRFetch)
#define kflcnRiscvProgramBcr_HAL(pGpu, pKernelFlcn, bBRFetch) kflcnRiscvProgramBcr_DISPATCH(pGpu, pKernelFlcn, bBRFetch)
#define kflcnSwitchToFalcon(pGpu, pKernelFlcn) kflcnSwitchToFalcon_DISPATCH(pGpu, pKernelFlcn)
#define kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn) kflcnSwitchToFalcon_DISPATCH(pGpu, pKernelFlcn)
#define kflcnResetHw(pGpu, pKernelFlcn) kflcnResetHw_DISPATCH(pGpu, pKernelFlcn)
#define kflcnPreResetWait(pGpu, pKernelFlcn) kflcnPreResetWait_DISPATCH(pGpu, pKernelFlcn)
#define kflcnPreResetWait_HAL(pGpu, pKernelFlcn) kflcnPreResetWait_DISPATCH(pGpu, pKernelFlcn)
#define kflcnWaitForResetToFinish(pGpu, pKernelFlcn) kflcnWaitForResetToFinish_DISPATCH(pGpu, pKernelFlcn)
#define kflcnWaitForResetToFinish_HAL(pGpu, pKernelFlcn) kflcnWaitForResetToFinish_DISPATCH(pGpu, pKernelFlcn)
#define kflcnReadIntrStatus(pGpu, pKernelFlcn) kflcnReadIntrStatus_DISPATCH(pGpu, pKernelFlcn)
#define kflcnReadIntrStatus_HAL(pGpu, pKernelFlcn) kflcnReadIntrStatus_DISPATCH(pGpu, pKernelFlcn)
#define kflcnIntrRetrigger(pGpu, pKernelFlcn) kflcnIntrRetrigger_DISPATCH(pGpu, pKernelFlcn)
#define kflcnIntrRetrigger_HAL(pGpu, pKernelFlcn) kflcnIntrRetrigger_DISPATCH(pGpu, pKernelFlcn)
#define kflcnMaskImemAddr(pGpu, pKernelFlcn, addr) kflcnMaskImemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskImemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskImemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskDmemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnReadEmem(arg0, offset, size, pBuf) kflcnReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kflcnGetWFL0Offset(arg0) kflcnGetWFL0Offset_DISPATCH(arg0)
#define kflcnGetScratchOffsets(arg0, scratchGroupId) kflcnGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kflcnUnload(arg0) kflcnUnload_DISPATCH(arg0)
#define kflcnConfigured(arg0) kflcnConfigured_DISPATCH(arg0)
#define kflcnPriRead(arg0, offset) kflcnPriRead_DISPATCH(arg0, offset)
#define kflcnVprintf(arg0, bReportStart, fmt, args) kflcnVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kflcnPriWrite(arg0, offset, data) kflcnPriWrite_DISPATCH(arg0, offset, data)
#define kflcnSyncBufferDescriptor(arg0, pBufDesc, offset, size) kflcnSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kflcnMapBufferDescriptor(arg0, pBufDesc) kflcnMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kflcnUnmapBufferDescriptor(arg0, pBufDesc) kflcnUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kflcnReadDmem(arg0, offset, size, pBuf) kflcnReadDmem_DISPATCH(arg0, offset, size, pBuf)
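
/*
 * Example (sketch): the *_DISPATCH macros above route through the
 * object's function pointers, so the same call services any falcon
 * subclass regardless of the chip-specific implementation bound at
 * construction. NV_PFALCON_FALCON_MAILBOX0 is used as a representative
 * priv offset and is an assumption; it is not defined in this header.
 *
 *   NvU32 mailbox0 = kflcnRegRead(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0);
 *   kflcnRegWrite(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0, mailbox0 + 1);
 */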
NvU32 kflcnRiscvRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline NvU32 kflcnRiscvRegRead(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
    return 0;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnRiscvRegRead(pGpu, pKernelFlcn, offset) kflcnRiscvRegRead_TU102(pGpu, pKernelFlcn, offset)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRiscvRegRead(pGpu, pKernelFlcn, offset)

void kflcnRiscvRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnRiscvRegWrite(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnRiscvRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRiscvRegWrite_TU102(pGpu, pKernelFlcn, offset, data)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRiscvRegWrite(pGpu, pKernelFlcn, offset, data)

NvBool kflcnIsRiscvCpuEnabled_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline NvBool kflcnIsRiscvCpuEnabled(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnIsRiscvCpuEnabled(pGpu, pKernelFlcn) kflcnIsRiscvCpuEnabled_TU102(pGpu, pKernelFlcn)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnIsRiscvCpuEnabled_HAL(pGpu, pKernelFlcn) kflcnIsRiscvCpuEnabled(pGpu, pKernelFlcn)

void kflcnReset_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnReset(pGpu, pKernelFlcn) kflcnReset_TU102(pGpu, pKernelFlcn)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnReset_HAL(pGpu, pKernelFlcn) kflcnReset(pGpu, pKernelFlcn)

void kflcnSecureReset_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnSecureReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnSecureReset(pGpu, pKernelFlcn) kflcnSecureReset_TU102(pGpu, pKernelFlcn)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnSecureReset_HAL(pGpu, pKernelFlcn) kflcnSecureReset(pGpu, pKernelFlcn)

void kflcnEnable_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bEnable);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnEnable(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bEnable) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnEnable(pGpu, pKernelFlcn, bEnable) kflcnEnable_TU102(pGpu, pKernelFlcn, bEnable)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnEnable_HAL(pGpu, pKernelFlcn, bEnable) kflcnEnable(pGpu, pKernelFlcn, bEnable)

void kflcnStartCpu_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnStartCpu(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnStartCpu(pGpu, pKernelFlcn) kflcnStartCpu_TU102(pGpu, pKernelFlcn)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnStartCpu_HAL(pGpu, pKernelFlcn) kflcnStartCpu(pGpu, pKernelFlcn)

void kflcnDisableCtxReq_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnDisableCtxReq(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnDisableCtxReq(pGpu, pKernelFlcn) kflcnDisableCtxReq_TU102(pGpu, pKernelFlcn)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnDisableCtxReq_HAL(pGpu, pKernelFlcn) kflcnDisableCtxReq(pGpu, pKernelFlcn)

NV_STATUS kflcnWaitForHalt_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 timeoutUs, NvU32 flags);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline NV_STATUS kflcnWaitForHalt(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 timeoutUs, NvU32 flags) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnWaitForHalt(pGpu, pKernelFlcn, timeoutUs, flags) kflcnWaitForHalt_TU102(pGpu, pKernelFlcn, timeoutUs, flags)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, timeoutUs, flags) kflcnWaitForHalt(pGpu, pKernelFlcn, timeoutUs, flags)
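
/*
 * Example (sketch): a minimal boot flow stitched together from the HAL
 * entry points above. Ucode loading itself is outside this header, and
 * the 10ms timeout is an arbitrary illustration.
 *
 *   kflcnReset_HAL(pGpu, pKernelFlcn);          // reset engine and memories
 *   kflcnDisableCtxReq_HAL(pGpu, pKernelFlcn);  // run without a channel context
 *   // ... load IMEM/DMEM here ...
 *   kflcnStartCpu_HAL(pGpu, pKernelFlcn);       // release the core from reset
 *   NV_ASSERT_OK(kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, 10000, 0));
 */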

NvU32 kflcnRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);

static inline NvU32 kflcnRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) {
    return pKernelFlcn->__kflcnRegRead__(pGpu, pKernelFlcn, offset);
}

void kflcnRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data);

static inline void kflcnRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    pKernelFlcn->__kflcnRegWrite__(pGpu, pKernelFlcn, offset, data);
}

NvBool kflcnIsRiscvActive_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

NvBool kflcnIsRiscvActive_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

static inline NvBool kflcnIsRiscvActive_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return pKernelFlcn->__kflcnIsRiscvActive__(pGpu, pKernelFlcn);
}

void kflcnRiscvProgramBcr_GA102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bBRFetch);

static inline void kflcnRiscvProgramBcr_f2d351(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bBRFetch) {
    NV_ASSERT_PRECOMP(0);
}

static inline void kflcnRiscvProgramBcr_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bBRFetch) {
    pKernelFlcn->__kflcnRiscvProgramBcr__(pGpu, pKernelFlcn, bBRFetch);
}

void kflcnSwitchToFalcon_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

static inline void kflcnSwitchToFalcon_b3696a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return;
}

static inline void kflcnSwitchToFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    pKernelFlcn->__kflcnSwitchToFalcon__(pGpu, pKernelFlcn);
}
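
/*
 * Example (sketch, with a hypothetical bWantRiscv flag): selecting the
 * core before loading ucode. NV_TRUE for bBRFetch is assumed, from the
 * parameter name, to ask the boot ROM to fetch the image itself;
 * kflcnSwitchToFalcon is a no-op on chips without a switchable RISC-V
 * core (the _b3696a variant above).
 *
 *   if (bWantRiscv)
 *   {
 *       kflcnRiscvProgramBcr(pGpu, pKernelFlcn, NV_TRUE);
 *   }
 *   else if (kflcnIsRiscvActive(pGpu, pKernelFlcn))
 *   {
 *       // Core is currently RISC-V but legacy Falcon ucode is wanted.
 *       kflcnSwitchToFalcon(pGpu, pKernelFlcn);
 *   }
 */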

static inline NV_STATUS kflcnResetHw_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return pKernelFlcn->__kflcnResetHw__(pGpu, pKernelFlcn);
}

NV_STATUS kflcnPreResetWait_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

static inline NV_STATUS kflcnPreResetWait_56cd7a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return NV_OK;
}

static inline NV_STATUS kflcnPreResetWait_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return pKernelFlcn->__kflcnPreResetWait__(pGpu, pKernelFlcn);
}

NV_STATUS kflcnWaitForResetToFinish_GA102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

NV_STATUS kflcnWaitForResetToFinish_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

static inline NV_STATUS kflcnWaitForResetToFinish_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return pKernelFlcn->__kflcnWaitForResetToFinish__(pGpu, pKernelFlcn);
}

NvU32 kflcnReadIntrStatus_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

NvU32 kflcnReadIntrStatus_GA102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

static inline NvU32 kflcnReadIntrStatus_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return pKernelFlcn->__kflcnReadIntrStatus__(pGpu, pKernelFlcn);
}

void kflcnIntrRetrigger_GA100(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

static inline void kflcnIntrRetrigger_b3696a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    return;
}

static inline void kflcnIntrRetrigger_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) {
    pKernelFlcn->__kflcnIntrRetrigger__(pGpu, pKernelFlcn);
}

NvU32 kflcnMaskImemAddr_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr);

NvU32 kflcnMaskImemAddr_GA100(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr);

static inline NvU32 kflcnMaskImemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr) {
    return pKernelFlcn->__kflcnMaskImemAddr__(pGpu, pKernelFlcn, addr);
}

NvU32 kflcnMaskDmemAddr_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr);

NvU32 kflcnMaskDmemAddr_GA100(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr);

static inline NvU32 kflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr) {
    return pKernelFlcn->__kflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}

static inline void kflcnReadEmem_DISPATCH(struct KernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    arg0->__kflcnReadEmem__(arg0, offset, size, pBuf);
}

static inline NvU32 kflcnGetWFL0Offset_DISPATCH(struct KernelFalcon *arg0) {
    return arg0->__kflcnGetWFL0Offset__(arg0);
}

static inline const NvU32 *kflcnGetScratchOffsets_DISPATCH(struct KernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return arg0->__kflcnGetScratchOffsets__(arg0, scratchGroupId);
}

static inline void kflcnUnload_DISPATCH(struct KernelFalcon *arg0) {
    arg0->__kflcnUnload__(arg0);
}

static inline NvBool kflcnConfigured_DISPATCH(struct KernelFalcon *arg0) {
    return arg0->__kflcnConfigured__(arg0);
}

static inline NvU32 kflcnPriRead_DISPATCH(struct KernelFalcon *arg0, NvU32 offset) {
    return arg0->__kflcnPriRead__(arg0, offset);
}

static inline void kflcnVprintf_DISPATCH(struct KernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    arg0->__kflcnVprintf__(arg0, bReportStart, fmt, args);
}

static inline void kflcnPriWrite_DISPATCH(struct KernelFalcon *arg0, NvU32 offset, NvU32 data) {
    arg0->__kflcnPriWrite__(arg0, offset, data);
}

static inline void kflcnSyncBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg0->__kflcnSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}

static inline void *kflcnMapBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return arg0->__kflcnMapBufferDescriptor__(arg0, pBufDesc);
}

static inline void kflcnUnmapBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    arg0->__kflcnUnmapBufferDescriptor__(arg0, pBufDesc);
}

static inline void kflcnReadDmem_DISPATCH(struct KernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    arg0->__kflcnReadDmem__(arg0, offset, size, pBuf);
}

void kflcnConfigureEngine_IMPL(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig);

#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnConfigureEngine(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnConfigureEngine(pGpu, pKernelFalcon, pFalconConfig) kflcnConfigureEngine_IMPL(pGpu, pKernelFalcon, pFalconConfig)
#endif //__nvoc_kernel_falcon_h_disabled

NV_STATUS kflcnAllocContext_IMPL(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3);

#ifdef __nvoc_kernel_falcon_h_disabled
static inline NV_STATUS kflcnAllocContext(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnAllocContext(arg0, arg1, arg2, arg3) kflcnAllocContext_IMPL(arg0, arg1, arg2, arg3)
#endif //__nvoc_kernel_falcon_h_disabled

NV_STATUS kflcnFreeContext_IMPL(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3);

#ifdef __nvoc_kernel_falcon_h_disabled
static inline NV_STATUS kflcnFreeContext(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnFreeContext(arg0, arg1, arg2, arg3) kflcnFreeContext_IMPL(arg0, arg1, arg2, arg3)
#endif //__nvoc_kernel_falcon_h_disabled

struct KernelFalcon *kflcnGetKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR physEngDesc);

#define kflcnGetKernelFalconForEngine(pGpu, physEngDesc) kflcnGetKernelFalconForEngine_IMPL(pGpu, physEngDesc)
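
/*
 * Example (sketch): resolving the KernelFalcon bound to an engine
 * descriptor and polling its interrupt status. ENG_SEC2 is the
 * descriptor named in the config comment above; error handling is
 * reduced to a NULL check.
 *
 *   KernelFalcon *pKernelFlcn = kflcnGetKernelFalconForEngine(pGpu, ENG_SEC2);
 *
 *   if (pKernelFlcn != NULL)
 *   {
 *       NvU32 intrStatus = kflcnReadIntrStatus(pGpu, pKernelFlcn);
 *       NV_PRINTF(LEVEL_INFO, "SEC2 intrstat: 0x%x\n", intrStatus);
 *   }
 */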
#undef PRIVATE_FIELD


// Basic implementation of KernelFalcon that can be instantiated.
#ifdef NVOC_KERNEL_FALCON_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct GenericKernelFalcon {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct KernelFalcon __nvoc_base_KernelFalcon;
    struct IntrService __nvoc_base_IntrService;
    struct Object __nvoc_base_Object;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;
    struct IntrService *__nvoc_pbase_IntrService;
    struct Object *__nvoc_pbase_Object;
    struct GenericKernelFalcon *__nvoc_pbase_GenericKernelFalcon;
    NV_STATUS (*__gkflcnResetHw__)(struct OBJGPU *, struct GenericKernelFalcon *);
    void (*__gkflcnRegisterIntrService__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceRecord *);
    NV_STATUS (*__gkflcnServiceNotificationInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceNotificationInterruptArguments *);
    void (*__gkflcnReadEmem__)(struct GenericKernelFalcon *, NvU64, NvU64, void *);
    NvU32 (*__gkflcnGetWFL0Offset__)(struct GenericKernelFalcon *);
    void (*__gkflcnUnload__)(struct GenericKernelFalcon *);
    NvBool (*__gkflcnConfigured__)(struct GenericKernelFalcon *);
    NvU32 (*__gkflcnPriRead__)(struct GenericKernelFalcon *, NvU32);
    const NvU32 *(*__gkflcnGetScratchOffsets__)(struct GenericKernelFalcon *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    void (*__gkflcnRegWrite__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32, NvU32);
    NvU32 (*__gkflcnMaskDmemAddr__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32);
    void (*__gkflcnVprintf__)(struct GenericKernelFalcon *, NvBool, const char *, va_list);
    NvBool (*__gkflcnClearInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceClearInterruptArguments *);
    void (*__gkflcnPriWrite__)(struct GenericKernelFalcon *, NvU32, NvU32);
    void *(*__gkflcnMapBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *);
    void (*__gkflcnSyncBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *, NvU32, NvU32);
    NvU32 (*__gkflcnRegRead__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32);
    void (*__gkflcnUnmapBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *);
    NvU32 (*__gkflcnServiceInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceInterruptArguments *);
    void (*__gkflcnReadDmem__)(struct GenericKernelFalcon *, NvU32, NvU32, void *);
};

#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__
#define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__
typedef struct GenericKernelFalcon GenericKernelFalcon;
#endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */

#ifndef __nvoc_class_id_GenericKernelFalcon
#define __nvoc_class_id_GenericKernelFalcon 0xabcf08
#endif /* __nvoc_class_id_GenericKernelFalcon */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericKernelFalcon;

#define __staticCast_GenericKernelFalcon(pThis) \
    ((pThis)->__nvoc_pbase_GenericKernelFalcon)

#ifdef __nvoc_kernel_falcon_h_disabled
#define __dynamicCast_GenericKernelFalcon(pThis) ((GenericKernelFalcon*)NULL)
#else //__nvoc_kernel_falcon_h_disabled
#define __dynamicCast_GenericKernelFalcon(pThis) \
    ((GenericKernelFalcon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericKernelFalcon)))
#endif //__nvoc_kernel_falcon_h_disabled


NV_STATUS __nvoc_objCreateDynamic_GenericKernelFalcon(GenericKernelFalcon**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_GenericKernelFalcon(GenericKernelFalcon**, Dynamic*, NvU32, struct OBJGPU * arg_pGpu, KernelFalconEngineConfig * arg_pFalconConfig);
#define __objCreate_GenericKernelFalcon(ppNewObj, pParent, createFlags, arg_pGpu, arg_pFalconConfig) \
    __nvoc_objCreate_GenericKernelFalcon((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pGpu, arg_pFalconConfig)
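
/*
 * Example (sketch): instantiating the concrete GenericKernelFalcon with
 * a populated config (see KernelFalconEngineConfig above). pParent is
 * whichever NVOC object owns the falcon; the variable names are
 * illustrative.
 *
 *   GenericKernelFalcon *pGenKernFlcn = NULL;
 *
 *   NV_ASSERT_OK_OR_RETURN(
 *       __objCreate_GenericKernelFalcon(&pGenKernFlcn, pParent, 0,
 *                                       pGpu, &falconConfig));
 *
 *   // The result can then be used through the base-class interface:
 *   KernelFalcon *pKernelFlcn = staticCast(pGenKernFlcn, KernelFalcon);
 */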

#define gkflcnResetHw(pGpu, pGenKernFlcn) gkflcnResetHw_DISPATCH(pGpu, pGenKernFlcn)
#define gkflcnRegisterIntrService(arg0, arg1, arg2) gkflcnRegisterIntrService_DISPATCH(arg0, arg1, arg2)
#define gkflcnServiceNotificationInterrupt(arg0, arg1, arg2) gkflcnServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
#define gkflcnReadEmem(arg0, offset, size, pBuf) gkflcnReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define gkflcnGetWFL0Offset(arg0) gkflcnGetWFL0Offset_DISPATCH(arg0)
#define gkflcnUnload(arg0) gkflcnUnload_DISPATCH(arg0)
#define gkflcnConfigured(arg0) gkflcnConfigured_DISPATCH(arg0)
#define gkflcnPriRead(arg0, offset) gkflcnPriRead_DISPATCH(arg0, offset)
#define gkflcnGetScratchOffsets(arg0, scratchGroupId) gkflcnGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define gkflcnRegWrite(pGpu, pKernelFlcn, offset, data) gkflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define gkflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) gkflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define gkflcnVprintf(arg0, bReportStart, fmt, args) gkflcnVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define gkflcnClearInterrupt(pGpu, pIntrService, pParams) gkflcnClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define gkflcnPriWrite(arg0, offset, data) gkflcnPriWrite_DISPATCH(arg0, offset, data)
#define gkflcnMapBufferDescriptor(arg0, pBufDesc) gkflcnMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define gkflcnSyncBufferDescriptor(arg0, pBufDesc, offset, size) gkflcnSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define gkflcnRegRead(pGpu, pKernelFlcn, offset) gkflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define gkflcnUnmapBufferDescriptor(arg0, pBufDesc) gkflcnUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define gkflcnServiceInterrupt(pGpu, pIntrService, pParams) gkflcnServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define gkflcnReadDmem(arg0, offset, size, pBuf) gkflcnReadDmem_DISPATCH(arg0, offset, size, pBuf)
NV_STATUS gkflcnResetHw_IMPL(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn);

static inline NV_STATUS gkflcnResetHw_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn) {
    return pGenKernFlcn->__gkflcnResetHw__(pGpu, pGenKernFlcn);
}

void gkflcnRegisterIntrService_IMPL(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceRecord arg2[167]);

static inline void gkflcnRegisterIntrService_DISPATCH(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceRecord arg2[167]) {
    arg1->__gkflcnRegisterIntrService__(arg0, arg1, arg2);
}

NV_STATUS gkflcnServiceNotificationInterrupt_IMPL(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceServiceNotificationInterruptArguments *arg2);

static inline NV_STATUS gkflcnServiceNotificationInterrupt_DISPATCH(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) {
    return arg1->__gkflcnServiceNotificationInterrupt__(arg0, arg1, arg2);
}

static inline void gkflcnReadEmem_DISPATCH(struct GenericKernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    arg0->__gkflcnReadEmem__(arg0, offset, size, pBuf);
}

static inline NvU32 gkflcnGetWFL0Offset_DISPATCH(struct GenericKernelFalcon *arg0) {
    return arg0->__gkflcnGetWFL0Offset__(arg0);
}

static inline void gkflcnUnload_DISPATCH(struct GenericKernelFalcon *arg0) {
    arg0->__gkflcnUnload__(arg0);
}

static inline NvBool gkflcnConfigured_DISPATCH(struct GenericKernelFalcon *arg0) {
    return arg0->__gkflcnConfigured__(arg0);
}

static inline NvU32 gkflcnPriRead_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset) {
    return arg0->__gkflcnPriRead__(arg0, offset);
}

static inline const NvU32 *gkflcnGetScratchOffsets_DISPATCH(struct GenericKernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return arg0->__gkflcnGetScratchOffsets__(arg0, scratchGroupId);
}

static inline void gkflcnRegWrite_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    pKernelFlcn->__gkflcnRegWrite__(pGpu, pKernelFlcn, offset, data);
}

static inline NvU32 gkflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 addr) {
    return pKernelFlcn->__gkflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}

static inline void gkflcnVprintf_DISPATCH(struct GenericKernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    arg0->__gkflcnVprintf__(arg0, bReportStart, fmt, args);
}

static inline NvBool gkflcnClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__gkflcnClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline void gkflcnPriWrite_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 data) {
    arg0->__gkflcnPriWrite__(arg0, offset, data);
}

static inline void *gkflcnMapBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return arg0->__gkflcnMapBufferDescriptor__(arg0, pBufDesc);
}

static inline void gkflcnSyncBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg0->__gkflcnSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}

static inline NvU32 gkflcnRegRead_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset) {
    return pKernelFlcn->__gkflcnRegRead__(pGpu, pKernelFlcn, offset);
}

static inline void gkflcnUnmapBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    arg0->__gkflcnUnmapBufferDescriptor__(arg0, pBufDesc);
}

static inline NvU32 gkflcnServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return pIntrService->__gkflcnServiceInterrupt__(pGpu, pIntrService, pParams);
}

static inline void gkflcnReadDmem_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    arg0->__gkflcnReadDmem__(arg0, offset, size, pBuf);
}

NV_STATUS gkflcnConstruct_IMPL(struct GenericKernelFalcon *arg_pGenKernFlcn, struct OBJGPU *arg_pGpu, KernelFalconEngineConfig *arg_pFalconConfig);

#define __nvoc_gkflcnConstruct(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig) gkflcnConstruct_IMPL(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig)
#undef PRIVATE_FIELD


#endif // KERNEL_FALCON_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERNEL_FALCON_NVOC_H_