1 #define NVOC_KERNEL_CCU_API_H_PRIVATE_ACCESS_ALLOWED
2 #include "nvoc/runtime.h"
3 #include "nvoc/rtti.h"
4 #include "nvtypes.h"
5 #include "nvport/nvport.h"
6 #include "nvport/inline/util_valist.h"
7 #include "utils/nvassert.h"
8 #include "g_kernel_ccu_api_nvoc.h"
9
#ifdef DEBUG
// Debug-only canary: if two classes hash to the same NVOC class id (0x3abed3),
// both files define this symbol and the link fails, flagging the collision.
char __nvoc_class_id_uniqueness_check_0x3abed3 = 1;
#endif
13
14 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCcuApi;
15
16 extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
17
18 extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
19
20 extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
21
22 extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
23
24 extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
25
26 void __nvoc_init_KernelCcuApi(KernelCcuApi*);
27 void __nvoc_init_funcTable_KernelCcuApi(KernelCcuApi*);
28 NV_STATUS __nvoc_ctor_KernelCcuApi(KernelCcuApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
29 void __nvoc_init_dataField_KernelCcuApi(KernelCcuApi*);
30 void __nvoc_dtor_KernelCcuApi(KernelCcuApi*);
31 extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCcuApi;
32
// RTTI record for the most-derived class itself: offset 0 and the real
// (dynamic) destructor, since no pointer adjustment is needed.
static const struct NVOC_RTTI __nvoc_rtti_KernelCcuApi_KernelCcuApi = {
    /*pClassDef=*/      &__nvoc_class_def_KernelCcuApi,
    /*dtor=*/           (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCcuApi,
    /*offset=*/         0,
};
38
// RTTI record for the Object ancestor: offset locates the embedded base
// (four levels deep) inside KernelCcuApi for dynamicCast pointer adjustment.
static const struct NVOC_RTTI __nvoc_rtti_KernelCcuApi_Object = {
    /*pClassDef=*/      &__nvoc_class_def_Object,
    /*dtor=*/           &__nvoc_destructFromBase,
    /*offset=*/         NV_OFFSETOF(KernelCcuApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
44
// RTTI record for the RsResource ancestor embedded in KernelCcuApi.
static const struct NVOC_RTTI __nvoc_rtti_KernelCcuApi_RsResource = {
    /*pClassDef=*/      &__nvoc_class_def_RsResource,
    /*dtor=*/           &__nvoc_destructFromBase,
    /*offset=*/         NV_OFFSETOF(KernelCcuApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
50
// RTTI record for the RmResourceCommon ancestor embedded in KernelCcuApi.
static const struct NVOC_RTTI __nvoc_rtti_KernelCcuApi_RmResourceCommon = {
    /*pClassDef=*/      &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/           &__nvoc_destructFromBase,
    /*offset=*/         NV_OFFSETOF(KernelCcuApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
56
// RTTI record for the RmResource ancestor embedded in KernelCcuApi.
static const struct NVOC_RTTI __nvoc_rtti_KernelCcuApi_RmResource = {
    /*pClassDef=*/      &__nvoc_class_def_RmResource,
    /*dtor=*/           &__nvoc_destructFromBase,
    /*offset=*/         NV_OFFSETOF(KernelCcuApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};
62
// RTTI record for the immediate GpuResource base embedded in KernelCcuApi.
static const struct NVOC_RTTI __nvoc_rtti_KernelCcuApi_GpuResource = {
    /*pClassDef=*/      &__nvoc_class_def_GpuResource,
    /*dtor=*/           &__nvoc_destructFromBase,
    /*offset=*/         NV_OFFSETOF(KernelCcuApi, __nvoc_base_GpuResource),
};
68
// Cast table consumed by dynamicCast: the class itself followed by all five
// ancestors, ordered from most-derived to root.
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCcuApi = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_KernelCcuApi_KernelCcuApi,
        &__nvoc_rtti_KernelCcuApi_GpuResource,
        &__nvoc_rtti_KernelCcuApi_RmResource,
        &__nvoc_rtti_KernelCcuApi_RmResourceCommon,
        &__nvoc_rtti_KernelCcuApi_RsResource,
        &__nvoc_rtti_KernelCcuApi_Object,
    },
};
80
// Public class descriptor: size, class id, factory function, cast table and
// the exported (control-call) method table for KernelCcuApi.
const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCcuApi = 
{
    /*classInfo=*/ {
        /*size=*/               sizeof(KernelCcuApi),
        /*classId=*/            classId(KernelCcuApi),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "KernelCcuApi",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelCcuApi,
    /*pCastInfo=*/          &__nvoc_castinfo_KernelCcuApi,
    /*pExportInfo=*/        &__nvoc_export_info_KernelCcuApi
};
95
__nvoc_thunk_KernelCcuApi_gpuresMap(struct GpuResource * pKernelCcuApi,struct CALL_CONTEXT * pCallContext,struct RS_CPU_MAP_PARAMS * pParams,struct RsCpuMapping * pCpuMapping)96 static NV_STATUS __nvoc_thunk_KernelCcuApi_gpuresMap(struct GpuResource *pKernelCcuApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
97 return kccuapiMap((struct KernelCcuApi *)(((unsigned char *)pKernelCcuApi) - __nvoc_rtti_KernelCcuApi_GpuResource.offset), pCallContext, pParams, pCpuMapping);
98 }
99
__nvoc_thunk_KernelCcuApi_gpuresUnmap(struct GpuResource * pKernelCcuApi,struct CALL_CONTEXT * pCallContext,struct RsCpuMapping * pCpuMapping)100 static NV_STATUS __nvoc_thunk_KernelCcuApi_gpuresUnmap(struct GpuResource *pKernelCcuApi, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
101 return kccuapiUnmap((struct KernelCcuApi *)(((unsigned char *)pKernelCcuApi) - __nvoc_rtti_KernelCcuApi_GpuResource.offset), pCallContext, pCpuMapping);
102 }
103
__nvoc_thunk_KernelCcuApi_gpuresGetMapAddrSpace(struct GpuResource * pKernelCcuApi,struct CALL_CONTEXT * pCallContext,NvU32 mapFlags,NV_ADDRESS_SPACE * pAddrSpace)104 static NV_STATUS __nvoc_thunk_KernelCcuApi_gpuresGetMapAddrSpace(struct GpuResource *pKernelCcuApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
105 return kccuapiGetMapAddrSpace((struct KernelCcuApi *)(((unsigned char *)pKernelCcuApi) - __nvoc_rtti_KernelCcuApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
106 }
107
__nvoc_thunk_KernelCcuApi_rmresGetMemoryMappingDescriptor(struct RmResource * pKernelCcuApi,struct MEMORY_DESCRIPTOR ** ppMemDesc)108 static NV_STATUS __nvoc_thunk_KernelCcuApi_rmresGetMemoryMappingDescriptor(struct RmResource *pKernelCcuApi, struct MEMORY_DESCRIPTOR **ppMemDesc) {
109 return kccuapiGetMemoryMappingDescriptor((struct KernelCcuApi *)(((unsigned char *)pKernelCcuApi) - __nvoc_rtti_KernelCcuApi_RmResource.offset), ppMemDesc);
110 }
111
__nvoc_thunk_GpuResource_kccuapiShareCallback(struct KernelCcuApi * pGpuResource,struct RsClient * pInvokingClient,struct RsResourceRef * pParentRef,RS_SHARE_POLICY * pSharePolicy)112 static NvBool __nvoc_thunk_GpuResource_kccuapiShareCallback(struct KernelCcuApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
113 return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCcuApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
114 }
115
__nvoc_thunk_RmResource_kccuapiCheckMemInterUnmap(struct KernelCcuApi * pRmResource,NvBool bSubdeviceHandleProvided)116 static NV_STATUS __nvoc_thunk_RmResource_kccuapiCheckMemInterUnmap(struct KernelCcuApi *pRmResource, NvBool bSubdeviceHandleProvided) {
117 return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), bSubdeviceHandleProvided);
118 }
119
__nvoc_thunk_RsResource_kccuapiMapTo(struct KernelCcuApi * pResource,RS_RES_MAP_TO_PARAMS * pParams)120 static NV_STATUS __nvoc_thunk_RsResource_kccuapiMapTo(struct KernelCcuApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
121 return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset), pParams);
122 }
123
__nvoc_thunk_RsResource_kccuapiGetRefCount(struct KernelCcuApi * pResource)124 static NvU32 __nvoc_thunk_RsResource_kccuapiGetRefCount(struct KernelCcuApi *pResource) {
125 return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset));
126 }
127
// Thunk in the KernelCcuApi vtable: up-casts to RsResource and forwards to the
// inherited resAddAdditionalDependants. (Repaired mangled definition line.)
static void __nvoc_thunk_RsResource_kccuapiAddAdditionalDependants(struct RsClient *pClient, struct KernelCcuApi *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset), pReference);
}
131
__nvoc_thunk_RmResource_kccuapiControl_Prologue(struct KernelCcuApi * pResource,struct CALL_CONTEXT * pCallContext,struct RS_RES_CONTROL_PARAMS_INTERNAL * pParams)132 static NV_STATUS __nvoc_thunk_RmResource_kccuapiControl_Prologue(struct KernelCcuApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
133 return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), pCallContext, pParams);
134 }
135
__nvoc_thunk_GpuResource_kccuapiGetRegBaseOffsetAndSize(struct KernelCcuApi * pGpuResource,struct OBJGPU * pGpu,NvU32 * pOffset,NvU32 * pSize)136 static NV_STATUS __nvoc_thunk_GpuResource_kccuapiGetRegBaseOffsetAndSize(struct KernelCcuApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
137 return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCcuApi_GpuResource.offset), pGpu, pOffset, pSize);
138 }
139
__nvoc_thunk_GpuResource_kccuapiInternalControlForward(struct KernelCcuApi * pGpuResource,NvU32 command,void * pParams,NvU32 size)140 static NV_STATUS __nvoc_thunk_GpuResource_kccuapiInternalControlForward(struct KernelCcuApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
141 return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCcuApi_GpuResource.offset), command, pParams, size);
142 }
143
__nvoc_thunk_RsResource_kccuapiUnmapFrom(struct KernelCcuApi * pResource,RS_RES_UNMAP_FROM_PARAMS * pParams)144 static NV_STATUS __nvoc_thunk_RsResource_kccuapiUnmapFrom(struct KernelCcuApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
145 return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset), pParams);
146 }
147
__nvoc_thunk_RmResource_kccuapiControl_Epilogue(struct KernelCcuApi * pResource,struct CALL_CONTEXT * pCallContext,struct RS_RES_CONTROL_PARAMS_INTERNAL * pParams)148 static void __nvoc_thunk_RmResource_kccuapiControl_Epilogue(struct KernelCcuApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
149 rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), pCallContext, pParams);
150 }
151
__nvoc_thunk_GpuResource_kccuapiGetInternalObjectHandle(struct KernelCcuApi * pGpuResource)152 static NvHandle __nvoc_thunk_GpuResource_kccuapiGetInternalObjectHandle(struct KernelCcuApi *pGpuResource) {
153 return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCcuApi_GpuResource.offset));
154 }
155
__nvoc_thunk_GpuResource_kccuapiControl(struct KernelCcuApi * pGpuResource,struct CALL_CONTEXT * pCallContext,struct RS_RES_CONTROL_PARAMS_INTERNAL * pParams)156 static NV_STATUS __nvoc_thunk_GpuResource_kccuapiControl(struct KernelCcuApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
157 return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCcuApi_GpuResource.offset), pCallContext, pParams);
158 }
159
__nvoc_thunk_RmResource_kccuapiGetMemInterMapParams(struct KernelCcuApi * pRmResource,RMRES_MEM_INTER_MAP_PARAMS * pParams)160 static NV_STATUS __nvoc_thunk_RmResource_kccuapiGetMemInterMapParams(struct KernelCcuApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
161 return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), pParams);
162 }
163
__nvoc_thunk_RsResource_kccuapiControlFilter(struct KernelCcuApi * pResource,struct CALL_CONTEXT * pCallContext,struct RS_RES_CONTROL_PARAMS_INTERNAL * pParams)164 static NV_STATUS __nvoc_thunk_RsResource_kccuapiControlFilter(struct KernelCcuApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
165 return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset), pCallContext, pParams);
166 }
167
__nvoc_thunk_RmResource_kccuapiControlSerialization_Prologue(struct KernelCcuApi * pResource,struct CALL_CONTEXT * pCallContext,struct RS_RES_CONTROL_PARAMS_INTERNAL * pParams)168 static NV_STATUS __nvoc_thunk_RmResource_kccuapiControlSerialization_Prologue(struct KernelCcuApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
169 return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), pCallContext, pParams);
170 }
171
__nvoc_thunk_RsResource_kccuapiCanCopy(struct KernelCcuApi * pResource)172 static NvBool __nvoc_thunk_RsResource_kccuapiCanCopy(struct KernelCcuApi *pResource) {
173 return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset));
174 }
175
__nvoc_thunk_RsResource_kccuapiIsPartialUnmapSupported(struct KernelCcuApi * pResource)176 static NvBool __nvoc_thunk_RsResource_kccuapiIsPartialUnmapSupported(struct KernelCcuApi *pResource) {
177 return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset));
178 }
179
__nvoc_thunk_RsResource_kccuapiPreDestruct(struct KernelCcuApi * pResource)180 static void __nvoc_thunk_RsResource_kccuapiPreDestruct(struct KernelCcuApi *pResource) {
181 resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset));
182 }
183
__nvoc_thunk_RsResource_kccuapiIsDuplicate(struct KernelCcuApi * pResource,NvHandle hMemory,NvBool * pDuplicate)184 static NV_STATUS __nvoc_thunk_RsResource_kccuapiIsDuplicate(struct KernelCcuApi *pResource, NvHandle hMemory, NvBool *pDuplicate) {
185 return resIsDuplicate((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RsResource.offset), hMemory, pDuplicate);
186 }
187
__nvoc_thunk_RmResource_kccuapiControlSerialization_Epilogue(struct KernelCcuApi * pResource,struct CALL_CONTEXT * pCallContext,struct RS_RES_CONTROL_PARAMS_INTERNAL * pParams)188 static void __nvoc_thunk_RmResource_kccuapiControlSerialization_Epilogue(struct KernelCcuApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
189 rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), pCallContext, pParams);
190 }
191
__nvoc_thunk_RmResource_kccuapiAccessCallback(struct KernelCcuApi * pResource,struct RsClient * pInvokingClient,void * pAllocParams,RsAccessRight accessRight)192 static NvBool __nvoc_thunk_RmResource_kccuapiAccessCallback(struct KernelCcuApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
193 return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCcuApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
194 }
195
196 #if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
197 #define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
198 #endif
199
// Exported (RM control) method table for KernelCcuApi: maps each control
// command id (0xcbca01xx) to its _IMPL handler, flags, access rights and
// parameter size. A handler is compiled out to NULL when its flag bit is
// disabled via NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG.
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelCcuApi[] = 
{
    {               /*  [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/      (void (*)(void)) NULL,
#else
        /*pFunc=*/      (void (*)(void)) kccuapiCtrlCmdSubscribe_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/      0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/   0xcbca0101u,
        /*paramSize=*/  sizeof(NV_COUNTER_COLLECTION_UNIT_SUBSCRIBE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_KernelCcuApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/       "kccuapiCtrlCmdSubscribe"
#endif
    },
    {               /*  [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/      (void (*)(void)) NULL,
#else
        /*pFunc=*/      (void (*)(void)) kccuapiCtrlCmdUnsubscribe_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/      0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/   0xcbca0102u,
        /*paramSize=*/  0,
        /*pClassInfo=*/ &(__nvoc_class_def_KernelCcuApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/       "kccuapiCtrlCmdUnsubscribe"
#endif
    },
    {               /*  [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/      (void (*)(void)) NULL,
#else
        /*pFunc=*/      (void (*)(void)) kccuapiCtrlCmdSetStreamState_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/      0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/   0xcbca0103u,
        /*paramSize=*/  sizeof(NV_COUNTER_COLLECTION_UNIT_STREAM_STATE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_KernelCcuApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/       "kccuapiCtrlCmdSetStreamState"
#endif
    },
    {               /*  [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/      (void (*)(void)) NULL,
#else
        /*pFunc=*/      (void (*)(void)) kccuapiCtrlCmdGetStreamState_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/      0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/   0xcbca0104u,
        /*paramSize=*/  sizeof(NV_COUNTER_COLLECTION_UNIT_STREAM_STATE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_KernelCcuApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/       "kccuapiCtrlCmdGetStreamState"
#endif
    },

};
264
// Export descriptor: the four entries in the exported method table above.
// numEntries must match the array length of __nvoc_exported_method_def_KernelCcuApi.
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCcuApi = 
{
    /*numEntries=*/     4,
    /*pExportEntries=*/ __nvoc_exported_method_def_KernelCcuApi
};
270
271 void __nvoc_dtor_GpuResource(GpuResource*);
__nvoc_dtor_KernelCcuApi(KernelCcuApi * pThis)272 void __nvoc_dtor_KernelCcuApi(KernelCcuApi *pThis) {
273 __nvoc_kccuapiDestruct(pThis);
274 __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
275 PORT_UNREFERENCED_VARIABLE(pThis);
276 }
277
// Data-field initializer: KernelCcuApi declares no NVOC-initialized fields,
// so this is a no-op. (Repaired mangled definition line.)
void __nvoc_init_dataField_KernelCcuApi(KernelCcuApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}
281
282 NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
__nvoc_ctor_KernelCcuApi(KernelCcuApi * pThis,struct CALL_CONTEXT * arg_pCallContext,struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)283 NV_STATUS __nvoc_ctor_KernelCcuApi(KernelCcuApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
284 NV_STATUS status = NV_OK;
285 status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
286 if (status != NV_OK) goto __nvoc_ctor_KernelCcuApi_fail_GpuResource;
287 __nvoc_init_dataField_KernelCcuApi(pThis);
288
289 status = __nvoc_kccuapiConstruct(pThis, arg_pCallContext, arg_pParams);
290 if (status != NV_OK) goto __nvoc_ctor_KernelCcuApi_fail__init;
291 goto __nvoc_ctor_KernelCcuApi_exit; // Success
292
293 __nvoc_ctor_KernelCcuApi_fail__init:
294 __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
295 __nvoc_ctor_KernelCcuApi_fail_GpuResource:
296 __nvoc_ctor_KernelCcuApi_exit:
297
298 return status;
299 }
300
__nvoc_init_funcTable_KernelCcuApi_1(KernelCcuApi * pThis)301 static void __nvoc_init_funcTable_KernelCcuApi_1(KernelCcuApi *pThis) {
302 PORT_UNREFERENCED_VARIABLE(pThis);
303
304 pThis->__kccuapiMap__ = &kccuapiMap_IMPL;
305
306 pThis->__kccuapiUnmap__ = &kccuapiUnmap_IMPL;
307
308 pThis->__kccuapiGetMapAddrSpace__ = &kccuapiGetMapAddrSpace_IMPL;
309
310 pThis->__kccuapiGetMemoryMappingDescriptor__ = &kccuapiGetMemoryMappingDescriptor_IMPL;
311
312 #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
313 pThis->__kccuapiCtrlCmdSubscribe__ = &kccuapiCtrlCmdSubscribe_IMPL;
314 #endif
315
316 #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
317 pThis->__kccuapiCtrlCmdUnsubscribe__ = &kccuapiCtrlCmdUnsubscribe_IMPL;
318 #endif
319
320 #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
321 pThis->__kccuapiCtrlCmdSetStreamState__ = &kccuapiCtrlCmdSetStreamState_IMPL;
322 #endif
323
324 #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
325 pThis->__kccuapiCtrlCmdGetStreamState__ = &kccuapiCtrlCmdGetStreamState_IMPL;
326 #endif
327
328 pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_KernelCcuApi_gpuresMap;
329
330 pThis->__nvoc_base_GpuResource.__gpuresUnmap__ = &__nvoc_thunk_KernelCcuApi_gpuresUnmap;
331
332 pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_KernelCcuApi_gpuresGetMapAddrSpace;
333
334 pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__rmresGetMemoryMappingDescriptor__ = &__nvoc_thunk_KernelCcuApi_rmresGetMemoryMappingDescriptor;
335
336 pThis->__kccuapiShareCallback__ = &__nvoc_thunk_GpuResource_kccuapiShareCallback;
337
338 pThis->__kccuapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_kccuapiCheckMemInterUnmap;
339
340 pThis->__kccuapiMapTo__ = &__nvoc_thunk_RsResource_kccuapiMapTo;
341
342 pThis->__kccuapiGetRefCount__ = &__nvoc_thunk_RsResource_kccuapiGetRefCount;
343
344 pThis->__kccuapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kccuapiAddAdditionalDependants;
345
346 pThis->__kccuapiControl_Prologue__ = &__nvoc_thunk_RmResource_kccuapiControl_Prologue;
347
348 pThis->__kccuapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kccuapiGetRegBaseOffsetAndSize;
349
350 pThis->__kccuapiInternalControlForward__ = &__nvoc_thunk_GpuResource_kccuapiInternalControlForward;
351
352 pThis->__kccuapiUnmapFrom__ = &__nvoc_thunk_RsResource_kccuapiUnmapFrom;
353
354 pThis->__kccuapiControl_Epilogue__ = &__nvoc_thunk_RmResource_kccuapiControl_Epilogue;
355
356 pThis->__kccuapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_kccuapiGetInternalObjectHandle;
357
358 pThis->__kccuapiControl__ = &__nvoc_thunk_GpuResource_kccuapiControl;
359
360 pThis->__kccuapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_kccuapiGetMemInterMapParams;
361
362 pThis->__kccuapiControlFilter__ = &__nvoc_thunk_RsResource_kccuapiControlFilter;
363
364 pThis->__kccuapiControlSerialization_Prologue__ = &__nvoc_thunk_RmResource_kccuapiControlSerialization_Prologue;
365
366 pThis->__kccuapiCanCopy__ = &__nvoc_thunk_RsResource_kccuapiCanCopy;
367
368 pThis->__kccuapiIsPartialUnmapSupported__ = &__nvoc_thunk_RsResource_kccuapiIsPartialUnmapSupported;
369
370 pThis->__kccuapiPreDestruct__ = &__nvoc_thunk_RsResource_kccuapiPreDestruct;
371
372 pThis->__kccuapiIsDuplicate__ = &__nvoc_thunk_RsResource_kccuapiIsDuplicate;
373
374 pThis->__kccuapiControlSerialization_Epilogue__ = &__nvoc_thunk_RmResource_kccuapiControlSerialization_Epilogue;
375
376 pThis->__kccuapiAccessCallback__ = &__nvoc_thunk_RmResource_kccuapiAccessCallback;
377 }
378
// Public func-table initializer: delegates to the single generated section.
// (Repaired mangled definition line.)
void __nvoc_init_funcTable_KernelCcuApi(KernelCcuApi *pThis) {
    __nvoc_init_funcTable_KernelCcuApi_1(pThis);
}
382
383 void __nvoc_init_GpuResource(GpuResource*);
__nvoc_init_KernelCcuApi(KernelCcuApi * pThis)384 void __nvoc_init_KernelCcuApi(KernelCcuApi *pThis) {
385 pThis->__nvoc_pbase_KernelCcuApi = pThis;
386 pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
387 pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
388 pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
389 pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
390 pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
391 __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
392 __nvoc_init_funcTable_KernelCcuApi(pThis);
393 }
394
__nvoc_objCreate_KernelCcuApi(KernelCcuApi ** ppThis,Dynamic * pParent,NvU32 createFlags,struct CALL_CONTEXT * arg_pCallContext,struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)395 NV_STATUS __nvoc_objCreate_KernelCcuApi(KernelCcuApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
396 {
397 NV_STATUS status;
398 Object *pParentObj = NULL;
399 KernelCcuApi *pThis;
400
401 // Assign `pThis`, allocating memory unless suppressed by flag.
402 status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(KernelCcuApi), (void**)&pThis, (void**)ppThis);
403 if (status != NV_OK)
404 return status;
405
406 // Zero is the initial value for everything.
407 portMemSet(pThis, 0, sizeof(KernelCcuApi));
408
409 // Initialize runtime type information.
410 __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelCcuApi);
411
412 pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
413
414 // Link the child into the parent if there is one unless flagged not to do so.
415 if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
416 {
417 pParentObj = dynamicCast(pParent, Object);
418 objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
419 }
420 else
421 {
422 pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
423 }
424
425 __nvoc_init_KernelCcuApi(pThis);
426 status = __nvoc_ctor_KernelCcuApi(pThis, arg_pCallContext, arg_pParams);
427 if (status != NV_OK) goto __nvoc_objCreate_KernelCcuApi_cleanup;
428
429 // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
430 *ppThis = pThis;
431
432 return NV_OK;
433
434 __nvoc_objCreate_KernelCcuApi_cleanup:
435
436 // Unlink the child from the parent if it was linked above.
437 if (pParentObj != NULL)
438 objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
439
440 // Do not call destructors here since the constructor already called them.
441 if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
442 portMemSet(pThis, 0, sizeof(KernelCcuApi));
443 else
444 {
445 portMemFree(pThis);
446 *ppThis = NULL;
447 }
448
449 // coverity[leaked_storage:FALSE]
450 return status;
451 }
452
__nvoc_objCreateDynamic_KernelCcuApi(KernelCcuApi ** ppThis,Dynamic * pParent,NvU32 createFlags,va_list args)453 NV_STATUS __nvoc_objCreateDynamic_KernelCcuApi(KernelCcuApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
454 NV_STATUS status;
455 struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
456 struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
457
458 status = __nvoc_objCreate_KernelCcuApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
459
460 return status;
461 }
462
463