#define NVOC_KERN_PMU_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kern_pmu_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xab9d7d = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPmu;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

void __nvoc_init_KernelPmu(KernelPmu*);
void __nvoc_init_funcTable_KernelPmu(KernelPmu*);
NV_STATUS __nvoc_ctor_KernelPmu(KernelPmu*);
void __nvoc_init_dataField_KernelPmu(KernelPmu*);
void __nvoc_dtor_KernelPmu(KernelPmu*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPmu;

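//
// Run-time type information: one NVOC_RTTI record per class in KernelPmu's
// ancestry (KernelPmu itself, OBJENGSTATE, and Object). Each record carries
// the class definition, the destructor to invoke when destruction starts from
// that base, and the byte offset of the embedded base within KernelPmu, which
// the cast-info table below uses for pointer adjustment.
//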
static const struct NVOC_RTTI __nvoc_rtti_KernelPmu_KernelPmu = {
    /*pClassDef=*/          &__nvoc_class_def_KernelPmu,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelPmu,
    /*offset=*/             0,
};

static const struct NVOC_RTTI __nvoc_rtti_KernelPmu_Object = {
    /*pClassDef=*/          &__nvoc_class_def_Object,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(KernelPmu, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelPmu_OBJENGSTATE = {
    /*pClassDef=*/          &__nvoc_class_def_OBJENGSTATE,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(KernelPmu, __nvoc_base_OBJENGSTATE),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelPmu = {
    /*numRelatives=*/       3,
    /*relatives=*/ {
        &__nvoc_rtti_KernelPmu_KernelPmu,
        &__nvoc_rtti_KernelPmu_OBJENGSTATE,
        &__nvoc_rtti_KernelPmu_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPmu =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(KernelPmu),
        /*classId=*/            classId(KernelPmu),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "KernelPmu",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelPmu,
    /*pCastInfo=*/          &__nvoc_castinfo_KernelPmu,
    /*pExportInfo=*/        &__nvoc_export_info_KernelPmu
};

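//
// Thunks bridging the KernelPmu and OBJENGSTATE vtables. The first two adjust
// an OBJENGSTATE pointer down to the containing KernelPmu before calling the
// KernelPmu overrides; the remainder adjust a KernelPmu pointer up to its
// OBJENGSTATE base so the inherited engine-state defaults can be reused.
//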
static NV_STATUS __nvoc_thunk_KernelPmu_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPmu, ENGDESCRIPTOR engDesc) {
    return kpmuConstructEngine(pGpu, (struct KernelPmu *)(((unsigned char *)pKernelPmu) - __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), engDesc);
}

static NV_STATUS __nvoc_thunk_KernelPmu_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPmu) {
    return kpmuStateInitLocked(pGpu, (struct KernelPmu *)(((unsigned char *)pKernelPmu) - __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateLoad(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
    return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateUnload(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
    return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreLoad(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
    return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePostUnload(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0);
}

static void __nvoc_thunk_OBJENGSTATE_kpmuStateDestroy(POBJGPU pGpu, struct KernelPmu *pEngstate) {
    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreUnload(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateInitUnlocked(POBJGPU pGpu, struct KernelPmu *pEngstate) {
    return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

static void __nvoc_thunk_OBJENGSTATE_kpmuInitMissing(POBJGPU pGpu, struct KernelPmu *pEngstate) {
    engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreInitLocked(POBJGPU pGpu, struct KernelPmu *pEngstate) {
    return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreInitUnlocked(POBJGPU pGpu, struct KernelPmu *pEngstate) {
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePostLoad(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0);
}

static NvBool __nvoc_thunk_OBJENGSTATE_kpmuIsPresent(POBJGPU pGpu, struct KernelPmu *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset));
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPmu =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/  0
};

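//
// Constructor/destructor chaining: the destructor runs the KernelPmu-specific
// cleanup before the OBJENGSTATE base destructor, and the constructor builds
// the OBJENGSTATE base before initializing KernelPmu data fields.
//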
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_dtor_KernelPmu(KernelPmu *pThis) {
    __nvoc_kpmuDestruct(pThis);
    __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_KernelPmu(KernelPmu *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
NV_STATUS __nvoc_ctor_KernelPmu(KernelPmu *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    if (status != NV_OK) goto __nvoc_ctor_KernelPmu_fail_OBJENGSTATE;
    __nvoc_init_dataField_KernelPmu(pThis);
    goto __nvoc_ctor_KernelPmu_exit; // Success

__nvoc_ctor_KernelPmu_fail_OBJENGSTATE:
__nvoc_ctor_KernelPmu_exit:

    return status;
}

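//
// Virtual function table setup: KernelPmu supplies its own ConstructEngine and
// StateInitLocked implementations (and exposes them to the OBJENGSTATE vtable
// through the thunks above); every other engine-state method falls through to
// the inherited OBJENGSTATE defaults.
//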
static void __nvoc_init_funcTable_KernelPmu_1(KernelPmu *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__kpmuConstructEngine__ = &kpmuConstructEngine_IMPL;

    pThis->__kpmuStateInitLocked__ = &kpmuStateInitLocked_IMPL;

    pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelPmu_engstateConstructEngine;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelPmu_engstateStateInitLocked;

    pThis->__kpmuStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateLoad;

    pThis->__kpmuStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateUnload;

    pThis->__kpmuStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreLoad;

    pThis->__kpmuStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePostUnload;

    pThis->__kpmuStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateDestroy;

    pThis->__kpmuStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreUnload;

    pThis->__kpmuStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateInitUnlocked;

    pThis->__kpmuInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kpmuInitMissing;

    pThis->__kpmuStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreInitLocked;

    pThis->__kpmuStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreInitUnlocked;

    pThis->__kpmuStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePostLoad;

    pThis->__kpmuIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kpmuIsPresent;
}

void __nvoc_init_funcTable_KernelPmu(KernelPmu *pThis) {
    __nvoc_init_funcTable_KernelPmu_1(pThis);
}

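//
// Object initialization: fix up the per-base pointers, initialize the
// OBJENGSTATE base, then populate the function table.
//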
void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_KernelPmu(KernelPmu *pThis) {
    pThis->__nvoc_pbase_KernelPmu = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_KernelPmu(pThis);
}

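//
// Allocation and construction entry point: allocates (or reuses, for in-place
// construction) the object memory, wires up RTTI and parentage, then runs the
// constructor chain. On constructor failure the memory is zeroed or freed
// without re-running destructors.
//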
NV_STATUS __nvoc_objCreate_KernelPmu(KernelPmu **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    KernelPmu *pThis;

    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(KernelPmu), (void**)&pThis, (void**)ppThis);
    if (status != NV_OK)
        return status;

    portMemSet(pThis, 0, sizeof(KernelPmu));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelPmu);

    pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.createFlags = createFlags;

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_KernelPmu(pThis);
    status = __nvoc_ctor_KernelPmu(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_KernelPmu_cleanup;

    *ppThis = pThis;

    return NV_OK;

__nvoc_objCreate_KernelPmu_cleanup:
    // do not call destructors here since the constructor already called them
    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
        portMemSet(pThis, 0, sizeof(KernelPmu));
    else
        portMemFree(pThis);

    // coverity[leaked_storage:FALSE]
    return status;
}

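//
// Example call site for __nvoc_objCreate_KernelPmu (a sketch only; using pGpu
// as the parent is an assumption here, and in practice engine objects such as
// KernelPmu are instantiated through RM's GPU child-construction machinery
// rather than called by hand):
//
//     KernelPmu *pKernelPmu = NULL;
//     NV_STATUS  status = __nvoc_objCreate_KernelPmu(&pKernelPmu,
//                                                    staticCast(pGpu, Dynamic),
//                                                    0 /* createFlags */);
//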
NV_STATUS __nvoc_objCreateDynamic_KernelPmu(KernelPmu **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_KernelPmu(ppThis, pParent, createFlags);

    return status;
}