/*
 * NVOC-generated glue for the KernelPerf class: RTTI tables, the class
 * definition, and the thunks bridging the KernelPerf vtable to its
 * OBJENGSTATE base class.  Generated code — do not edit by hand.
 */
#define NVOC_KERN_PERF_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kern_perf_nvoc.h"

/* Link-time uniqueness guard: two classes generated with the same class id
 * (0xc53a57) would collide on this symbol in DEBUG builds. */
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xc53a57 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPerf;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

/* Forward declarations of the generated lifecycle helpers defined below. */
void __nvoc_init_KernelPerf(KernelPerf*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelPerf(KernelPerf*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelPerf(KernelPerf*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelPerf(KernelPerf*, RmHalspecOwner* );
void __nvoc_dtor_KernelPerf(KernelPerf*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPerf;

/* RTTI entry for the most-derived class itself: offset 0, direct dtor. */
static const struct NVOC_RTTI __nvoc_rtti_KernelPerf_KernelPerf = {
    /*pClassDef=*/ &__nvoc_class_def_KernelPerf,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelPerf,
    /*offset=*/ 0,
};

/* RTTI entry for the Object grandparent, embedded inside OBJENGSTATE. */
static const struct NVOC_RTTI __nvoc_rtti_KernelPerf_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelPerf, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};

/* RTTI entry for the immediate OBJENGSTATE base class. */
static const struct NVOC_RTTI __nvoc_rtti_KernelPerf_OBJENGSTATE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelPerf, __nvoc_base_OBJENGSTATE),
};

/* Cast table consulted by dynamicCast(): the class and its two ancestors. */
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelPerf = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_KernelPerf_KernelPerf,
        &__nvoc_rtti_KernelPerf_OBJENGSTATE,
        &__nvoc_rtti_KernelPerf_Object,
    },
};

/* Runtime class descriptor registered with the NVOC object system. */
const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPerf =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(KernelPerf),
        /*classId=*/ classId(KernelPerf),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "KernelPerf",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelPerf,
    /*pCastInfo=*/ &__nvoc_castinfo_KernelPerf,
    /*pExportInfo=*/ &__nvoc_export_info_KernelPerf
};

/*
 * Down-thunks: installed in the OBJENGSTATE vtable.  Each receives a
 * base-class pointer, subtracts the recorded base offset to recover the
 * containing KernelPerf, and forwards to the kperf* override.
 */
static NV_STATUS __nvoc_thunk_KernelPerf_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf, ENGDESCRIPTOR engDesc) {
    return kperfConstructEngine(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), engDesc);
}

static NV_STATUS __nvoc_thunk_KernelPerf_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf) {
    return kperfStateInitLocked(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_KernelPerf_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf, NvU32 flags) {
    return kperfStateLoad(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), flags);
}

static NV_STATUS __nvoc_thunk_KernelPerf_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf, NvU32 flags) {
    return kperfStateUnload(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), flags);
}

static void __nvoc_thunk_KernelPerf_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf) {
    kperfStateDestroy(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

/*
 * Up-thunk: installed in a KernelPerf vtable slot that KernelPerf does not
 * override.  Adds the base offset and forwards to the inherited
 * OBJENGSTATE implementation.
 */
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreLoad(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
    return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0);
}
/*
 * Up-thunks (continued): each forwards a non-overridden kperf* vtable slot
 * to the inherited OBJENGSTATE implementation by adding the base offset to
 * the object pointer.
 */
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePostUnload(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreUnload(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStateInitUnlocked(POBJGPU pGpu, struct KernelPerf *pEngstate) {
    return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

static void __nvoc_thunk_OBJENGSTATE_kperfInitMissing(POBJGPU pGpu, struct KernelPerf *pEngstate) {
    engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreInitLocked(POBJGPU pGpu, struct KernelPerf *pEngstate) {
    return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreInitUnlocked(POBJGPU pGpu, struct KernelPerf *pEngstate) {
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePostLoad(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0);
}

static NvBool __nvoc_thunk_OBJENGSTATE_kperfIsPresent(POBJGPU pGpu, struct KernelPerf *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset));
}

/* KernelPerf exports no RM control entry points. */
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPerf =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);

/* Destructor: KernelPerf adds no cleanup of its own; chain to the base. */
void __nvoc_dtor_KernelPerf(KernelPerf *pThis) {
    __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

/* Per-HAL-variant data-field initialization.  No HAL-conditional fields are
 * generated for KernelPerf, so this only suppresses unused warnings. */
void __nvoc_init_dataField_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) {
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );

/* Constructor: construct the OBJENGSTATE base first, then initialize this
 * class's data fields.  On base-ctor failure the error is returned as-is. */
NV_STATUS __nvoc_ctor_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    if (status != NV_OK) goto __nvoc_ctor_KernelPerf_fail_OBJENGSTATE;
    __nvoc_init_dataField_KernelPerf(pThis, pRmhalspecowner);
    goto __nvoc_ctor_KernelPerf_exit; // Success

__nvoc_ctor_KernelPerf_fail_OBJENGSTATE:
__nvoc_ctor_KernelPerf_exit:

    return status;
}

/*
 * Wire the virtual function tables: overridden slots point at the kperf*
 * _IMPL functions, the base OBJENGSTATE slots point at the down-thunks, and
 * non-overridden kperf* slots point at the up-thunks.
 */
static void __nvoc_init_funcTable_KernelPerf_1(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) {
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);

    // Methods KernelPerf overrides.
    pThis->__kperfConstructEngine__ = &kperfConstructEngine_IMPL;

    pThis->__kperfStateInitLocked__ = &kperfStateInitLocked_IMPL;

    pThis->__kperfStateLoad__ = &kperfStateLoad_IMPL;

    pThis->__kperfStateUnload__ = &kperfStateUnload_IMPL;

    pThis->__kperfStateDestroy__ = &kperfStateDestroy_IMPL;

    // Base-class slots routed back into the overrides above.
    pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelPerf_engstateConstructEngine;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelPerf_engstateStateInitLocked;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelPerf_engstateStateLoad;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelPerf_engstateStateUnload;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelPerf_engstateStateDestroy;

    // Inherited methods forwarded to OBJENGSTATE.
    pThis->__kperfStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreLoad;

    pThis->__kperfStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePostUnload;

    pThis->__kperfStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreUnload;

    pThis->__kperfStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kperfStateInitUnlocked;

    pThis->__kperfInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kperfInitMissing;

    pThis->__kperfStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreInitLocked;

    pThis->__kperfStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreInitUnlocked;

    pThis->__kperfStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePostLoad;

    pThis->__kperfIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kperfIsPresent;
}

void __nvoc_init_funcTable_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_KernelPerf_1(pThis, pRmhalspecowner);
}

void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);

/* Initialize the object: cache the per-ancestor base pointers, initialize
 * the base class, then populate the function tables. */
void __nvoc_init_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_KernelPerf = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_KernelPerf(pThis, pRmhalspecowner);
}

/*
 * Create a KernelPerf object: allocate (or construct in place), zero the
 * storage, set up RTTI, attach to the parent Object tree, locate the
 * RmHalspecOwner ancestor, then run init and the constructor chain.
 * On failure the partially-built object is freed (or re-zeroed in place).
 */
NV_STATUS __nvoc_objCreate_KernelPerf(KernelPerf **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    KernelPerf *pThis;
    RmHalspecOwner *pRmhalspecowner;

    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(KernelPerf), (void**)&pThis, (void**)ppThis);
    if (status != NV_OK)
        return status;

    portMemSet(pThis, 0, sizeof(KernelPerf));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelPerf);

    pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.createFlags = createFlags;

    // Attach to the parent unless the parent is only a halspec provider.
    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    // The halspec owner is either the parent itself or an ancestor of it.
    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);

    __nvoc_init_KernelPerf(pThis, pRmhalspecowner);
    status = __nvoc_ctor_KernelPerf(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_KernelPerf_cleanup;

    *ppThis = pThis;

    return NV_OK;

__nvoc_objCreate_KernelPerf_cleanup:
    // do not call destructors here since the constructor already called them
    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
        portMemSet(pThis, 0, sizeof(KernelPerf));
    else
        portMemFree(pThis);

    // coverity[leaked_storage:FALSE]
    return status;
}
/*
 * Dynamic-creation entry point registered in the class definition.
 * KernelPerf takes no variadic construction arguments, so the va_list is
 * unused and the call forwards directly to __nvoc_objCreate_KernelPerf.
 */
NV_STATUS __nvoc_objCreateDynamic_KernelPerf(KernelPerf **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_KernelPerf(ppThis, pParent, createFlags);
}