1 #ifndef _G_KERN_PMU_NVOC_H_
2 #define _G_KERN_PMU_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kern_pmu_nvoc.h"
33 
34 #ifndef KERNEL_PMU_H
35 #define KERNEL_PMU_H
36 
37 /******************************************************************************
38 *
39 *   Kernel Pmu module header
40 *   This file contains functions managing PMU core on CPU RM
41 *
42 ******************************************************************************/
43 
44 #include "gpu/gpu.h"
45 #include "gpu/eng_state.h"
46 #include "liblogdecode.h"
47 
48 #define PMU_LOG_BUFFER_MAX_SIZE 0x1000
49 
50 /*!
51  * KernelPmu is a logical abstraction of the GPU Pmu Engine. The
52  * Public API of the Pmu Engine is exposed through this object, and any
53  * interfaces which do not manage the underlying Pmu hardware can be
54  * managed by this object.
55  */
56 #ifdef NVOC_KERN_PMU_H_PRIVATE_ACCESS_ALLOWED
57 #define PRIVATE_FIELD(x) x
58 #else
59 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
60 #endif
struct KernelPmu {
    // NVOC run-time type information; kept as the first member so the RTTI
    // pointer can be located from an object pointer (NVOC convention —
    // do not reorder fields; layout is ABI for generated code).
    const struct NVOC_RTTI *__nvoc_rtti;
    // Embedded base-class instance: KernelPmu derives from OBJENGSTATE.
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    // Cached ancestor sub-object pointers used by static casts up the
    // class hierarchy (Object -> OBJENGSTATE -> KernelPmu).
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct KernelPmu *__nvoc_pbase_KernelPmu;
    // Per-object virtual function table: each *_DISPATCH helper below
    // forwards through one of these pointers. Presumably populated by the
    // NVOC-generated constructor — the assignment is not visible here.
    NV_STATUS (*__kpmuConstructEngine__)(struct OBJGPU *, struct KernelPmu *, ENGDESCRIPTOR);
    NV_STATUS (*__kpmuStateInitLocked__)(struct OBJGPU *, struct KernelPmu *);
    NV_STATUS (*__kpmuStateLoad__)(POBJGPU, struct KernelPmu *, NvU32);
    NV_STATUS (*__kpmuStateUnload__)(POBJGPU, struct KernelPmu *, NvU32);
    NV_STATUS (*__kpmuStatePreLoad__)(POBJGPU, struct KernelPmu *, NvU32);
    NV_STATUS (*__kpmuStatePostUnload__)(POBJGPU, struct KernelPmu *, NvU32);
    void (*__kpmuStateDestroy__)(POBJGPU, struct KernelPmu *);
    NV_STATUS (*__kpmuStatePreUnload__)(POBJGPU, struct KernelPmu *, NvU32);
    NV_STATUS (*__kpmuStateInitUnlocked__)(POBJGPU, struct KernelPmu *);
    void (*__kpmuInitMissing__)(POBJGPU, struct KernelPmu *);
    NV_STATUS (*__kpmuStatePreInitLocked__)(POBJGPU, struct KernelPmu *);
    NV_STATUS (*__kpmuStatePreInitUnlocked__)(POBJGPU, struct KernelPmu *);
    NV_STATUS (*__kpmuStatePostLoad__)(POBJGPU, struct KernelPmu *, NvU32);
    NvBool (*__kpmuIsPresent__)(POBJGPU, struct KernelPmu *);
    // LIBOS log-decode state for PMU logging (type from liblogdecode.h).
    LIBOS_LOG_DECODE logDecode;
    // Print buffer for decoded PMU log output; size presumably bounded by
    // PMU_LOG_BUFFER_MAX_SIZE — confirm against kpmuInitLibosLoggingStructures_IMPL.
    NvU32 printBufSize;
    NvU8 *pPrintBuf;
    // Log-decoder ELF image and its size in bytes; ownership/allocation is
    // handled by the init/free LibosLoggingStructures pair declared below.
    void *pLogElf;
    NvU32 logElfSize;
};
87 
88 #ifndef __NVOC_CLASS_KernelPmu_TYPEDEF__
89 #define __NVOC_CLASS_KernelPmu_TYPEDEF__
90 typedef struct KernelPmu KernelPmu;
91 #endif /* __NVOC_CLASS_KernelPmu_TYPEDEF__ */
92 
93 #ifndef __nvoc_class_id_KernelPmu
94 #define __nvoc_class_id_KernelPmu 0xab9d7d
95 #endif /* __nvoc_class_id_KernelPmu */
96 
97 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPmu;
98 
99 #define __staticCast_KernelPmu(pThis) \
100     ((pThis)->__nvoc_pbase_KernelPmu)
101 
102 #ifdef __nvoc_kern_pmu_h_disabled
103 #define __dynamicCast_KernelPmu(pThis) ((KernelPmu*)NULL)
104 #else //__nvoc_kern_pmu_h_disabled
105 #define __dynamicCast_KernelPmu(pThis) \
106     ((KernelPmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelPmu)))
107 #endif //__nvoc_kern_pmu_h_disabled
108 
109 #define PDB_PROP_KPMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
110 #define PDB_PROP_KPMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
111 
112 NV_STATUS __nvoc_objCreateDynamic_KernelPmu(KernelPmu**, Dynamic*, NvU32, va_list);
113 
114 NV_STATUS __nvoc_objCreate_KernelPmu(KernelPmu**, Dynamic*, NvU32);
115 #define __objCreate_KernelPmu(ppNewObj, pParent, createFlags) \
116     __nvoc_objCreate_KernelPmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
117 
118 #define kpmuConstructEngine(pGpu, pKernelPmu, engDesc) kpmuConstructEngine_DISPATCH(pGpu, pKernelPmu, engDesc)
119 #define kpmuStateInitLocked(pGpu, pKernelPmu) kpmuStateInitLocked_DISPATCH(pGpu, pKernelPmu)
120 #define kpmuStateLoad(pGpu, pEngstate, arg0) kpmuStateLoad_DISPATCH(pGpu, pEngstate, arg0)
121 #define kpmuStateUnload(pGpu, pEngstate, arg0) kpmuStateUnload_DISPATCH(pGpu, pEngstate, arg0)
122 #define kpmuStatePreLoad(pGpu, pEngstate, arg0) kpmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
123 #define kpmuStatePostUnload(pGpu, pEngstate, arg0) kpmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
124 #define kpmuStateDestroy(pGpu, pEngstate) kpmuStateDestroy_DISPATCH(pGpu, pEngstate)
125 #define kpmuStatePreUnload(pGpu, pEngstate, arg0) kpmuStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
126 #define kpmuStateInitUnlocked(pGpu, pEngstate) kpmuStateInitUnlocked_DISPATCH(pGpu, pEngstate)
127 #define kpmuInitMissing(pGpu, pEngstate) kpmuInitMissing_DISPATCH(pGpu, pEngstate)
128 #define kpmuStatePreInitLocked(pGpu, pEngstate) kpmuStatePreInitLocked_DISPATCH(pGpu, pEngstate)
129 #define kpmuStatePreInitUnlocked(pGpu, pEngstate) kpmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
130 #define kpmuStatePostLoad(pGpu, pEngstate, arg0) kpmuStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
131 #define kpmuIsPresent(pGpu, pEngstate) kpmuIsPresent_DISPATCH(pGpu, pEngstate)
132 NV_STATUS kpmuConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, ENGDESCRIPTOR engDesc);
133 
134 static inline NV_STATUS kpmuConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, ENGDESCRIPTOR engDesc) {
135     return pKernelPmu->__kpmuConstructEngine__(pGpu, pKernelPmu, engDesc);
136 }
137 
138 NV_STATUS kpmuStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu);
139 
140 static inline NV_STATUS kpmuStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu) {
141     return pKernelPmu->__kpmuStateInitLocked__(pGpu, pKernelPmu);
142 }
143 
144 static inline NV_STATUS kpmuStateLoad_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
145     return pEngstate->__kpmuStateLoad__(pGpu, pEngstate, arg0);
146 }
147 
148 static inline NV_STATUS kpmuStateUnload_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
149     return pEngstate->__kpmuStateUnload__(pGpu, pEngstate, arg0);
150 }
151 
152 static inline NV_STATUS kpmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
153     return pEngstate->__kpmuStatePreLoad__(pGpu, pEngstate, arg0);
154 }
155 
156 static inline NV_STATUS kpmuStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
157     return pEngstate->__kpmuStatePostUnload__(pGpu, pEngstate, arg0);
158 }
159 
160 static inline void kpmuStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) {
161     pEngstate->__kpmuStateDestroy__(pGpu, pEngstate);
162 }
163 
164 static inline NV_STATUS kpmuStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
165     return pEngstate->__kpmuStatePreUnload__(pGpu, pEngstate, arg0);
166 }
167 
168 static inline NV_STATUS kpmuStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) {
169     return pEngstate->__kpmuStateInitUnlocked__(pGpu, pEngstate);
170 }
171 
172 static inline void kpmuInitMissing_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) {
173     pEngstate->__kpmuInitMissing__(pGpu, pEngstate);
174 }
175 
176 static inline NV_STATUS kpmuStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) {
177     return pEngstate->__kpmuStatePreInitLocked__(pGpu, pEngstate);
178 }
179 
180 static inline NV_STATUS kpmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) {
181     return pEngstate->__kpmuStatePreInitUnlocked__(pGpu, pEngstate);
182 }
183 
184 static inline NV_STATUS kpmuStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) {
185     return pEngstate->__kpmuStatePostLoad__(pGpu, pEngstate, arg0);
186 }
187 
188 static inline NvBool kpmuIsPresent_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) {
189     return pEngstate->__kpmuIsPresent__(pGpu, pEngstate);
190 }
191 
192 void kpmuDestruct_IMPL(struct KernelPmu *pKernelPmu);
193 
194 #define __nvoc_kpmuDestruct(pKernelPmu) kpmuDestruct_IMPL(pKernelPmu)
195 NV_STATUS kpmuInitLibosLoggingStructures_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu);
196 
197 #ifdef __nvoc_kern_pmu_h_disabled
static inline NV_STATUS kpmuInitLibosLoggingStructures(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu) {
    // Stub used when the KernelPmu engine is compiled out
    // (__nvoc_kern_pmu_h_disabled): flags the call as an error at this
    // precompile-assert point and reports the operation as unsupported.
    NV_ASSERT_FAILED_PRECOMP("KernelPmu was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
202 #else //__nvoc_kern_pmu_h_disabled
203 #define kpmuInitLibosLoggingStructures(pGpu, pKernelPmu) kpmuInitLibosLoggingStructures_IMPL(pGpu, pKernelPmu)
204 #endif //__nvoc_kern_pmu_h_disabled
205 
206 void kpmuFreeLibosLoggingStructures_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu);
207 
208 #ifdef __nvoc_kern_pmu_h_disabled
static inline void kpmuFreeLibosLoggingStructures(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu) {
    // Stub used when the KernelPmu engine is compiled out
    // (__nvoc_kern_pmu_h_disabled): nothing to free; flag the call as an error.
    NV_ASSERT_FAILED_PRECOMP("KernelPmu was disabled!");
}
212 #else //__nvoc_kern_pmu_h_disabled
213 #define kpmuFreeLibosLoggingStructures(pGpu, pKernelPmu) kpmuFreeLibosLoggingStructures_IMPL(pGpu, pKernelPmu)
214 #endif //__nvoc_kern_pmu_h_disabled
215 
216 void kpmuLogBuf_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, NvU8 *pBuf, NvU32 bufSize);
217 
218 #ifdef __nvoc_kern_pmu_h_disabled
static inline void kpmuLogBuf(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, NvU8 *pBuf, NvU32 bufSize) {
    // Stub used when the KernelPmu engine is compiled out
    // (__nvoc_kern_pmu_h_disabled): the log buffer is silently dropped after
    // flagging the call as an error.
    NV_ASSERT_FAILED_PRECOMP("KernelPmu was disabled!");
}
222 #else //__nvoc_kern_pmu_h_disabled
223 #define kpmuLogBuf(pGpu, pKernelPmu, pBuf, bufSize) kpmuLogBuf_IMPL(pGpu, pKernelPmu, pBuf, bufSize)
224 #endif //__nvoc_kern_pmu_h_disabled
225 
226 #undef PRIVATE_FIELD
227 
228 
229 #endif // KERNEL_PMU_H
230 
231 #ifdef __cplusplus
232 } // extern "C"
233 #endif
234 #endif // _G_KERN_PMU_NVOC_H_
235