1 #ifndef _G_KERN_PERF_NVOC_H_
2 #define _G_KERN_PERF_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kern_perf_nvoc.h"
33 
34 #ifndef KERNEL_PERF_H
35 #define KERNEL_PERF_H
36 
37 /******************************************************************************
38 *
39 *       Kernel Perf module header
40 *       This file contains functions managing performance on CPU RM
41 *
42 ******************************************************************************/
43 /* ------------------------ Includes --------------------------------------- */
44 #include "gpu/gpu.h"
45 #include "gpu/eng_state.h"
46 #include "gpu/gpu_halspec.h"
47 #include "gpu/perf/kern_perf_gpuboostsync.h"
48 #include "ctrl/ctrl2080/ctrl2080perf.h"
49 
50 /* ----------------------------- Macros ------------------------------------- */
51 /*!
52  * The rule of reentrancy is that routine can't run unless its flag and all lower
53  * flags are clear. This is Kernel Perf reentrancy function ID for 1HZ Callback.
54  */
55 #define KERNEL_PERF_REENTRANCY_TIMER_1HZ_CALLBACK    NVBIT(0)
56 
57 /* -------------------------- Datatypes ------------------------------------- */
58 /*!
59  * KernelPerf is a logical abstraction of the GPU Perf Engine. The
60  * Public API of the Perf Engine is exposed through this object, and any
61  * interfaces which do not manage the underlying Perf hardware can be
62  * managed by this object.
63  */
64 
65 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
66 // the matching C source file, but causes diagnostics to be issued if another
67 // source file references the field.
68 #ifdef NVOC_KERN_PERF_H_PRIVATE_ACCESS_ALLOWED
69 #define PRIVATE_FIELD(x) x
70 #else
71 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
72 #endif
73 
/*!
 * NVOC-generated class layout for KernelPerf.
 * Field order is ABI: the RTTI pointer, base-class instance, and cached
 * ancestor pointers are laid out by the NVOC generator — do not reorder.
 */
struct KernelPerf {
    const struct NVOC_RTTI *__nvoc_rtti;            // Run-time type info for dynamic casts
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;     // Embedded base class (KernelPerf derives from OBJENGSTATE)
    struct Object *__nvoc_pbase_Object;             // Cached ancestor pointer: Object
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;   // Cached ancestor pointer: OBJENGSTATE
    struct KernelPerf *__nvoc_pbase_KernelPerf;     // Cached pointer to this (most-derived) class
    // --- Virtual method table: each entry is invoked via the *_DISPATCH
    // wrappers defined later in this header. ---
    NV_STATUS (*__kperfConstructEngine__)(struct OBJGPU *, struct KernelPerf *, ENGDESCRIPTOR);
    NV_STATUS (*__kperfStateInitLocked__)(struct OBJGPU *, struct KernelPerf *);
    NV_STATUS (*__kperfStateLoad__)(struct OBJGPU *, struct KernelPerf *, NvU32);
    NV_STATUS (*__kperfStateUnload__)(struct OBJGPU *, struct KernelPerf *, NvU32);
    void (*__kperfStateDestroy__)(struct OBJGPU *, struct KernelPerf *);
    NV_STATUS (*__kperfGpuBoostSyncStateInit__)(struct OBJGPU *, struct KernelPerf *);
    NV_STATUS (*__kperfStatePreLoad__)(POBJGPU, struct KernelPerf *, NvU32);
    NV_STATUS (*__kperfStatePostUnload__)(POBJGPU, struct KernelPerf *, NvU32);
    NV_STATUS (*__kperfStatePreUnload__)(POBJGPU, struct KernelPerf *, NvU32);
    NV_STATUS (*__kperfStateInitUnlocked__)(POBJGPU, struct KernelPerf *);
    void (*__kperfInitMissing__)(POBJGPU, struct KernelPerf *);
    NV_STATUS (*__kperfStatePreInitLocked__)(POBJGPU, struct KernelPerf *);
    NV_STATUS (*__kperfStatePreInitUnlocked__)(POBJGPU, struct KernelPerf *);
    NV_STATUS (*__kperfStatePostLoad__)(POBJGPU, struct KernelPerf *, NvU32);
    NvBool (*__kperfIsPresent__)(POBJGPU, struct KernelPerf *);
    // --- Data members ---
    struct KERNEL_PERF_GPU_BOOST_SYNC sliGpuBoostSync;  // SLI GPU Boost synchronization state
    NvU32 reentrancyMask;                               // Bitmask of KERNEL_PERF_REENTRANCY_* flags currently held
};
98 
99 #ifndef __NVOC_CLASS_KernelPerf_TYPEDEF__
100 #define __NVOC_CLASS_KernelPerf_TYPEDEF__
101 typedef struct KernelPerf KernelPerf;
102 #endif /* __NVOC_CLASS_KernelPerf_TYPEDEF__ */
103 
104 #ifndef __nvoc_class_id_KernelPerf
105 #define __nvoc_class_id_KernelPerf 0xc53a57
106 #endif /* __nvoc_class_id_KernelPerf */
107 
108 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPerf;
109 
// Downcast helper: returns the cached KernelPerf base pointer (no RTTI walk).
#define __staticCast_KernelPerf(pThis) \
    ((pThis)->__nvoc_pbase_KernelPerf)

#ifdef __nvoc_kern_perf_h_disabled
// Engine compiled out: dynamic casts always fail.
#define __dynamicCast_KernelPerf(pThis) ((KernelPerf*)NULL)
#else //__nvoc_kern_perf_h_disabled
// RTTI-checked downcast; yields NULL when pThis is not a KernelPerf.
#define __dynamicCast_KernelPerf(pThis) \
    ((KernelPerf*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelPerf)))
#endif //__nvoc_kern_perf_h_disabled

// PDB property IS_MISSING is inherited from the OBJENGSTATE base.
#define PDB_PROP_KPERF_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KPERF_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_KernelPerf(KernelPerf**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelPerf(KernelPerf**, Dynamic*, NvU32);
// Convenience wrapper: creates a KernelPerf object parented to pParent.
#define __objCreate_KernelPerf(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelPerf((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

// Public entry points: each macro routes the call through the corresponding
// *_DISPATCH wrapper defined later in this header.
#define kperfConstructEngine(pGpu, pKernelPerf, engDesc) kperfConstructEngine_DISPATCH(pGpu, pKernelPerf, engDesc)
#define kperfStateInitLocked(pGpu, pKernelPerf) kperfStateInitLocked_DISPATCH(pGpu, pKernelPerf)
#define kperfStateLoad(pGpu, pKernelPerf, flags) kperfStateLoad_DISPATCH(pGpu, pKernelPerf, flags)
#define kperfStateUnload(pGpu, pKernelPerf, flags) kperfStateUnload_DISPATCH(pGpu, pKernelPerf, flags)
#define kperfStateDestroy(pGpu, pKernelPerf) kperfStateDestroy_DISPATCH(pGpu, pKernelPerf)
#define kperfGpuBoostSyncStateInit(pGpu, pKernelPerf) kperfGpuBoostSyncStateInit_DISPATCH(pGpu, pKernelPerf)
// The _HAL variant resolves to the same dispatch wrapper.
#define kperfGpuBoostSyncStateInit_HAL(pGpu, pKernelPerf) kperfGpuBoostSyncStateInit_DISPATCH(pGpu, pKernelPerf)
#define kperfStatePreLoad(pGpu, pEngstate, arg0) kperfStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kperfStatePostUnload(pGpu, pEngstate, arg0) kperfStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kperfStatePreUnload(pGpu, pEngstate, arg0) kperfStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kperfStateInitUnlocked(pGpu, pEngstate) kperfStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kperfInitMissing(pGpu, pEngstate) kperfInitMissing_DISPATCH(pGpu, pEngstate)
#define kperfStatePreInitLocked(pGpu, pEngstate) kperfStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kperfStatePreInitUnlocked(pGpu, pEngstate) kperfStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kperfStatePostLoad(pGpu, pEngstate, arg0) kperfStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kperfIsPresent(pGpu, pEngstate) kperfIsPresent_DISPATCH(pGpu, pEngstate)
145 NV_STATUS kperfConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, ENGDESCRIPTOR engDesc);
146 
kperfConstructEngine_DISPATCH(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf,ENGDESCRIPTOR engDesc)147 static inline NV_STATUS kperfConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, ENGDESCRIPTOR engDesc) {
148     return pKernelPerf->__kperfConstructEngine__(pGpu, pKernelPerf, engDesc);
149 }
150 
151 NV_STATUS kperfStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf);
152 
kperfStateInitLocked_DISPATCH(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf)153 static inline NV_STATUS kperfStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf) {
154     return pKernelPerf->__kperfStateInitLocked__(pGpu, pKernelPerf);
155 }
156 
157 NV_STATUS kperfStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags);
158 
kperfStateLoad_DISPATCH(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf,NvU32 flags)159 static inline NV_STATUS kperfStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags) {
160     return pKernelPerf->__kperfStateLoad__(pGpu, pKernelPerf, flags);
161 }
162 
163 NV_STATUS kperfStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags);
164 
kperfStateUnload_DISPATCH(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf,NvU32 flags)165 static inline NV_STATUS kperfStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags) {
166     return pKernelPerf->__kperfStateUnload__(pGpu, pKernelPerf, flags);
167 }
168 
169 void kperfStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf);
170 
kperfStateDestroy_DISPATCH(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf)171 static inline void kperfStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf) {
172     pKernelPerf->__kperfStateDestroy__(pGpu, pKernelPerf);
173 }
174 
kperfGpuBoostSyncStateInit_56cd7a(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf)175 static inline NV_STATUS kperfGpuBoostSyncStateInit_56cd7a(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf) {
176     return NV_OK;
177 }
178 
179 NV_STATUS kperfGpuBoostSyncStateInit_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf);
180 
kperfGpuBoostSyncStateInit_DISPATCH(struct OBJGPU * pGpu,struct KernelPerf * pKernelPerf)181 static inline NV_STATUS kperfGpuBoostSyncStateInit_DISPATCH(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf) {
182     return pKernelPerf->__kperfGpuBoostSyncStateInit__(pGpu, pKernelPerf);
183 }
184 
kperfStatePreLoad_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate,NvU32 arg0)185 static inline NV_STATUS kperfStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
186     return pEngstate->__kperfStatePreLoad__(pGpu, pEngstate, arg0);
187 }
188 
kperfStatePostUnload_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate,NvU32 arg0)189 static inline NV_STATUS kperfStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
190     return pEngstate->__kperfStatePostUnload__(pGpu, pEngstate, arg0);
191 }
192 
kperfStatePreUnload_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate,NvU32 arg0)193 static inline NV_STATUS kperfStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
194     return pEngstate->__kperfStatePreUnload__(pGpu, pEngstate, arg0);
195 }
196 
kperfStateInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate)197 static inline NV_STATUS kperfStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) {
198     return pEngstate->__kperfStateInitUnlocked__(pGpu, pEngstate);
199 }
200 
// Virtual dispatch for the OBJENGSTATE init-missing override.
static inline void kperfInitMissing_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) {
    void (*pFn)(POBJGPU, struct KernelPerf *) = pEngstate->__kperfInitMissing__;
    pFn(pGpu, pEngstate);
}
204 
kperfStatePreInitLocked_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate)205 static inline NV_STATUS kperfStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) {
206     return pEngstate->__kperfStatePreInitLocked__(pGpu, pEngstate);
207 }
208 
kperfStatePreInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate)209 static inline NV_STATUS kperfStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) {
210     return pEngstate->__kperfStatePreInitUnlocked__(pGpu, pEngstate);
211 }
212 
kperfStatePostLoad_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate,NvU32 arg0)213 static inline NV_STATUS kperfStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) {
214     return pEngstate->__kperfStatePostLoad__(pGpu, pEngstate, arg0);
215 }
216 
kperfIsPresent_DISPATCH(POBJGPU pGpu,struct KernelPerf * pEngstate)217 static inline NvBool kperfIsPresent_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) {
218     return pEngstate->__kperfIsPresent__(pGpu, pEngstate);
219 }
220 
NV_STATUS kperfGpuBoostSyncActivate_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvBool bActivate);

#ifdef __nvoc_kern_perf_h_disabled
// Stub used when KernelPerf is compiled out: asserts, then reports the
// call as unsupported instead of activating GPU Boost sync.
static inline NV_STATUS kperfGpuBoostSyncActivate(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvBool bActivate) {
    NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_perf_h_disabled
// Non-virtual call: routes directly to the _IMPL.
#define kperfGpuBoostSyncActivate(pGpu, pKernelPerf, bActivate) kperfGpuBoostSyncActivate_IMPL(pGpu, pKernelPerf, bActivate)
#endif //__nvoc_kern_perf_h_disabled
231 
NV_STATUS kperfDoSyncGpuBoostLimits_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams);

#ifdef __nvoc_kern_perf_h_disabled
// Stub used when KernelPerf is compiled out: asserts, then reports the
// call as unsupported instead of syncing GPU Boost limits.
static inline NV_STATUS kperfDoSyncGpuBoostLimits(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_perf_h_disabled
// Non-virtual call: routes directly to the _IMPL.
#define kperfDoSyncGpuBoostLimits(pGpu, pKernelPerf, pParams) kperfDoSyncGpuBoostLimits_IMPL(pGpu, pKernelPerf, pParams)
#endif //__nvoc_kern_perf_h_disabled
242 
NV_STATUS kperfBoostSet_IMPL(struct KernelPerf *pKernelPerf, struct Subdevice *pSubdevice, NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams);

#ifdef __nvoc_kern_perf_h_disabled
// Stub used when KernelPerf is compiled out: asserts, then reports the
// call as unsupported instead of applying boost parameters.
static inline NV_STATUS kperfBoostSet(struct KernelPerf *pKernelPerf, struct Subdevice *pSubdevice, NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_perf_h_disabled
// Non-virtual call: routes directly to the _IMPL.
#define kperfBoostSet(pKernelPerf, pSubdevice, pBoostParams) kperfBoostSet_IMPL(pKernelPerf, pSubdevice, pBoostParams)
#endif //__nvoc_kern_perf_h_disabled
253 
NV_STATUS kperfReentrancy_IMPL(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 function, NvBool bSet);

#ifdef __nvoc_kern_perf_h_disabled
// Stub used when KernelPerf is compiled out: asserts, then reports the
// call as unsupported instead of updating the reentrancy mask.
static inline NV_STATUS kperfReentrancy(struct OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 function, NvBool bSet) {
    NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_perf_h_disabled
// Non-virtual call: routes directly to the _IMPL.
// 'function' is a KERNEL_PERF_REENTRANCY_* flag; bSet sets or clears it.
#define kperfReentrancy(pGpu, pKernelPerf, function, bSet) kperfReentrancy_IMPL(pGpu, pKernelPerf, function, bSet)
#endif //__nvoc_kern_perf_h_disabled
264 
265 #undef PRIVATE_FIELD
266 
267 
268 /* ------------------------ External Definitions --------------------------- */
269 /* ------------------------ Function Prototypes ---------------------------- */
270 /* ------------------------ Include Derived Types -------------------------- */
271 
272 #endif // KERNEL_PERF_H
273 
274 #ifdef __cplusplus
275 } // extern "C"
276 #endif
277 
278 #endif // _G_KERN_PERF_NVOC_H_
279