#ifndef _G_KERNEL_MC_NVOC_H_
#define _G_KERNEL_MC_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_kernel_mc_nvoc.h"

#ifndef KERNEL_MC_H
#define KERNEL_MC_H

/******************************************************************************
*
*       Kernel Master Control module header
*       This file contains functions required for MC in Kernel RM
*
******************************************************************************/

#include "gpu/eng_state.h"
#include "gpu/gpu_halspec.h"

// Latency Timer Control determines how we set (or don't set) the PCI latency timer.
typedef struct LATENCY_TIMER_CONTROL
{
    NvBool DontModifyTimerValue;      // Don't touch the timer value at all.
    NvU32  LatencyTimerValue;         // Requested value for the PCI latency timer.
} LATENCY_TIMER_CONTROL;
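
//
// Illustrative sketch (not part of this header): a bus/MC routine could consult
// LATENCY_TIMER_CONTROL before programming the PCI latency timer. The helper
// below and its register accessor are hypothetical; the real logic lives in the
// chip-specific RM code.
//
//     static void exampleApplyLatencyTimer(struct OBJGPU *pGpu,
//                                          const LATENCY_TIMER_CONTROL *pLtc)
//     {
//         if (pLtc->DontModifyTimerValue)
//             return;                              // leave the firmware-programmed value alone
//
//         // Hypothetical config-space write of the requested timer value.
//         exampleWritePciLatencyTimer(pGpu, pLtc->LatencyTimerValue);
//     }
//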


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERNEL_MC_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
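
//
// For illustration only: a member declared in a class body as, say,
//     NvU32 PRIVATE_FIELD(exampleCounter);
// expands to a plain field for the matching C source file but triggers a
// diagnostic when referenced from any other source file. The field name above
// is hypothetical; the KernelMc definition below happens not to wrap any of
// its members in PRIVATE_FIELD.
//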

struct KernelMc {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct KernelMc *__nvoc_pbase_KernelMc;
    NV_STATUS (*__kmcStateInitLocked__)(struct OBJGPU *, struct KernelMc *);
    NV_STATUS (*__kmcStateLoad__)(struct OBJGPU *, struct KernelMc *, NvU32);
    NV_STATUS (*__kmcWritePmcEnableReg__)(struct OBJGPU *, struct KernelMc *, NvU32, NvBool, NvBool);
    NvU32 (*__kmcReadPmcEnableReg__)(struct OBJGPU *, struct KernelMc *, NvBool);
    NV_STATUS (*__kmcStateUnload__)(POBJGPU, struct KernelMc *, NvU32);
    NV_STATUS (*__kmcStatePreLoad__)(POBJGPU, struct KernelMc *, NvU32);
    NV_STATUS (*__kmcStatePostUnload__)(POBJGPU, struct KernelMc *, NvU32);
    void (*__kmcStateDestroy__)(POBJGPU, struct KernelMc *);
    NV_STATUS (*__kmcStatePreUnload__)(POBJGPU, struct KernelMc *, NvU32);
    NV_STATUS (*__kmcStateInitUnlocked__)(POBJGPU, struct KernelMc *);
    void (*__kmcInitMissing__)(POBJGPU, struct KernelMc *);
    NV_STATUS (*__kmcStatePreInitLocked__)(POBJGPU, struct KernelMc *);
    NV_STATUS (*__kmcStatePreInitUnlocked__)(POBJGPU, struct KernelMc *);
    NV_STATUS (*__kmcStatePostLoad__)(POBJGPU, struct KernelMc *, NvU32);
    NV_STATUS (*__kmcConstructEngine__)(POBJGPU, struct KernelMc *, ENGDESCRIPTOR);
    NvBool (*__kmcIsPresent__)(POBJGPU, struct KernelMc *);
    LATENCY_TIMER_CONTROL LatencyTimerControl;
};

#ifndef __NVOC_CLASS_KernelMc_TYPEDEF__
#define __NVOC_CLASS_KernelMc_TYPEDEF__
typedef struct KernelMc KernelMc;
#endif /* __NVOC_CLASS_KernelMc_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelMc
#define __nvoc_class_id_KernelMc 0x3827ff
#endif /* __nvoc_class_id_KernelMc */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMc;

#define __staticCast_KernelMc(pThis) \
    ((pThis)->__nvoc_pbase_KernelMc)

#ifdef __nvoc_kernel_mc_h_disabled
#define __dynamicCast_KernelMc(pThis) ((KernelMc*)NULL)
#else //__nvoc_kernel_mc_h_disabled
#define __dynamicCast_KernelMc(pThis) \
    ((KernelMc*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelMc)))
#endif //__nvoc_kernel_mc_h_disabled

#define PDB_PROP_KMC_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KMC_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_KernelMc(KernelMc**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelMc(KernelMc**, Dynamic*, NvU32);
#define __objCreate_KernelMc(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelMc((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
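
//
// Usage sketch (illustrative only): KernelMc is normally instantiated by the
// engine-state framework rather than by hand, but creation and casting follow
// the usual NVOC pattern. The variable names below are hypothetical.
//
//     KernelMc *pKernelMc = NULL;
//     NV_STATUS status    = __objCreate_KernelMc(&pKernelMc, pParentObj, 0);
//     if (status == NV_OK)
//     {
//         // Down-cast from a generic object pointer when only the base type is
//         // known; dynamicCast returns NULL if the object is not a KernelMc.
//         KernelMc *pKmc = dynamicCast(pSomeObject, KernelMc);
//     }
//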

#define kmcStateInitLocked(pGpu, pKernelMc) kmcStateInitLocked_DISPATCH(pGpu, pKernelMc)
#define kmcStateLoad(pGpu, pKernelMc, arg0) kmcStateLoad_DISPATCH(pGpu, pKernelMc, arg0)
#define kmcWritePmcEnableReg(pGpu, pKernelMc, arg0, arg1, arg2) kmcWritePmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0, arg1, arg2)
#define kmcWritePmcEnableReg_HAL(pGpu, pKernelMc, arg0, arg1, arg2) kmcWritePmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0, arg1, arg2)
#define kmcReadPmcEnableReg(pGpu, pKernelMc, arg0) kmcReadPmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0)
#define kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, arg0) kmcReadPmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0)
#define kmcStateUnload(pGpu, pEngstate, arg0) kmcStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kmcStatePreLoad(pGpu, pEngstate, arg0) kmcStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kmcStatePostUnload(pGpu, pEngstate, arg0) kmcStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kmcStateDestroy(pGpu, pEngstate) kmcStateDestroy_DISPATCH(pGpu, pEngstate)
#define kmcStatePreUnload(pGpu, pEngstate, arg0) kmcStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kmcStateInitUnlocked(pGpu, pEngstate) kmcStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kmcInitMissing(pGpu, pEngstate) kmcInitMissing_DISPATCH(pGpu, pEngstate)
#define kmcStatePreInitLocked(pGpu, pEngstate) kmcStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kmcStatePreInitUnlocked(pGpu, pEngstate) kmcStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kmcStatePostLoad(pGpu, pEngstate, arg0) kmcStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kmcConstructEngine(pGpu, pEngstate, arg0) kmcConstructEngine_DISPATCH(pGpu, pEngstate, arg0)
#define kmcIsPresent(pGpu, pEngstate) kmcIsPresent_DISPATCH(pGpu, pEngstate)
NV_STATUS kmcPrepareForXVEReset_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc);


#ifdef __nvoc_kernel_mc_h_disabled
static inline NV_STATUS kmcPrepareForXVEReset(struct OBJGPU *pGpu, struct KernelMc *pKernelMc) {
    NV_ASSERT_FAILED_PRECOMP("KernelMc was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_mc_h_disabled
#define kmcPrepareForXVEReset(pGpu, pKernelMc) kmcPrepareForXVEReset_GK104(pGpu, pKernelMc)
#endif //__nvoc_kernel_mc_h_disabled

#define kmcPrepareForXVEReset_HAL(pGpu, pKernelMc) kmcPrepareForXVEReset(pGpu, pKernelMc)
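
//
// Call-site sketch (illustrative only): with KernelMc enabled, the _HAL macro
// resolves to the GK104 implementation declared above; with
// __nvoc_kernel_mc_h_disabled defined, the inline stub asserts and returns
// NV_ERR_NOT_SUPPORTED. The variable names below are hypothetical.
//
//     NV_STATUS status = kmcPrepareForXVEReset_HAL(pGpu, pKernelMc);
//     if (status != NV_OK)
//     {
//         NV_PRINTF(LEVEL_ERROR, "XVE reset preparation failed: 0x%x\n", status);
//     }
//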

NV_STATUS kmcGetMcBar0MapInfo_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU64 *arg0, NvU32 *arg1);


#ifdef __nvoc_kernel_mc_h_disabled
static inline NV_STATUS kmcGetMcBar0MapInfo(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU64 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelMc was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_mc_h_disabled
#define kmcGetMcBar0MapInfo(pGpu, pKernelMc, arg0, arg1) kmcGetMcBar0MapInfo_GK104(pGpu, pKernelMc, arg0, arg1)
#endif //__nvoc_kernel_mc_h_disabled

#define kmcGetMcBar0MapInfo_HAL(pGpu, pKernelMc, arg0, arg1) kmcGetMcBar0MapInfo(pGpu, pKernelMc, arg0, arg1)

NV_STATUS kmcStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelMc *pKernelMc);

static inline NV_STATUS kmcStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc) {
    return pKernelMc->__kmcStateInitLocked__(pGpu, pKernelMc);
}

NV_STATUS kmcStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0);

static inline NV_STATUS kmcStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0) {
    return pKernelMc->__kmcStateLoad__(pGpu, pKernelMc, arg0);
}

NV_STATUS kmcWritePmcEnableReg_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2);

NV_STATUS kmcWritePmcEnableReg_GA100(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2);

NV_STATUS kmcWritePmcEnableReg_GH100(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2);

static inline NV_STATUS kmcWritePmcEnableReg_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2) {
    return pKernelMc->__kmcWritePmcEnableReg__(pGpu, pKernelMc, arg0, arg1, arg2);
}

NvU32 kmcReadPmcEnableReg_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0);

NvU32 kmcReadPmcEnableReg_GA100(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0);

NvU32 kmcReadPmcEnableReg_GH100(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0);

static inline NvU32 kmcReadPmcEnableReg_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0) {
    return pKernelMc->__kmcReadPmcEnableReg__(pGpu, pKernelMc, arg0);
}
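
//
// Dispatch sketch (illustrative only): the kmc* and kmc*_HAL macros above route
// through the per-object function pointers that NVOC installs, so a single call
// site reaches the GK104, GA100, or GH100 implementation selected for the
// running chip. The variable names and argument value below are hypothetical.
//
//     NvU32 pmcEnable = kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, NV_FALSE);
//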

static inline NV_STATUS kmcStateUnload_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) {
    return pEngstate->__kmcStateUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kmcStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) {
    return pEngstate->__kmcStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kmcStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) {
    return pEngstate->__kmcStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline void kmcStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) {
    pEngstate->__kmcStateDestroy__(pGpu, pEngstate);
}

static inline NV_STATUS kmcStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) {
    return pEngstate->__kmcStatePreUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kmcStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) {
    return pEngstate->__kmcStateInitUnlocked__(pGpu, pEngstate);
}

static inline void kmcInitMissing_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) {
    pEngstate->__kmcInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kmcStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) {
    return pEngstate->__kmcStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kmcStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) {
    return pEngstate->__kmcStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS kmcStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) {
    return pEngstate->__kmcStatePostLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kmcConstructEngine_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, ENGDESCRIPTOR arg0) {
    return pEngstate->__kmcConstructEngine__(pGpu, pEngstate, arg0);
}

static inline NvBool kmcIsPresent_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) {
    return pEngstate->__kmcIsPresent__(pGpu, pEngstate);
}

#undef PRIVATE_FIELD


#endif // KERNEL_MC_H

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_KERNEL_MC_NVOC_H_