1 #ifndef _G_KERN_HWPM_NVOC_H_
2 #define _G_KERN_HWPM_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kern_hwpm_nvoc.h"
33 
34 #ifndef KERNEL_HWPM_H
35 #define KERNEL_HWPM_H
36 
37 /******************************************************************************
38  *
39  *   Kernel Hwpm module header
40  *   This file contains functions managing HWPM on CPU RM
41  *
42  ******************************************************************************/
43 
44 #include "gpu/gpu.h"
45 #include "gpu/eng_state.h"
46 #include "gpu/hwpm/kern_hwpm_power.h"
47 #include "gpu/hwpm/kern_hwpm_common_defs.h"
48 
49 #define INVALID_PMA_CHANNEL_IDX NV_U32_MAX
50 
51 // default values for the perf vaspace base and size
52 #define PERF_VASPACE_BASE (4*1024*1024*1024ULL)
53 #define PERF_VASPACE_SIZE (1024*1024*1024*1024ULL)
54 
55 
56 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
57 // the matching C source file, but causes diagnostics to be issued if another
58 // source file references the field.
59 #ifdef NVOC_KERN_HWPM_H_PRIVATE_ACCESS_ALLOWED
60 #define PRIVATE_FIELD(x) x
61 #else
62 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
63 #endif
64 
struct KernelHwpm {
    // NVOC run-time type info for this instance.
    const struct NVOC_RTTI *__nvoc_rtti;
    // Base class instance: KernelHwpm derives from OBJENGSTATE.
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    // Ancestor pointers maintained by NVOC for casts up the class hierarchy.
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct KernelHwpm *__nvoc_pbase_KernelHwpm;
    // Virtual method table: engine-state/HAL entry points invoked through the
    // *_DISPATCH inline helpers defined later in this header.
    NV_STATUS (*__khwpmStateInitUnlocked__)(OBJGPU *, struct KernelHwpm *);
    void (*__khwpmStateDestroy__)(OBJGPU *, struct KernelHwpm *);
    void (*__khwpmGetCblockInfo__)(OBJGPU *, struct KernelHwpm *, NvU32 *, NvU32 *);
    NV_STATUS (*__khwpmStateLoad__)(POBJGPU, struct KernelHwpm *, NvU32);
    NV_STATUS (*__khwpmStateUnload__)(POBJGPU, struct KernelHwpm *, NvU32);
    NV_STATUS (*__khwpmStateInitLocked__)(POBJGPU, struct KernelHwpm *);
    NV_STATUS (*__khwpmStatePreLoad__)(POBJGPU, struct KernelHwpm *, NvU32);
    NV_STATUS (*__khwpmStatePostUnload__)(POBJGPU, struct KernelHwpm *, NvU32);
    NV_STATUS (*__khwpmStatePreUnload__)(POBJGPU, struct KernelHwpm *, NvU32);
    void (*__khwpmInitMissing__)(POBJGPU, struct KernelHwpm *);
    NV_STATUS (*__khwpmStatePreInitLocked__)(POBJGPU, struct KernelHwpm *);
    NV_STATUS (*__khwpmStatePreInitUnlocked__)(POBJGPU, struct KernelHwpm *);
    NV_STATUS (*__khwpmStatePostLoad__)(POBJGPU, struct KernelHwpm *, NvU32);
    NV_STATUS (*__khwpmConstructEngine__)(POBJGPU, struct KernelHwpm *, ENGDESCRIPTOR);
    NvBool (*__khwpmIsPresent__)(POBJGPU, struct KernelHwpm *);
    // PDB property: NV_TRUE when multiple PMA units are supported
    // (presumably set during engine construction — confirm in the .c file).
    NvBool PDB_PROP_KHWPM_MULTIPLE_PMA_SUPPORTED;
    NvU32 numPma;              // number of PMA units (assumed from name — confirm)
    NvU32 maxCblocks;          // total counter-block count (assumed — confirm)
    NvU32 maxCblocksPerPma;    // counter blocks per PMA unit (assumed — confirm)
    NvU32 maxPmaChannels;      // total PMA channel count (assumed — confirm)
    NvU32 maxChannelPerCblock; // channels per counter block (assumed — confirm)
    // Streamout bookkeeping; type declared in kern_hwpm_common_defs.h.
    HWPM_STREAMOUT_STATE *streamoutState;
    // Perf VA space placement; defaults are PERF_VASPACE_BASE/SIZE above.
    NvU64 vaSpaceBase;
    NvU64 vaSpaceSize;
};
96 
97 #ifndef __NVOC_CLASS_KernelHwpm_TYPEDEF__
98 #define __NVOC_CLASS_KernelHwpm_TYPEDEF__
99 typedef struct KernelHwpm KernelHwpm;
100 #endif /* __NVOC_CLASS_KernelHwpm_TYPEDEF__ */
101 
102 #ifndef __nvoc_class_id_KernelHwpm
103 #define __nvoc_class_id_KernelHwpm 0xc8c00f
104 #endif /* __nvoc_class_id_KernelHwpm */
105 
106 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHwpm;
107 
108 #define __staticCast_KernelHwpm(pThis) \
109     ((pThis)->__nvoc_pbase_KernelHwpm)
110 
111 #ifdef __nvoc_kern_hwpm_h_disabled
112 #define __dynamicCast_KernelHwpm(pThis) ((KernelHwpm*)NULL)
113 #else //__nvoc_kern_hwpm_h_disabled
114 #define __dynamicCast_KernelHwpm(pThis) \
115     ((KernelHwpm*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelHwpm)))
116 #endif //__nvoc_kern_hwpm_h_disabled
117 
118 #define PDB_PROP_KHWPM_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
119 #define PDB_PROP_KHWPM_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
120 #define PDB_PROP_KHWPM_MULTIPLE_PMA_SUPPORTED_BASE_CAST
121 #define PDB_PROP_KHWPM_MULTIPLE_PMA_SUPPORTED_BASE_NAME PDB_PROP_KHWPM_MULTIPLE_PMA_SUPPORTED
122 
123 NV_STATUS __nvoc_objCreateDynamic_KernelHwpm(KernelHwpm**, Dynamic*, NvU32, va_list);
124 
125 NV_STATUS __nvoc_objCreate_KernelHwpm(KernelHwpm**, Dynamic*, NvU32);
126 #define __objCreate_KernelHwpm(ppNewObj, pParent, createFlags) \
127     __nvoc_objCreate_KernelHwpm((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
128 
129 #define khwpmStateInitUnlocked(pGpu, pKernelHwpm) khwpmStateInitUnlocked_DISPATCH(pGpu, pKernelHwpm)
130 #define khwpmStateDestroy(pGpu, pKernelHwpm) khwpmStateDestroy_DISPATCH(pGpu, pKernelHwpm)
131 #define khwpmGetCblockInfo(pGpu, pKernelHwpm, arg0, arg1) khwpmGetCblockInfo_DISPATCH(pGpu, pKernelHwpm, arg0, arg1)
132 #define khwpmGetCblockInfo_HAL(pGpu, pKernelHwpm, arg0, arg1) khwpmGetCblockInfo_DISPATCH(pGpu, pKernelHwpm, arg0, arg1)
133 #define khwpmStateLoad(pGpu, pEngstate, arg0) khwpmStateLoad_DISPATCH(pGpu, pEngstate, arg0)
134 #define khwpmStateUnload(pGpu, pEngstate, arg0) khwpmStateUnload_DISPATCH(pGpu, pEngstate, arg0)
135 #define khwpmStateInitLocked(pGpu, pEngstate) khwpmStateInitLocked_DISPATCH(pGpu, pEngstate)
136 #define khwpmStatePreLoad(pGpu, pEngstate, arg0) khwpmStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
137 #define khwpmStatePostUnload(pGpu, pEngstate, arg0) khwpmStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
138 #define khwpmStatePreUnload(pGpu, pEngstate, arg0) khwpmStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
139 #define khwpmInitMissing(pGpu, pEngstate) khwpmInitMissing_DISPATCH(pGpu, pEngstate)
140 #define khwpmStatePreInitLocked(pGpu, pEngstate) khwpmStatePreInitLocked_DISPATCH(pGpu, pEngstate)
141 #define khwpmStatePreInitUnlocked(pGpu, pEngstate) khwpmStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
142 #define khwpmStatePostLoad(pGpu, pEngstate, arg0) khwpmStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
143 #define khwpmConstructEngine(pGpu, pEngstate, arg0) khwpmConstructEngine_DISPATCH(pGpu, pEngstate, arg0)
144 #define khwpmIsPresent(pGpu, pEngstate) khwpmIsPresent_DISPATCH(pGpu, pEngstate)
// Generated HAL variant that performs no work and reports success
// (the _56cd7a suffix is an NVOC hash identifying this stub body).
static inline NV_STATUS khwpmPmaStreamSriovSetGfid_56cd7a(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0, NvU32 arg1) {
    return NV_OK;
}
148 
149 NV_STATUS khwpmPmaStreamSriovSetGfid_GA100(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0, NvU32 arg1);
150 
151 NV_STATUS khwpmPmaStreamSriovSetGfid_GH100(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0, NvU32 arg1);
152 
// Generated HAL variant for configurations where this operation is not
// implemented: asserts (NV_ASSERT_PRECOMP) and returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS khwpmPmaStreamSriovSetGfid_92bfc3(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}
157 
158 
159 #ifdef __nvoc_kern_hwpm_h_disabled
// Stub used when the KernelHwpm engine is compiled out
// (__nvoc_kern_hwpm_h_disabled): flags the unexpected call and fails.
static inline NV_STATUS khwpmPmaStreamSriovSetGfid(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelHwpm was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
164 #else //__nvoc_kern_hwpm_h_disabled
165 #define khwpmPmaStreamSriovSetGfid(pGpu, pKernelHwpm, arg0, arg1) khwpmPmaStreamSriovSetGfid_56cd7a(pGpu, pKernelHwpm, arg0, arg1)
166 #endif //__nvoc_kern_hwpm_h_disabled
167 
168 #define khwpmPmaStreamSriovSetGfid_HAL(pGpu, pKernelHwpm, arg0, arg1) khwpmPmaStreamSriovSetGfid(pGpu, pKernelHwpm, arg0, arg1)
169 
170 NV_STATUS khwpmStateInitUnlocked_IMPL(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm);
171 
khwpmStateInitUnlocked_DISPATCH(OBJGPU * pGpu,struct KernelHwpm * pKernelHwpm)172 static inline NV_STATUS khwpmStateInitUnlocked_DISPATCH(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm) {
173     return pKernelHwpm->__khwpmStateInitUnlocked__(pGpu, pKernelHwpm);
174 }
175 
176 void khwpmStateDestroy_IMPL(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm);
177 
// Dispatch khwpmStateDestroy through the per-instance NVOC vtable.
static inline void khwpmStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm) {
    void (*pCallback)(OBJGPU *, struct KernelHwpm *) = pKernelHwpm->__khwpmStateDestroy__;
    pCallback(pGpu, pKernelHwpm);
}
181 
182 void khwpmGetCblockInfo_GM107(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 *arg0, NvU32 *arg1);
183 
184 void khwpmGetCblockInfo_GH100(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 *arg0, NvU32 *arg1);
185 
// Dispatch khwpmGetCblockInfo through the vtable slot chosen for this GPU's HAL.
static inline void khwpmGetCblockInfo_DISPATCH(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 *arg0, NvU32 *arg1) {
    void (*pCallback)(OBJGPU *, struct KernelHwpm *, NvU32 *, NvU32 *) = pKernelHwpm->__khwpmGetCblockInfo__;
    pCallback(pGpu, pKernelHwpm, arg0, arg1);
}
189 
khwpmStateLoad_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,NvU32 arg0)190 static inline NV_STATUS khwpmStateLoad_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, NvU32 arg0) {
191     return pEngstate->__khwpmStateLoad__(pGpu, pEngstate, arg0);
192 }
193 
khwpmStateUnload_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,NvU32 arg0)194 static inline NV_STATUS khwpmStateUnload_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, NvU32 arg0) {
195     return pEngstate->__khwpmStateUnload__(pGpu, pEngstate, arg0);
196 }
197 
khwpmStateInitLocked_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate)198 static inline NV_STATUS khwpmStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate) {
199     return pEngstate->__khwpmStateInitLocked__(pGpu, pEngstate);
200 }
201 
khwpmStatePreLoad_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,NvU32 arg0)202 static inline NV_STATUS khwpmStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, NvU32 arg0) {
203     return pEngstate->__khwpmStatePreLoad__(pGpu, pEngstate, arg0);
204 }
205 
khwpmStatePostUnload_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,NvU32 arg0)206 static inline NV_STATUS khwpmStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, NvU32 arg0) {
207     return pEngstate->__khwpmStatePostUnload__(pGpu, pEngstate, arg0);
208 }
209 
khwpmStatePreUnload_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,NvU32 arg0)210 static inline NV_STATUS khwpmStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, NvU32 arg0) {
211     return pEngstate->__khwpmStatePreUnload__(pGpu, pEngstate, arg0);
212 }
213 
// Dispatch khwpmInitMissing through the engine-state vtable entry.
static inline void khwpmInitMissing_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate) {
    void (*pCallback)(POBJGPU, struct KernelHwpm *) = pEngstate->__khwpmInitMissing__;
    pCallback(pGpu, pEngstate);
}
217 
khwpmStatePreInitLocked_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate)218 static inline NV_STATUS khwpmStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate) {
219     return pEngstate->__khwpmStatePreInitLocked__(pGpu, pEngstate);
220 }
221 
khwpmStatePreInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate)222 static inline NV_STATUS khwpmStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate) {
223     return pEngstate->__khwpmStatePreInitUnlocked__(pGpu, pEngstate);
224 }
225 
khwpmStatePostLoad_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,NvU32 arg0)226 static inline NV_STATUS khwpmStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, NvU32 arg0) {
227     return pEngstate->__khwpmStatePostLoad__(pGpu, pEngstate, arg0);
228 }
229 
khwpmConstructEngine_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate,ENGDESCRIPTOR arg0)230 static inline NV_STATUS khwpmConstructEngine_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate, ENGDESCRIPTOR arg0) {
231     return pEngstate->__khwpmConstructEngine__(pGpu, pEngstate, arg0);
232 }
233 
khwpmIsPresent_DISPATCH(POBJGPU pGpu,struct KernelHwpm * pEngstate)234 static inline NvBool khwpmIsPresent_DISPATCH(POBJGPU pGpu, struct KernelHwpm *pEngstate) {
235     return pEngstate->__khwpmIsPresent__(pGpu, pEngstate);
236 }
237 
238 NV_STATUS khwpmStreamoutAllocPmaStream_IMPL(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU64 arg0, MEMORY_DESCRIPTOR *arg1, MEMORY_DESCRIPTOR *arg2, NvU32 arg3, HWPM_PMA_STREAM *arg4);
239 
240 #ifdef __nvoc_kern_hwpm_h_disabled
// Stub used when the KernelHwpm engine is compiled out
// (__nvoc_kern_hwpm_h_disabled): flags the unexpected call and fails.
static inline NV_STATUS khwpmStreamoutAllocPmaStream(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU64 arg0, MEMORY_DESCRIPTOR *arg1, MEMORY_DESCRIPTOR *arg2, NvU32 arg3, HWPM_PMA_STREAM *arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelHwpm was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
245 #else //__nvoc_kern_hwpm_h_disabled
246 #define khwpmStreamoutAllocPmaStream(pGpu, pKernelHwpm, arg0, arg1, arg2, arg3, arg4) khwpmStreamoutAllocPmaStream_IMPL(pGpu, pKernelHwpm, arg0, arg1, arg2, arg3, arg4)
247 #endif //__nvoc_kern_hwpm_h_disabled
248 
249 NV_STATUS khwpmStreamoutFreePmaStream_IMPL(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU64 arg0, HWPM_PMA_STREAM *arg1, NvU32 arg2);
250 
251 #ifdef __nvoc_kern_hwpm_h_disabled
// Stub used when the KernelHwpm engine is compiled out
// (__nvoc_kern_hwpm_h_disabled): flags the unexpected call and fails.
static inline NV_STATUS khwpmStreamoutFreePmaStream(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU64 arg0, HWPM_PMA_STREAM *arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelHwpm was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
256 #else //__nvoc_kern_hwpm_h_disabled
257 #define khwpmStreamoutFreePmaStream(pGpu, pKernelHwpm, arg0, arg1, arg2) khwpmStreamoutFreePmaStream_IMPL(pGpu, pKernelHwpm, arg0, arg1, arg2)
258 #endif //__nvoc_kern_hwpm_h_disabled
259 
260 NV_STATUS khwpmStreamoutCreatePmaVaSpace_IMPL(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0);
261 
262 #ifdef __nvoc_kern_hwpm_h_disabled
// Stub used when the KernelHwpm engine is compiled out
// (__nvoc_kern_hwpm_h_disabled): flags the unexpected call and fails.
static inline NV_STATUS khwpmStreamoutCreatePmaVaSpace(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHwpm was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
267 #else //__nvoc_kern_hwpm_h_disabled
268 #define khwpmStreamoutCreatePmaVaSpace(pGpu, pKernelHwpm, arg0) khwpmStreamoutCreatePmaVaSpace_IMPL(pGpu, pKernelHwpm, arg0)
269 #endif //__nvoc_kern_hwpm_h_disabled
270 
271 NV_STATUS khwpmStreamoutFreePmaVaSpace_IMPL(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0);
272 
273 #ifdef __nvoc_kern_hwpm_h_disabled
// Stub used when the KernelHwpm engine is compiled out
// (__nvoc_kern_hwpm_h_disabled): flags the unexpected call and fails.
static inline NV_STATUS khwpmStreamoutFreePmaVaSpace(OBJGPU *pGpu, struct KernelHwpm *pKernelHwpm, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHwpm was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
278 #else //__nvoc_kern_hwpm_h_disabled
279 #define khwpmStreamoutFreePmaVaSpace(pGpu, pKernelHwpm, arg0) khwpmStreamoutFreePmaVaSpace_IMPL(pGpu, pKernelHwpm, arg0)
280 #endif //__nvoc_kern_hwpm_h_disabled
281 
282 #undef PRIVATE_FIELD
283 
284 
285 NV_STATUS khwpmGetRequestCgStatusMask(NvU32 *pCgStatusMask, HWPM_POWER_REQUEST_FEATURES_PARAMS *pParams);
286 
287 #endif // KERNEL_HWPM_H
288 
289 #ifdef __cplusplus
290 } // extern "C"
291 #endif
292 
293 #endif // _G_KERN_HWPM_NVOC_H_
294