1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /*!
25 * @file
 * @brief ACPI common routines - non OS dependent
27 */
28
29 #include "os/os.h"
30 #include "platform/acpi_common.h"
31 #include "acpidsmguids.h"
32 #include "nvhybridacpi.h"
33 #include "nbci.h"
34 #include "gps.h"
35 #include "nvop.h"
36 #include "nvhybridacpi.h"
37 #include "jt.h"
38 #include "pex.h"
39 #include "mxm_spec.h"
40 #include "gpu/gsp/gsp_static_config.h"
41 #include "platform/nbsi/nbsi_read.h"
42
43 //
44 // DSM ACPI Routines common routines for Linux
45 //
46 // these are debug strings for printing which DSM subfunction didn't work.
47 // These map directly to the ACPI_DSM_FUNCTION enum in \interface\nvacpitypes.h.
48 //
49 #if NV_PRINTF_ENABLED
50 const char * const DSMCalls[] = {"NBSI","NVHG","MXM","NBCI","NVOP","PFCG","GPS_2X","JT","PEX","NVPCF_2X","GPS","NVPCF","UNKNOWN","CURRENT"};
51 #endif
52
53 /*
54 * @_isDsmError returns status if the return data is an error indicator.
55 * ACPI returns a dword such as 0x80000002 to indicate a failure. Note there
56 * is a possibility that if a subfunction returns the same data this could be
57 * confused.
58 *
59 * @param[in] status NV_STATUS return status from ACPI call
60 * @param[in] rtnSize NvU16 number of bytes at rtnvalue
61 * @param[in] rtnvalue NvU32 * pointer to returned value/status.
62 *
63 * @returns NV_TRUE if an error was found
64 * NV_FALSE if an error wasn't found
65 */
66 static NvBool
_isDsmError(NV_STATUS status,NvU16 rtnSize,NvU32 * rtnvalue)67 _isDsmError
68 (
69 NV_STATUS status,
70 NvU16 rtnSize,
71 NvU32 *rtnvalue
72 )
73 {
74 if (status != NV_OK)
75 {
76 return NV_TRUE;
77 }
78 else if (rtnSize == sizeof(NvU32))
79 {
80 if ((*rtnvalue >= NVHG_ERROR_UNSPECIFIED) &&
81 (*rtnvalue <= NVHG_ERROR_PARM_INVALID))
82 {
83 return NV_TRUE;
84 }
85 }
86 return NV_FALSE;
87 }
88
89 /*
90 * @cacheDsmSupportedFunction caches the return from the DSM get supported
91 * functions call. Used later to determine whether to call again.
92 *
93 * @param[in] pGpu OBJGPU
94 * @param[in] acpiDsmFunction ACPI_DSM_FUNCTION DSM call indicator
95 * @param[in] subfunction NvU32 subfunction number
96 * @param[in] pInOut NvU32 * pointer to get supported return data.
97 * @param[in] inOutSize NvU32 size of data in pInOut
98 *
99 * @returns void
100 */
101 void
cacheDsmSupportedFunction(OBJGPU * pGpu,ACPI_DSM_FUNCTION acpiDsmFunction,NvU32 acpiDsmSubFunction,NvU32 * pInOut,NvU32 inOutSize)102 cacheDsmSupportedFunction
103 (
104 OBJGPU *pGpu,
105 ACPI_DSM_FUNCTION acpiDsmFunction,
106 NvU32 acpiDsmSubFunction,
107 NvU32 *pInOut,
108 NvU32 inOutSize
109 )
110 {
111 if ((acpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT) &&
112 (inOutSize > 0) &&
113 (inOutSize <= sizeof(pGpu->acpi.dsm[acpiDsmFunction].suppFuncs)))
114 {
115 if (_isDsmError(NV_OK, (NvU16) inOutSize, pInOut))
116 {
117 NV_PRINTF(LEVEL_INFO,
118 "%s DSM functions not available.\n",
119 DSMFuncStr(acpiDsmFunction));
120 return;
121 }
122
123 // cache return from get supported function list
124 portMemCopy((NvU8 *)pGpu->acpi.dsm[acpiDsmFunction].suppFuncs,
125 inOutSize,
126 (NvU8 *)pInOut,
127 inOutSize);
128
129 pGpu->acpi.dsm[acpiDsmFunction].suppFuncsLen = inOutSize;
130
131 // if bit 0 of get supported function is set, indicate success
132 if (pGpu->acpi.dsm[acpiDsmFunction].suppFuncs[NV_ACPI_ALL_FUNC_SUPPORT/8] & NV_ACPI_ALL_FUNC_SUPPORTED)
133 {
134 pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus = DSM_FUNC_STATUS_SUCCESS;
135 }
136
137 #ifdef NV_PRINTF_ENABLED
138 if (inOutSize == 8)
139 {
140 NV_PRINTF(LEVEL_INFO,
141 "%s DSM get supported subfunction returned 0x%08x size=%d suppStatus=%d\n",
142 DSMFuncStr(acpiDsmFunction), *pInOut,
143 inOutSize,
144 pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus);
145 }
146 else
147 {
148 NV_PRINTF(LEVEL_INFO,
149 "%s DSM get supported subfunction returned 0x%04x size=%d suppStatus=%d\n",
150 DSMFuncStr(acpiDsmFunction), *pInOut,
151 inOutSize,
152 pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus);
153 }
154 #endif
155 }
156 }
157
158 /*
159 * @testIfDsmSubFunctionEnabled tests if a DSM subfunction is enabled.
160 *
161 * @param[in] pGpu OBJGPU
162 * @param[in] acpiDsmFunction ACPI_DSM_FUNCTION DSM call indicator
163 * @param[in] subfunction NvU32 subfunction number
164 *
165 * @returns NV_STATUS of
166 * NV_OK when the subfunction is enabled/supported.
167 * NV_ERR_NOT_SUPPORTED when the subfunction is not supported.
168 * NV_ERR_OPERATING_SYSTEM when get supported list was tried and failed.
169 * NV_ERR_OBJECT_NOT_FOUND when get supportee list hasn't been tried.
170 */
171 NV_STATUS
testIfDsmSubFunctionEnabled(OBJGPU * pGpu,ACPI_DSM_FUNCTION acpiDsmFunction,NvU32 acpiDsmSubFunction)172 testIfDsmSubFunctionEnabled
173 (
174 OBJGPU *pGpu,
175 ACPI_DSM_FUNCTION acpiDsmFunction,
176 NvU32 acpiDsmSubFunction
177 )
178 {
179 NvU32 idx;
180 NvU32 bitToTest;
181
182 NV_ASSERT_OR_RETURN((acpiDsmFunction < ACPI_DSM_FUNCTION_COUNT) || (acpiDsmFunction == ACPI_DSM_FUNCTION_CURRENT),
183 NV_ERR_INVALID_ARGUMENT);
184
185 if (remapDsmFunctionAndSubFunction(pGpu, &acpiDsmFunction, &acpiDsmSubFunction) != NV_OK)
186 {
187 return NV_ERR_NOT_SUPPORTED;
188 }
189
190 NV_ASSERT_OR_RETURN(acpiDsmFunction < ACPI_DSM_FUNCTION_COUNT, NV_ERR_INVALID_ARGUMENT);
191
192 idx = acpiDsmSubFunction / 8;
193 bitToTest = NVBIT(acpiDsmSubFunction % 8);
194
195 // Caller asked for a subfunction... do we support it
196 switch (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus)
197 {
198 case DSM_FUNC_STATUS_OVERRIDE:
199 case DSM_FUNC_STATUS_SUCCESS:
200 // confirm the supported subfunction bit is set in.
201 if (idx > pGpu->acpi.dsm[acpiDsmFunction].suppFuncsLen)
202 {
203 return NV_ERR_NOT_SUPPORTED;
204 }
205 if (!(pGpu->acpi.dsm[acpiDsmFunction].suppFuncs[idx] & bitToTest))
206 {
207 return NV_ERR_NOT_SUPPORTED;
208 }
209 return NV_OK;
210 break;
211
212 case DSM_FUNC_STATUS_FAILED:
213 // the get supported function failed... assume all subfunctions won't work.
214 return NV_ERR_OPERATING_SYSTEM;
215 break;
216
217 default:
218 case DSM_FUNC_STATUS_UNKNOWN:
219 //
220 // Somebody forgot to call _acpiDsmSupportedFuncCacheInit before trying
221 // the dsm subfunction itself. This should be fixed!
222 //
223 NV_PRINTF(LEVEL_ERROR,
224 "%s ACPI DSM called before _acpiDsmSupportedFuncCacheInit subfunction = %x.\n",
225 DSMFuncStr(acpiDsmFunction),
226 acpiDsmSubFunction);
227 // DBG_BREAKPOINT();
228 return NV_ERR_OBJECT_NOT_FOUND;
229 }
230 }
231
232 /*
233 * @testIfDsmFuncSupported returns if a DSM function is supported.
234 * This checks the status of the previously cached copy of the supported
235 * functions list.
236 *
237 * @param[in] pGpu OBJGPU
238 * @param[in] acpiDsmFunction ACPI_DSM_FUNCTION DSM function
239 *
240 * @returns NV_STATUS of
241 * NV_ERR_INVALID_STATE if the pGpu pointer is NULL
242 * NV_ERR_NOT_SUPPORTED if the the get supported functions call
243 * succeedes but the specific DSM subfunction
244 * is not supported.
245 * NV_WARN_MORE_PROCESSING_REQUIRED if the get supported subfunctions
246 * list call hasn't been done yet.
247 * NV_OK if the DSM function is supported.
248 */
249 NV_STATUS
testIfDsmFuncSupported(OBJGPU * pGpu,ACPI_DSM_FUNCTION acpiDsmFunction)250 testIfDsmFuncSupported
251 (
252 OBJGPU *pGpu,
253 ACPI_DSM_FUNCTION acpiDsmFunction
254 )
255 {
256 if (pGpu == NULL)
257 {
258 return NV_ERR_INVALID_STATE;
259 }
260
261 // no generic functions allowed
262 NV_ASSERT_OR_RETURN(acpiDsmFunction < ACPI_DSM_FUNCTION_COUNT, NV_ERR_INVALID_ARGUMENT);
263
264 // should only be called after the cache is inited.
265 NV_ASSERT_OR_RETURN((pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus == DSM_FUNC_STATUS_SUCCESS) ||
266 (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus == DSM_FUNC_STATUS_OVERRIDE) ||
267 (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus == DSM_FUNC_STATUS_FAILED),
268 NV_ERR_INVALID_ARGUMENT);
269
270 switch (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus)
271 {
272 case DSM_FUNC_STATUS_OVERRIDE:
273 case DSM_FUNC_STATUS_SUCCESS:
274 return NV_OK;
275 case DSM_FUNC_STATUS_FAILED:
276 return NV_ERR_NOT_SUPPORTED;
277 default:
278 case DSM_FUNC_STATUS_UNKNOWN:
279 // just in case... should be asserted.
280 // NV_ASSERT(0);
281 return NV_WARN_MORE_PROCESSING_REQUIRED;
282 }
283 }
284
285 //
286 // This table defines the generic subfunction remapping.
287 //
288 // At driver startup the following occurs.
289 // 1) The get supported subfunctions (0) subfunction call is used for each of
290 // the acpi functions guids.
291 // 2) For each of the generic subfunctions, the return data from the get supported
292 // subfunction list is used to determine which GUID supports that function.
293 // The order is based on the dsmOrderOfPrecedenceList below.
294 // Example:
295 // For NV_ACPI_GENERIC_FUNC_HYBRIDCAPS we look at the following (in the
296 // following order.
297 // ACPI_DSM_FUNCTION_NVOP - ignored no compatible subfunction
298 // ACPI_DSM_FUNCTION_NBCI/NV_NBCI_FUNC_PLATCAPS
299 // ACPI_DSM_FUNCTION_MXM - ignored no compatible subfunction
300 // ACPI_DSM_FUNCTION_NVHG/NVHG_FUNC_HYBRIDCAPS
301 // ACPI_DSM_FUNCTION_NBSI/NBSI_FUNC_PLATCAPS
302 // The first subfunction which is supported in the above list is saved
303 // and used whenever a call to
304 // ACPI_DSM_FUNCTION_CURRENT/NV_ACPI_GENERIC_FUNC_HYBRIDCAPS is used.
305 // So if ACPI_DSM_FUNCTION_NBCI/NV_NBCI_FUNC_PLATCAPS is not supported but
306 // ACPI_DSM_FUNCTION_NVHG/NVHG_FUNC_HYBRIDCAPS is. Then the NVHG GUID will
307 // be used.
308 //
309 static const NvU32 genDsmSubFuncRemapTable[] =
310 {
311 // generic function
312 // ACPI_DSM_FUNCTION_CURRENT , ACPI_DSM_FUNCTION_NBSI , ACPI_DSM_FUNCTION_NVHG , ACPI_DSM_FUNCTION_MXM , ACPI_DSM_FUNCTION_NBCI , ACPI_DSM_FUNCTION_NVOP , ACPI_DSM_FUNCTION_PCFG , ACPI_DSM_FUNCTION_GPS_2X , ACPI_DSM_FUNCTION_JT , ACPI_DSM_FUNCTION_PEX , ACPI_DSM_FUNCTION_NVPCF_2X, ACPI_DSM_FUNCTION_GPS , ACPI_DSM_FUNCTION_NVPCF,
313 NV_ACPI_GENERIC_FUNC_DISPLAYSTATUS , NBSI_FUNC_DISPLAYSTATUS , NVHG_FUNC_DISPLAYSTATUS , NV_ACPI_DSM_MXM_FUNC_MXDP , NV_NBCI_FUNC_DISPLAYSTATUS, NVOP_FUNC_DISPLAYSTATUS, 0 , 0 , JT_FUNC_DISPLAYSTATUS , 0 , 0 , 0 , 0 ,
314 NV_ACPI_GENERIC_FUNC_MDTL , NBSI_FUNC_MDTL , NVHG_FUNC_MDTL , NV_ACPI_DSM_MXM_FUNC_MDTL , NV_NBCI_FUNC_MDTL , NVOP_FUNC_MDTL , 0 , 0 , JT_FUNC_MDTL , 0 , 0 , 0 , 0 ,
315 NV_ACPI_GENERIC_FUNC_GETOBJBYTYPE , NBSI_FUNC_GETOBJBYTYPE , NVHG_FUNC_GETOBJBYTYPE , 0 , NV_NBCI_FUNC_GETOBJBYTYPE , NVOP_FUNC_GETOBJBYTYPE , 0 , GPS_FUNC_GETOBJBYTYPE , 0 , 0 , 0 , GPS_FUNC_GETOBJBYTYPE , 0 ,
316 NV_ACPI_GENERIC_FUNC_GETALLOBJS , NBSI_FUNC_GETALLOBJS , NVHG_FUNC_GETALLOBJS , 0 , NV_NBCI_FUNC_GETALLOBJS , NVOP_FUNC_GETALLOBJS , 0 , GPS_FUNC_GETALLOBJS , 0 , 0 , 0 , GPS_FUNC_GETALLOBJS , 0 ,
317 NV_ACPI_GENERIC_FUNC_GETEVENTLIST , 0 , NVHG_FUNC_GETEVENTLIST , NV_ACPI_DSM_MXM_FUNC_GETEVENTLIST , NV_NBCI_FUNC_GETEVENTLIST , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
318 NV_ACPI_GENERIC_FUNC_CALLBACKS , NBSI_FUNC_CALLBACKS , NVHG_FUNC_CALLBACKS , NV_ACPI_DSM_MXM_FUNC_MXCB , NV_NBCI_FUNC_CALLBACKS , 0 , 0 , GPS_FUNC_GETCALLBACKS , 0 , 0 , 0 , GPS_FUNC_GETCALLBACKS , 0 ,
319 NV_ACPI_GENERIC_FUNC_GETBACKLIGHT , 0 , NVHG_FUNC_GETBACKLIGHT , 0 , NV_NBCI_FUNC_GETBACKLIGHT , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
320 NV_ACPI_GENERIC_FUNC_MSTL , 0 , 0 , 0 , NV_NBCI_FUNC_MSTL , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
321 };
322
323 /*
324 * @_getRemappedDsmSubfunction takes as input either a generic DSM subfunction
325 * or a NBSI subfunction number and returns the subfunction number as appropriate
326 * for the DSM function desired.
327 *
328 * @param[in] acpiDsmFunction ACPI_DSM_FUNCTION DSM call indicator (specific)
329 * @param[in] acpiDsmSubFunction NvU32 subfunction number (generic)
330 * @param[out] pRemappedDsmSubFunction NvU32 * pointer to return remapped subfunction number
331 *
332 * @returns NV_STATUS of
333 * NV_ERR_NOT_SUPPORTED if unknown acpi DSM function or the subfunction
334 * could not be remapped
335 * NV_OK if subfunction number was remapped
336 */
337
338 static NV_STATUS
_getRemappedDsmSubfunction(ACPI_DSM_FUNCTION acpiDsmFunction,NvU32 acpiDsmSubFunction,NvU32 * pRemappedDsmSubFunction)339 _getRemappedDsmSubfunction
340 (
341 ACPI_DSM_FUNCTION acpiDsmFunction,
342 NvU32 acpiDsmSubFunction,
343 NvU32 *pRemappedDsmSubFunction
344 )
345 {
346 NvU32 i;
347
348 NV_ASSERT_OR_RETURN(pRemappedDsmSubFunction, NV_ERR_INVALID_ARGUMENT);
349
350 // only specific dsm functions are allowed to remap.
351 NV_ASSERT_OR_RETURN(!isGenericDsmFunction(acpiDsmFunction), NV_ERR_INVALID_ARGUMENT);
352
353 // confirm the acpiDsmSubFunction is a generic acpiDsmSubFunction
354 NV_ASSERT_OR_RETURN(isGenericDsmSubFunction(acpiDsmSubFunction), NV_ERR_INVALID_ARGUMENT);
355
356 //
357 // confirm entry rows matches number of DSM functions
358 // Use NV_ASSERT_OR_ELSE_STR for embedded %.
359 //
360 NV_ASSERT_OR_ELSE_STR(
361 !(NV_ARRAY_ELEMENTS(genDsmSubFuncRemapTable) % (ACPI_DSM_FUNCTION_COUNT + 1)),
362 "!(NV_ARRAY_ELEMENTS(genDsmSubFuncRemapTable) %% (ACPI_DSM_FUNCTION_COUNT + 1))",
363 return NV_ERR_INVALID_ARGUMENT);
364
365 // find the event in the table
366 i = 0;
367 while (i <= (NV_ARRAY_ELEMENTS(genDsmSubFuncRemapTable) - ACPI_DSM_FUNCTION_COUNT - 1))
368 {
369 if (acpiDsmSubFunction == genDsmSubFuncRemapTable[i])
370 {
371 if (genDsmSubFuncRemapTable[i + acpiDsmFunction + 1])
372 {
373 *pRemappedDsmSubFunction = genDsmSubFuncRemapTable[i + acpiDsmFunction + 1];
374 return NV_OK;
375 }
376 else
377 {
378 // Found the entry in the table. But that function doesn't support that acpiDsmSubFunction.
379 return NV_ERR_NOT_SUPPORTED;
380 }
381 }
382 i += ACPI_DSM_FUNCTION_COUNT + 1;
383 }
384
385 //
386 // someone called us with a generic acpiDsmSubFunction which isn't the table.
387 // Either add it to the table, or don't use a generic acpiDsmSubFunction.
388 //
389 DBG_BREAKPOINT();
390 return NV_ERR_OBJECT_NOT_FOUND;
391 }
392
393 //
394 // This table defines the order of acpi function GUIDs in which generic
395 // subfunctions are searched for.
396 // top priority is first (NVOP). Lowest priority is last (NVPCF).
397 //
398 static const ACPI_DSM_FUNCTION dsmOrderOfPrecedenceList[] =
399 {ACPI_DSM_FUNCTION_NVOP,
400 ACPI_DSM_FUNCTION_NBCI,
401 ACPI_DSM_FUNCTION_MXM,
402 ACPI_DSM_FUNCTION_NVHG,
403 ACPI_DSM_FUNCTION_NBSI,
404 ACPI_DSM_FUNCTION_PCFG,
405 ACPI_DSM_FUNCTION_GPS_2X,
406 ACPI_DSM_FUNCTION_GPS,
407 ACPI_DSM_FUNCTION_JT,
408 ACPI_DSM_FUNCTION_PEX,
409 ACPI_DSM_FUNCTION_NVPCF_2X,
410 ACPI_DSM_FUNCTION_NVPCF};
411
412 /*
413 * @remapDsmFunctionAndSubFunction remaps generic DSM functions and subfunctions.
414 * When the function and subfunction are generic, this returns the
415 * function/subfunction which is enabled in SBIOS ASL for that subfunction.
416 * When the function is specific and the subfunction is generic it remaps
417 * the generic subfunction to the matching function number (whether the
418 * subfunction is enabled or not).
419 *
420 * @param[in] pGpu OBJGPU
421 * @param[in/out] pAcpiDsmFunction ACPI_DSM_FUNCTION * DSM function to remap
422 * @param[in/out] pRemappedDsmSubFunction NvU32 * subfunction to remap
423 *
424 * @returns NV_STATUS of
425 * NV_ERR_NOT_SUPPORTED if unknown acpi DSM function or the subfunction
426 * could not be remapped
427 * NV_OK if subfunction number was remapped
428 */
429 NV_STATUS
remapDsmFunctionAndSubFunction(OBJGPU * pGpu,ACPI_DSM_FUNCTION * pAcpiDsmFunction,NvU32 * pRemappedDsmSubFunction)430 remapDsmFunctionAndSubFunction
431 (
432 OBJGPU *pGpu,
433 ACPI_DSM_FUNCTION *pAcpiDsmFunction,
434 NvU32 *pRemappedDsmSubFunction
435 )
436 {
437 NvU32 testDSMfuncIndex;
438 NvU32 remappedDSMSubFunction;
439 NvU32 dsmIndex;
440
441 NV_ASSERT_OR_RETURN(pAcpiDsmFunction, NV_ERR_INVALID_ARGUMENT);
442 NV_ASSERT_OR_RETURN(pRemappedDsmSubFunction, NV_ERR_INVALID_ARGUMENT);
443
444 NV_PRINTF(LEVEL_INFO,
445 "ACPI DSM remapping function = %x Subfunction = %x\n",
446 *pAcpiDsmFunction, *pRemappedDsmSubFunction);
447
448 if (!isGenericDsmFunction(*pAcpiDsmFunction))
449 {
450 // if only subfunction is generic remap it.
451 if (isGenericDsmSubFunction(*pRemappedDsmSubFunction))
452 {
453 if ((_getRemappedDsmSubfunction(*pAcpiDsmFunction, *pRemappedDsmSubFunction, pRemappedDsmSubFunction)) != NV_OK)
454 {
455 return NV_ERR_NOT_SUPPORTED;
456 }
457 }
458 return NV_OK;
459 }
460
461 //
462 // Caller asked to use the DSM GUID that supports this subfunction.
463 // I need generic subfunction numbers so I can remap them to each DSM GUID in the loop.
464 //
465 NV_ASSERT_OR_RETURN(isGenericDsmSubFunction(*pRemappedDsmSubFunction), NV_ERR_INVALID_ARGUMENT);
466
467 // If we've done this before and cached the result, return it.
468 dsmIndex = *pRemappedDsmSubFunction-NV_ACPI_GENERIC_FUNC_START;
469 if (pGpu->acpi.dsmCurrentFuncSupport & NVBIT(dsmIndex))
470 {
471 *pAcpiDsmFunction = pGpu->acpi.dsmCurrentFunc[dsmIndex];
472 *pRemappedDsmSubFunction = pGpu->acpi.dsmCurrentSubFunc[dsmIndex];
473 return NV_OK;
474 }
475
476 // Loop through (in precendence order) to find the GUID/subfunction until we find one that supports the call.
477 testDSMfuncIndex = 0;
478 while(testDSMfuncIndex < sizeof(dsmOrderOfPrecedenceList)/sizeof(dsmOrderOfPrecedenceList[0]))
479 {
480 // remap the generic subfunction number of the one that matches this function.
481 if ((_getRemappedDsmSubfunction(dsmOrderOfPrecedenceList[testDSMfuncIndex], *pRemappedDsmSubFunction, &remappedDSMSubFunction)) == NV_OK)
482 {
483 // Does this DSM support this subfunction?
484 if (testIfDsmSubFunctionEnabled(pGpu, dsmOrderOfPrecedenceList[testDSMfuncIndex], remappedDSMSubFunction) == NV_OK)
485 {
486 NV_PRINTF(LEVEL_INFO,
487 "ACPI DSM remap (func=%s/subfunc=0x%x) remapped to (func=%s/subfunc=0x%x).\n",
488 DSMFuncStr(*pAcpiDsmFunction),
489 *pRemappedDsmSubFunction,
490 DSMFuncStr(dsmOrderOfPrecedenceList[testDSMfuncIndex]),
491 remappedDSMSubFunction);
492
493 // This DSM does support this subfunction. Use it.
494 *pAcpiDsmFunction = dsmOrderOfPrecedenceList[testDSMfuncIndex];
495 *pRemappedDsmSubFunction = remappedDSMSubFunction;
496
497 return NV_OK;
498 }
499 }
500 testDSMfuncIndex++;
501 }
502
503 return NV_ERR_NOT_SUPPORTED;
504 }
505
506 /*
507 * @getDsmGetObjectSubfunction This returns the subfunction numbers for
508 * the get object by type and get all object calls based on the dsm function
509 * requested.
510 *
511 * @param[in] pGpu OBJGPU
512 * @param[in/out] pAcpiDsmFunction ACPI_DSM_FUNCTION * DSM function to use/remap
513 * if generic then will return first function that
514 * supports get object by type/get all objects.
515 * @param[out] pGetObjByTypeSubFunction NvU32 * get object by type subfunction.
516 * @param[out] pGetAllObjsSubFunction NvU32 * get all objs subfunction.
517 *
518 * @returns NV_STATUS of
519 * NV_ERR_NOT_SUPPORTED if unknown acpi DSM function or the subfunction
520 * could not be remapped
521 * NV_OK if subfunction number was remapped
522 */
523 NV_STATUS
getDsmGetObjectSubfunction(OBJGPU * pGpu,ACPI_DSM_FUNCTION * pAcpiDsmFunction,NvU32 * pGetObjByTypeSubFunction,NvU32 * pGetAllObjsSubFunction)524 getDsmGetObjectSubfunction
525 (
526 OBJGPU *pGpu,
527 ACPI_DSM_FUNCTION *pAcpiDsmFunction,
528 NvU32 *pGetObjByTypeSubFunction,
529 NvU32 *pGetAllObjsSubFunction
530 )
531 {
532 NV_STATUS rmStatus = NV_ERR_NOT_SUPPORTED;
533 NvU32 testDSMfuncIndex;
534 ACPI_DSM_FUNCTION curFuncForGetObjByType;
535 ACPI_DSM_FUNCTION curFuncForGetAllObjects;
536 NvU32 dummySubFunc;
537
538 NV_PRINTF(LEVEL_INFO, "entry *pAcpiDsmFunction = %x\n",
539 *pAcpiDsmFunction);
540
541 if (*pAcpiDsmFunction == ACPI_DSM_FUNCTION_CURRENT)
542 {
543 // determine the function that supports getobjbytype and/or getallobjects
544 curFuncForGetObjByType = ACPI_DSM_FUNCTION_CURRENT;
545 dummySubFunc = NV_ACPI_GENERIC_FUNC_GETOBJBYTYPE;
546 if (remapDsmFunctionAndSubFunction(pGpu, &curFuncForGetObjByType, &dummySubFunc) == NV_OK)
547 {
548 // get object by type supported for at least one guid... default to it.
549 *pAcpiDsmFunction = curFuncForGetObjByType;
550
551 // test get all objects to see if it's higher priority.
552 curFuncForGetAllObjects = ACPI_DSM_FUNCTION_CURRENT;
553 dummySubFunc = NV_ACPI_GENERIC_FUNC_GETALLOBJS;
554 if (remapDsmFunctionAndSubFunction(pGpu, &curFuncForGetAllObjects, &dummySubFunc) == NV_OK)
555 {
556 testDSMfuncIndex = 0;
557 while(testDSMfuncIndex < sizeof(dsmOrderOfPrecedenceList)/sizeof(dsmOrderOfPrecedenceList[0]))
558 {
559 if (dsmOrderOfPrecedenceList[testDSMfuncIndex] == curFuncForGetObjByType)
560 {
561 // found get object by type first. break now... it's already the default
562 break;
563 } else if (dsmOrderOfPrecedenceList[testDSMfuncIndex] == curFuncForGetAllObjects)
564 {
565 // found get all objects at higher priority than get object by type... use it.
566 *pAcpiDsmFunction = curFuncForGetAllObjects;
567 break;
568 }
569 testDSMfuncIndex++;
570 }
571 }
572 }
573 else
574 {
575 curFuncForGetAllObjects = ACPI_DSM_FUNCTION_CURRENT;
576 dummySubFunc = NV_ACPI_GENERIC_FUNC_GETALLOBJS;
577 if (remapDsmFunctionAndSubFunction(pGpu, &curFuncForGetAllObjects, &dummySubFunc) != NV_OK)
578 {
579 return NV_ERR_NOT_SUPPORTED;
580 }
581 // get all objects supported for at least one guid.
582 *pAcpiDsmFunction = curFuncForGetAllObjects;
583 }
584 }
585
586 // determine the get object subfunction numbers for this acpi dsm function.
587 switch (*pAcpiDsmFunction)
588 {
589 case ACPI_DSM_FUNCTION_NBSI:
590 *pGetObjByTypeSubFunction = NBSI_FUNC_GETOBJBYTYPE;
591 *pGetAllObjsSubFunction = NBSI_FUNC_GETALLOBJS;
592 rmStatus = NV_OK;
593 break;
594
595 case ACPI_DSM_FUNCTION_NVHG:
596 *pGetObjByTypeSubFunction = NVHG_FUNC_GETOBJBYTYPE;
597 *pGetAllObjsSubFunction = NVHG_FUNC_GETALLOBJS;
598 rmStatus = NV_OK;
599 break;
600
601 case ACPI_DSM_FUNCTION_NBCI:
602 *pGetObjByTypeSubFunction = NV_NBCI_FUNC_GETOBJBYTYPE;
603 *pGetAllObjsSubFunction = NV_NBCI_FUNC_GETALLOBJS;
604 rmStatus = NV_OK;
605 break;
606
607 case ACPI_DSM_FUNCTION_NVOP:
608 *pGetObjByTypeSubFunction = NVOP_FUNC_GETOBJBYTYPE;
609 *pGetAllObjsSubFunction = NVOP_FUNC_GETALLOBJS;
610 rmStatus = NV_OK;
611 break;
612
613 case ACPI_DSM_FUNCTION_GPS_2X: /* fallthrough */
614 case ACPI_DSM_FUNCTION_GPS:
615 *pGetObjByTypeSubFunction = GPS_FUNC_GETOBJBYTYPE;
616 *pGetAllObjsSubFunction = GPS_FUNC_GETALLOBJS;
617 rmStatus = NV_OK;
618 break;
619
620 case ACPI_DSM_FUNCTION_PCFG:
621 case ACPI_DSM_FUNCTION_PEX:
622 default:
623 rmStatus = NV_ERR_NOT_SUPPORTED;
624 break;
625 }
626
627 NV_PRINTF(LEVEL_INFO,
628 "exit *pAcpiDsmFunction = 0x%x *pGetObjByTypeSubFunction=0x%x, status=%x\n",
629 *pAcpiDsmFunction, *pGetObjByTypeSubFunction,
630 rmStatus);
631
632 return rmStatus;
633 }
634
635 /*
636 * @checkDsmCall checks if the function/subfunction call should be performed
637 * Generic function/subfunctions are remapped to real/current functions and
638 * subfunctions.
639 *
640 * Notes on handling the Get supported functions list cache.
641 * 1) Initially the cache status is set to DSM_FUNC_STATUS_UNKNOWN.
642 * 2) First time osCallACPI_DSM is called for get supported functions list
643 * 2A) The cache status is DSM_FUNC_STATUS_UNKNOWN so it changes the cache
644 * state to DSM_FUNC_STATUS_FAILED and
645 * returns NV_WARN_MORE_PROCESSING_REQUIRED so
646 * osCallACPI_DSM will do the actual call.
647 * 2B) after doing the actual call osCallACPI_DSM calls
648 * cacheDsmSupportedFunction which caches (on success) the return data
649 * and changes the cache state to DSM_FUNC_STATUS_SUCCESS.
650 * If the call failed, the cache state is left at DSM_FUNC_STATUS_FAILED.
651 * 3) Later calls from osCallACPI_DSM to get supported functions list.
652 * 3A) If the cache status is DSM_FUNC_STATUS_SUCCESS... return cache and
653 * status NV_OK.
654 * 3B) If the cache status is DSM_FUNC_STATUS_FAILED... return status
655 * NV_ERR_OPERATING_SYSTEM
656 *
657 * The return status when subfunction is get supported functions list is either
658 * 1) NV_OK status... pInOut has cached copy... no more processing required.
659 * 2) NV_WARN_MORE_PROCESSING_REQUIRED status... osCallACPI_DSM should
660 * continue on and try the subfunction.
661 * 3) NV_ERR_OPERATING_SYSTEM status... if the call previously failed.
662 * 4) NV_ERR_BUFFER_TOO_SMALL status... if the buffer is too small.
663 *
664 * For subfunctions other than get supported subfunction return options are:
665 * 1) If the subfunction should be tried it returns NV_WARN_MORE_PROCESSING_REQUIRED.
666 * 2) If the subfunction should not be tried it returns NV_ERR_NOT_SUPPORTED
667 *
668 * @param[in] pGpu OBJGPU
669 * @param[in/out] pAcpiDsmFunction ACPI_DSM_FUNCTION * DSM function
670 * on return this may be remapped if
671 * current/generic function was input.
672 * @param[in/out] pAcpiDsmSubFunction NvU32 * DSM subfunction to use.
673 * on return this may be remapped if
674 * current/generic subfunction was input.
675 * @param[out] pInOut NvU32 * pointer to return get supported return
676 * data previously cached (if requested).
677 * @param[in/out] pSize NvU32 * size of data in pInOut. On input is
678 * size of pInOut. On output is size of data
679 * returned if get supported subfunction call is
680 * used.
681 *
682 * @returns NV_STATUS of
683 * NV_OK all processing is complete. Used when returning the cached
684 * copy of get supported functions list.
685 * NV_WARN_MORE_PROCESSING_REQUIRED call to subfunction should be
686 * performed.
687 * NV_ERR_NOT_SUPPORTED Unable to remap function/subfunction.
688 * NV_ERR_BUFFER_TOO_SMALL pInOut is too small to return cached data.
689 * NV_ERR_OPERATING_SYSTEM subfunction is not enabled.
690 */
691 NV_STATUS
checkDsmCall(OBJGPU * pGpu,ACPI_DSM_FUNCTION * pAcpiDsmFunction,NvU32 * pAcpiDsmSubFunction,NvU32 * pInOut,NvU16 * pSize)692 checkDsmCall
693 (
694 OBJGPU *pGpu,
695 ACPI_DSM_FUNCTION *pAcpiDsmFunction,
696 NvU32 *pAcpiDsmSubFunction,
697 NvU32 *pInOut,
698 NvU16 *pSize
699 )
700 {
701 NV_STATUS rmStatus = NV_ERR_NOT_SUPPORTED;
702
703 NV_ASSERT_OR_RETURN(pAcpiDsmFunction, NV_ERR_INVALID_ARGUMENT);
704 NV_ASSERT_OR_RETURN(pAcpiDsmSubFunction, NV_ERR_INVALID_ARGUMENT);
705 NV_ASSERT_OR_RETURN(pInOut, NV_ERR_INVALID_ARGUMENT);
706 NV_ASSERT_OR_RETURN(pSize, NV_ERR_INVALID_ARGUMENT);
707
708 // Do any remapping of subfunction if function is current
709 if (remapDsmFunctionAndSubFunction(pGpu, pAcpiDsmFunction, pAcpiDsmSubFunction) != NV_OK)
710 {
711 return NV_ERR_NOT_SUPPORTED;
712 }
713
714 NV_ASSERT_OR_RETURN(*pAcpiDsmFunction < ACPI_DSM_FUNCTION_COUNT, NV_ERR_INVALID_ARGUMENT);
715 NV_ASSERT_OR_RETURN(*pAcpiDsmSubFunction < NV_ACPI_GENERIC_FUNC_START, NV_ERR_INVALID_ARGUMENT);
716
717 if (isDsmGetSuppFuncListCached(pGpu, *pAcpiDsmFunction))
718 {
719 if (*pAcpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT)
720 {
721 if (*pSize < pGpu->acpi.dsm[*pAcpiDsmFunction].suppFuncsLen)
722 {
723 return NV_ERR_BUFFER_TOO_SMALL;
724 }
725 else
726 {
727 // Return the cached values.
728 *pSize = (NvU16) pGpu->acpi.dsm[*pAcpiDsmFunction].suppFuncsLen;
729 portMemCopy((NvU8 *)pInOut,
730 *pSize,
731 (NvU8 *)pGpu->acpi.dsm[*pAcpiDsmFunction].suppFuncs,
732 *pSize);
733 return NV_OK;
734 }
735 }
736 else
737 {
738 // not subfunction 0... check cached supported functions list
739 rmStatus = testIfDsmSubFunctionEnabled(pGpu, *pAcpiDsmFunction, *pAcpiDsmSubFunction);
740 if (rmStatus != NV_OK)
741 {
742 // subfunction is not enabled in get supported subfunction list.
743 return rmStatus;
744 }
745 }
746 }
747 else
748 {
749 // haven't cached get supported functions yet... or it's failed.
750 if (isDsmGetSuppFuncListFailed(pGpu, *pAcpiDsmFunction))
751 {
752 // get supported subfunctions call failed before. Don't try again.
753 return NV_ERR_OPERATING_SYSTEM;
754 }
755
756 // assert if subfunction 0 is not the first one called.
757 NV_ASSERT_OR_RETURN(*pAcpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT, NV_ERR_INVALID_ARGUMENT);
758 }
759
760 if (*pAcpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT)
761 {
762 pGpu->acpi.dsm[*pAcpiDsmFunction].suppFuncStatus = DSM_FUNC_STATUS_FAILED;
763 }
764
765 // indicate we can go ahead and read it.
766 return NV_WARN_MORE_PROCESSING_REQUIRED;
767 }
768
769 static void
_acpiDsmSupportedFuncCacheInit(OBJGPU * pGpu)770 _acpiDsmSupportedFuncCacheInit
771 (
772 OBJGPU *pGpu
773 )
774 {
775 ACPI_DSM_FUNCTION func;
776 NV_STATUS status = NV_OK;
777 NvU16 rtnSize;
778 NvU8 supportFuncs[MAX_DSM_SUPPORTED_FUNCS_RTN_LEN];
779
780 // Just checking to make sure this is correct!
781 NV_ASSERT_OR_RETURN_VOID(0 == ACPI_DSM_FUNCTION_NBSI);
782
783 portMemSet(supportFuncs, 0, sizeof(supportFuncs));
784
785 //
786 // loop through all guids. The read will cache the subfunction list (if
787 // available)
788 //
789 for(func = ACPI_DSM_FUNCTION_NBSI; func < ACPI_DSM_FUNCTION_COUNT; func++)
790 {
791 if ((func == ACPI_DSM_FUNCTION_GPS) &&
792 (pGpu->acpi.dsm[ACPI_DSM_FUNCTION_GPS_2X].suppFuncStatus == DSM_FUNC_STATUS_SUCCESS))
793 {
794 //
795 // If GPS_2X is supported, skip checking leagacy GPS 1X.
796 //
797 continue;
798 }
799
800 //
801 // Skip over non-NBCI since we don't want to waste boot time here on Tegra
802 // Remove this if we want to support other ACPI functions.
803 //
804 if (IsTEGRA(pGpu) && (func != ACPI_DSM_FUNCTION_NBCI))
805 {
806 if (pGpu->acpi.dsm[func].suppFuncStatus == DSM_FUNC_STATUS_UNKNOWN)
807 {
808 pGpu->acpi.dsm[func].suppFuncStatus = DSM_FUNC_STATUS_FAILED;
809 }
810 }
811
812 if ((pGpu->acpi.dsm[func].suppFuncStatus == DSM_FUNC_STATUS_OVERRIDE) ||
813 (pGpu->acpi.dsm[func].suppFuncStatus == DSM_FUNC_STATUS_FAILED))
814 {
815 //
816 // skip reading the supported functions if
817 // it's been over-ridden or previously failed.
818 //
819 continue;
820 }
821
822
823 // try package type for argument 3.
824 pGpu->acpi.dsm[func].bArg3isInteger = NV_FALSE;
825 rtnSize = sizeof(supportFuncs);
826 status = osCallACPI_DSM(pGpu,
827 func,
828 NV_ACPI_ALL_FUNC_SUPPORT,
829 (NvU32 *) &supportFuncs,
830 &rtnSize);
831 if (status != NV_OK)
832 {
833 if (status == NV_ERR_INVALID_ARGUMENT)
834 {
835 // maybe an older SBIOS, try integer type for argument 3.
836 pGpu->acpi.dsm[func].bArg3isInteger = NV_TRUE;
837 rtnSize = sizeof(supportFuncs);
838 status = osCallACPI_DSM(pGpu,
839 func,
840 NV_ACPI_ALL_FUNC_SUPPORT,
841 (NvU32 *) &supportFuncs,
842 &rtnSize);
843 }
844 }
845 if (_isDsmError(status, rtnSize, (NvU32 *) &supportFuncs))
846 {
847 NV_PRINTF(LEVEL_INFO,
848 "%s DSM function not present in ASL.\n",
849 DSMFuncStr(func));
850 //
851 // If the call didn't set the failed status force it now.
852 // This might be because the call is stubbed out.
853 //
854 pGpu->acpi.dsm[func].suppFuncStatus = DSM_FUNC_STATUS_FAILED;
855 }
856
857 }
858 }
859
/*!
 * @brief Determine which DSM GUIDs provide event callbacks and latch, per
 *        callback type, the highest-priority function that provides it.
 *
 * Walks a fixed priority-ordered list of callback-capable DSM functions,
 * reads each one's callback mask via NV_ACPI_GENERIC_FUNC_CALLBACKS, and
 * records which DSM function services hotplug, config-status,
 * post-power-state and 3D-stereo-state notifications. Because the list is
 * iterated lowest-priority first, later (higher-priority) entries overwrite
 * earlier ones.
 *
 * @param[in] pGpu OBJGPU pointer
 */
static void
_acpiDsmCallbackInit
(
    OBJGPU *pGpu
)
{
    ACPI_DSM_FUNCTION func;
    NV_STATUS status = NV_OK;
    NvU32 callbacks;
    NvU16 rtnSize;
    NvU32 testDSMfuncIndex;
    // lowest priority is first entry, highest priority is last entry

    // this list only includes GUIDS with callbacks.
    ACPI_DSM_FUNCTION callbackOrderOfPrecedenceList[] =
    {
        ACPI_DSM_FUNCTION_NBCI,
        ACPI_DSM_FUNCTION_NVHG,
        ACPI_DSM_FUNCTION_MXM,
        ACPI_DSM_FUNCTION_GPS_2X,
        ACPI_DSM_FUNCTION_GPS,
    };

    // Initialize these now. ACPI_DSM_FUNCTION_COUNT means "no provider found".
    pGpu->acpi.dispStatusHotplugFunc = ACPI_DSM_FUNCTION_COUNT;
    pGpu->acpi.dispStatusConfigFunc = ACPI_DSM_FUNCTION_COUNT;
    pGpu->acpi.perfPostPowerStateFunc = ACPI_DSM_FUNCTION_COUNT;
    pGpu->acpi.stereo3dStateActiveFunc = ACPI_DSM_FUNCTION_COUNT;

    // Loop through the list of GUIDs that support callbacks in priority order
    for (testDSMfuncIndex = 0;
         testDSMfuncIndex < (sizeof(callbackOrderOfPrecedenceList)/sizeof(callbackOrderOfPrecedenceList[0]));
         testDSMfuncIndex++)
    {
        func = callbackOrderOfPrecedenceList[testDSMfuncIndex];
        pGpu->acpi.dsm[func].callbackStatus = DSM_FUNC_STATUS_FAILED;

        rtnSize = sizeof(callbacks);
        callbacks = 0;

        if ((func == ACPI_DSM_FUNCTION_GPS) &&
            (pGpu->acpi.dsm[ACPI_DSM_FUNCTION_GPS_2X].callbackStatus == DSM_FUNC_STATUS_SUCCESS))
        {
            //
            // If GPS_2X has enabled callback, skip legacy GPS 1X.
            //
            continue;
        }

        if (testIfDsmSubFunctionEnabled(pGpu, func, NV_ACPI_GENERIC_FUNC_CALLBACKS) == NV_OK)
        {
            status = osCallACPI_DSM(pGpu,
                                    func,
                                    NV_ACPI_GENERIC_FUNC_CALLBACKS,
                                    &callbacks,
                                    &rtnSize);

            if (_isDsmError(status, rtnSize, &callbacks))
            {
                NV_PRINTF(LEVEL_ERROR,
                          "SBIOS suggested %s supports function %d, but the call failed!\n",
                          DSMFuncStr(func),
                          NV_ACPI_GENERIC_FUNC_CALLBACKS);
            }
            else
            {
                // Cache the raw callback mask and mark this GUID's callbacks usable.
                pGpu->acpi.dsm[func].callback = callbacks;
                pGpu->acpi.dsm[func].callbackStatus = DSM_FUNC_STATUS_SUCCESS;

                // replace lower priority hotplug callback.
                if (FLD_TEST_DRF(_ACPI, _CALLBACKS_RET, _HOTPLUG, _NOTIFY, pGpu->acpi.dsm[func].callback))
                {
                    pGpu->acpi.dispStatusHotplugFunc = func;
                }

                // replace lower priority status config callback.
                if (FLD_TEST_DRF(_ACPI, _CALLBACKS_RET, _CONFIG, _NOTIFY, pGpu->acpi.dsm[func].callback))
                {
                    pGpu->acpi.dispStatusConfigFunc = func;
                }

                // replace lower priority post power state callback.
                if (FLD_TEST_DRF(_ACPI, _CALLBACKS_RET, _POSTPOWERSTATE, _NOTIFY, pGpu->acpi.dsm[func].callback))
                {
                    pGpu->acpi.perfPostPowerStateFunc = func;
                }

                // replace 3D stereo active state callback.
                if (FLD_TEST_DRF(_ACPI, _CALLBACKS_RET, _3DSTEREOSTATE_ACTIVE, _NOTIFY, pGpu->acpi.dsm[func].callback))
                {
                    pGpu->acpi.stereo3dStateActiveFunc = func;
                }

            }
        }
    }
}
957
958
959 /*
960 * @brief Initialize the ACPI DSM caps related information
961 *
962 * @param[in] pGpu OBJGPU pointer
963 *
964 * @returns
965 */
966 static void
_acpiDsmCapsInit(OBJGPU * pGpu)967 _acpiDsmCapsInit
968 (
969 OBJGPU *pGpu
970 )
971 {
972 ACPI_DSM_FUNCTION func;
973 NV_STATUS status = NV_OK;
974 NvU32 platCaps;
975 NvU16 rtnSize;
976 NvU32 asmDsmSubFunction;
977
978 // handle the NBCI specific platcaps init.
979 func = ACPI_DSM_FUNCTION_NBCI;
980 asmDsmSubFunction = NV_NBCI_FUNC_PLATCAPS;
981 if (testIfDsmSubFunctionEnabled(pGpu, func, asmDsmSubFunction) == NV_OK)
982 {
983 rtnSize = sizeof(platCaps);
984 status = osCallACPI_DSM(pGpu,
985 func,
986 asmDsmSubFunction,
987 &platCaps,
988 &rtnSize);
989
990 if (_isDsmError(status, rtnSize, &platCaps))
991 {
992 NV_PRINTF(LEVEL_ERROR,
993 "SBIOS suggested %s supports function %d, but the call failed!\n",
994 DSMFuncStr(func), asmDsmSubFunction);
995 }
996 else
997 {
998 // cache for later retrieval
999 pGpu->acpi.dsmPlatCapsCache[func] = platCaps;
1000
1001 }
1002 }
1003 }
1004
/*
 * @brief Initialize the cache of generic function/subfunctions
 *
 * For every generic DSM subfunction, resolve which concrete DSM GUID (if any)
 * implements it via remapDsmFunctionAndSubFunction(), so later callers can use
 * ACPI_DSM_FUNCTION_CURRENT and be redirected cheaply. For each generic
 * subfunction that has a real backing implementation, the corresponding bit in
 * pGpu->acpi.dsmCurrentFuncSupport is set.
 *
 * @param[in] pGpu OBJGPU pointer
 *
 * @returns
 */
static void
_acpiGenFuncCacheInit
(
    OBJGPU *pGpu
)
{
    NvU32 testGenSubFunc;
    NvU32 dsmIndex;
    NV_STATUS status = NV_OK;

    // create a bitwise list of generic dsm supported functions available.
    pGpu->acpi.dsmCurrentFuncSupport = 0;

    // Loop through each generic subfunction, determine which is active available.
    for (testGenSubFunc = NV_ACPI_GENERIC_FUNC_START; testGenSubFunc <= NV_ACPI_GENERIC_FUNC_LAST_SUBFUNCTION; testGenSubFunc++)
    {
        // 0-based cache slot for this generic subfunction.
        dsmIndex = testGenSubFunc-NV_ACPI_GENERIC_FUNC_START;

        // Seed the slot with the unmapped (generic) identity before remapping.
        pGpu->acpi.dsmCurrentFunc[dsmIndex] = ACPI_DSM_FUNCTION_CURRENT;
        pGpu->acpi.dsmCurrentSubFunc[dsmIndex] = testGenSubFunc;

        //
        // This remaps the generic function/subfunction if it can.
        // If not available it leaves it as ACPI_DSM_FUNCTION_CURRENT and the generic subfunction.
        //

        status = remapDsmFunctionAndSubFunction(pGpu, &pGpu->acpi.dsmCurrentFunc[dsmIndex], &pGpu->acpi.dsmCurrentSubFunc[dsmIndex]);
        if (status == NV_OK)
        {
            if (pGpu->acpi.dsmCurrentFunc[dsmIndex] == ACPI_DSM_FUNCTION_CURRENT)
            {
                // Entry unchanged: no GUID implements this generic subfunction.
                NV_PRINTF(LEVEL_INFO,
                          "DSM Generic subfunction 0x%x is not supported. Leaving entry at func %s subfunction 0x%x.\n",
                          testGenSubFunc,
                          DSMFuncStr(pGpu->acpi.dsmCurrentFunc[dsmIndex]),
                          pGpu->acpi.dsmCurrentSubFunc[dsmIndex]);
            }
            else
            {
                // set the bit indicating we do support this generic dsm subfunction.
                pGpu->acpi.dsmCurrentFuncSupport |= NVBIT(dsmIndex);
                NV_PRINTF(LEVEL_INFO,
                          "DSM Generic subfunction 0x%x supported. Mapping to func %s subfunction 0x%x\n",
                          testGenSubFunc,
                          DSMFuncStr(pGpu->acpi.dsmCurrentFunc[dsmIndex]),
                          pGpu->acpi.dsmCurrentSubFunc[dsmIndex]);
            }
        }
        else
        {
            // Remap itself failed (not just "unsupported") — likely table corruption.
            NV_PRINTF(LEVEL_INFO,
                      "DSM Test generic subfunction 0x%x is not supported. Indicates possible table corruption.\n",
                      testGenSubFunc);
        }
    }
}
1068
1069 /*
1070 * @brief Initialize the ACPI DSM features such as mdtl support.
1071 *
1072 * @param[in] pGpu OBJGPU pointer
1073 *
1074 * @returns
1075 */
1076 static void
_acpiDsmFeatureInit(OBJGPU * pGpu)1077 _acpiDsmFeatureInit
1078 (
1079 OBJGPU *pGpu
1080 )
1081 {
1082 if (pGpu->acpi.MDTLFeatureSupport == DSM_FUNC_STATUS_UNKNOWN)
1083 {
1084 //
1085 // The mdtl feature requires both mdtl and displaystatus subfunctions supported.
1086 // We could add verify more validity checks like...
1087 // 1) Read in the MDTL table and confirming the contents are valid
1088 // 2) Try the displaystatus status command and see it returns valid data.
1089 // For now, we trust the SBIOS ASL. If it tells us this is supported then
1090 // the rest will work as well.
1091 //
1092 if ((testIfDsmSubFunctionEnabled(pGpu, ACPI_DSM_FUNCTION_CURRENT, NV_ACPI_GENERIC_FUNC_MDTL) == NV_OK) &&
1093 (testIfDsmSubFunctionEnabled(pGpu, ACPI_DSM_FUNCTION_CURRENT, NV_ACPI_GENERIC_FUNC_DISPLAYSTATUS) == NV_OK))
1094 {
1095 pGpu->acpi.MDTLFeatureSupport = DSM_FUNC_STATUS_SUCCESS;
1096 }
1097 else
1098 {
1099 pGpu->acpi.MDTLFeatureSupport = DSM_FUNC_STATUS_FAILED;
1100 }
1101 }
1102 }
1103
/*!
 * @brief Cache ACPI method results (DOD, JT caps, per-display mux state via
 *        MXDM/MXID/MXDS, and Optimus caps) into pGpu->acpiMethodData so later
 *        consumers don't need to re-invoke the SBIOS.
 *
 * Each sub-result carries its own NV_STATUS; a failure of one method does not
 * stop the remaining methods from being queried.
 *
 * @param[in] pGpu OBJGPU pointer
 */
static void
_acpiCacheMethodData
(
    OBJGPU *pGpu
)
{
    NV_STATUS status;
    NvU32 inOut = 0;
    NvU16 rtnSize = sizeof(inOut);
    NvU32 tableLen = 0, acpiidIndex = 0, mode = 0, muxPartId = 0, state = 0;

    // This bit is used for checking if pGpu::acpiMethodData need to be used or not.
    pGpu->acpiMethodData.bValid = NV_TRUE;

    // Fill in the DOD Method Data.
    pGpu->acpiMethodData.dodMethodData.acpiIdListLen = sizeof(pGpu->acpiMethodData.dodMethodData.acpiIdList);

    status = osCallACPI_DOD(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList, &pGpu->acpiMethodData.dodMethodData.acpiIdListLen);

    pGpu->acpiMethodData.dodMethodData.status = status;

    // Fill in the JT Method Data.
    status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_JT, JT_FUNC_CAPS, &inOut, &rtnSize);

    pGpu->acpiMethodData.jtMethodData.status = status;
    pGpu->acpiMethodData.jtMethodData.jtCaps = inOut;
    // Revision id is a bitfield of the caps word.
    pGpu->acpiMethodData.jtMethodData.jtRevId = (NvU16)DRF_VAL(_JT_FUNC, _CAPS, _REVISION_ID, inOut);
    gpuSetGC6SBIOSCapabilities(pGpu);

    // Fill in the MUX Method Data.
    portMemSet(pGpu->acpiMethodData.muxMethodData.acpiIdMuxModeTable, 0, sizeof(pGpu->acpiMethodData.muxMethodData.acpiIdMuxModeTable));
    portMemSet(pGpu->acpiMethodData.muxMethodData.acpiIdMuxPartTable, 0, sizeof(pGpu->acpiMethodData.muxMethodData.acpiIdMuxPartTable));
    portMemSet(pGpu->acpiMethodData.muxMethodData.acpiIdMuxStateTable, 0, sizeof(pGpu->acpiMethodData.muxMethodData.acpiIdMuxStateTable));
    if (pGpu->acpiMethodData.dodMethodData.status == NV_OK)
    {
        // DOD reports a byte length; convert to a count of NvU32 ACPI ids.
        tableLen = pGpu->acpiMethodData.dodMethodData.acpiIdListLen / sizeof(NvU32);
        pGpu->acpiMethodData.muxMethodData.tableLen = tableLen;
        for (acpiidIndex = 0; acpiidIndex < tableLen; acpiidIndex++)
        {
            // Query mux mode (MXDM), mux part id (MXID) and mux state (MXDS)
            // for each display ACPI id; each row keeps its own call status.
            status = osCallACPI_MXDM(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList[acpiidIndex], &mode);
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxModeTable[acpiidIndex].acpiId = pGpu->acpiMethodData.dodMethodData.acpiIdList[acpiidIndex];
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxModeTable[acpiidIndex].mode = mode;
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxModeTable[acpiidIndex].status = status;

            status = osCallACPI_MXID(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList[acpiidIndex], &muxPartId);
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxPartTable[acpiidIndex].acpiId = pGpu->acpiMethodData.dodMethodData.acpiIdList[acpiidIndex];
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxPartTable[acpiidIndex].mode = muxPartId;
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxPartTable[acpiidIndex].status = status;

            status = osCallACPI_MXDS(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList[acpiidIndex], &state);
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxStateTable[acpiidIndex].acpiId = pGpu->acpiMethodData.dodMethodData.acpiIdList[acpiidIndex];
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxStateTable[acpiidIndex].mode = state;
            pGpu->acpiMethodData.muxMethodData.acpiIdMuxStateTable[acpiidIndex].status = status;
            // Reset scratch values so a failed call next iteration can't reuse stale data.
            mode = muxPartId = state = 0;
        }
    }

    // Fill in the Optimus caps Method Data.
    status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_NVOP, NVOP_FUNC_OPTIMUSCAPS,
                            &pGpu->acpiMethodData.capsMethodData.optimusCaps, &rtnSize);
    pGpu->acpiMethodData.capsMethodData.status = status;
}
1166
/*
 * @brief Initialize the ACPI Device Specific Methods
 *
 * Builds every DSM-related cache on pGpu->acpi in dependency order: the
 * supported-functions cache is built first, since the later steps probe
 * availability through testIfDsmSubFunctionEnabled().
 *
 * @param[in] pGpu OBJGPU pointer
 *
 * @returns
 */

void acpiDsmInit
(
    OBJGPU *pGpu
)
{
    // initialize the cache of the supported Functions list.
    _acpiDsmSupportedFuncCacheInit(pGpu);
    // map generic subfunctions to their backing GUIDs.
    _acpiGenFuncCacheInit(pGpu);
    // discover callback providers, platform caps, and feature flags.
    _acpiDsmCallbackInit(pGpu);
    _acpiDsmCapsInit(pGpu);
    _acpiDsmFeatureInit(pGpu);
    // snapshot DOD/JT/MUX/Optimus method results.
    _acpiCacheMethodData(pGpu);
}
1188
1189 /*!
1190 * @brief Get NBSI object data accordingly by global source through ACPI function
1191 *
1192 * @param[in] pGpu OBJGPU pointer
1193 * @param[out] pNbsiObjData NBSI object data
1194 * @param[in] pSzOfpNbsiObjData Size of NBSI object data
1195 * @param[in] acpiFunction ACPI function
1196 * @param[in] objType NBSI global object type
1197 * @param[in] validateOption NBSI valitation option
1198 *
1199 * @returns NV_OK All processing is complete.
1200 * @returns NV_ERR_NOT_SUPPORTED NBSI function not supported.
1201 * @returns NV_ERR_BUFFER_TOO_SMALL pNbsiObjData is too small to return cached data.
1202 * @returns NV_ERR_GENERIC Otherwise.
1203 */
1204 NV_STATUS
getAcpiDsmObjectData(OBJGPU * pGpu,NvU8 ** pNbsiObjData,NvU32 * pSzOfpNbsiObjData,ACPI_DSM_FUNCTION acpiFunction,NBSI_GLOB_TYPE objType,NBSI_VALIDATE validateOption)1205 getAcpiDsmObjectData
1206 (
1207 OBJGPU *pGpu,
1208 NvU8 **pNbsiObjData,
1209 NvU32 *pSzOfpNbsiObjData,
1210 ACPI_DSM_FUNCTION acpiFunction,
1211 NBSI_GLOB_TYPE objType,
1212 NBSI_VALIDATE validateOption
1213 )
1214 {
1215 NV_STATUS status;
1216 NvU32 rtnStatus;
1217 NBSI_SOURCE_LOC globSrc;
1218 NvU8 globIdx;
1219 NvU32 totalGlobSize;
1220
1221 NV_ASSERT(pNbsiObjData);
1222 NV_ASSERT(pSzOfpNbsiObjData);
1223
1224 // read best fit, but leave size 0 so it returns the size.
1225 globIdx = 0;
1226 if ((acpiFunction == ACPI_DSM_FUNCTION_NBSI) ||
1227 (acpiFunction == ACPI_DSM_FUNCTION_NBCI) ||
1228 (acpiFunction == ACPI_DSM_FUNCTION_CURRENT))
1229 {
1230 globSrc = 0; // scan all NBSI/NBCI sources
1231 }
1232 else
1233 {
1234 globSrc = NBSI_TBL_SOURCE_ACPI;
1235 }
1236
1237 status = getNbsiObjByType(pGpu, objType, &globSrc, &globIdx, 0, *pNbsiObjData, pSzOfpNbsiObjData, &totalGlobSize, &rtnStatus, acpiFunction, validateOption);
1238 if (status == NV_OK)
1239 {
1240 status = NV_ERR_GENERIC;
1241
1242 // got it
1243 if (rtnStatus == NV2080_CTRL_BIOS_GET_NBSI_SUCCESS)
1244 {
1245 status = NV_OK;
1246 }
1247
1248 // almost got it but a bad hash was found.
1249 if (rtnStatus == NV2080_CTRL_BIOS_GET_NBSI_BAD_HASH)
1250 {
1251 NV_PRINTF(LEVEL_INFO,
1252 "ACPI DSM object (type=0x%x) signature check failed!\n",
1253 objType);
1254 status = NV_ERR_GENERIC;
1255 return status;
1256 }
1257
1258 // couldn't fit it
1259 if (rtnStatus == NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE)
1260 {
1261 // return the actual size needed
1262 *pSzOfpNbsiObjData = totalGlobSize;
1263 status = NV_ERR_BUFFER_TOO_SMALL;
1264 }
1265 }
1266
1267 return status;
1268 }
1269
1270 /*
1271 * Clear DSM function cache status
1272 *
1273 * @param[in] pGpu OBJGPU
1274 * @param[in] acpiDsmFunction ACPI_DSM_FUNCTION DSM function
1275 * @param[in] acpiDsmSubFunction NvU32
1276 */
uncacheDsmFuncStatus(OBJGPU * pGpu,ACPI_DSM_FUNCTION acpiDsmFunction,NvU32 acpiDsmSubFunction)1277 void uncacheDsmFuncStatus
1278 (
1279 OBJGPU *pGpu,
1280 ACPI_DSM_FUNCTION acpiDsmFunction,
1281 NvU32 acpiDsmSubFunction
1282 )
1283 {
1284 if (acpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT)
1285 {
1286 pGpu->acpi.dsm[acpiDsmFunction].suppFuncsLen = 0;
1287 pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus = DSM_FUNC_STATUS_UNKNOWN;
1288 }
1289 }
1290