/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK	0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...)			\
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?			\
			     (smu)->ppt_funcs->intf(smu, ##args) :	\
			     -ENOTSUPP) :				\
			    -EINVAL)
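
/*
 * The macro above folds both NULL checks into the call itself, so a
 * caller can dispatch an optional per-ASIC hook in one line, e.g.
 *
 *	ret = smu_cmn_call_asic_func(get_enabled_mask, smu, &mask);
 *
 * -EINVAL means there is no ppt_funcs table at all, while -ENOTSUPP
 * means the table exists but this particular hook is not implemented.
 */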

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32(smu->param_reg);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU has exported a unified header file containing these
 * macros, which we can then simply include and use. At the moment,
 * these error codes are unfortunately defined per-ASIC by the SMU,
 * yet this is a single driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be:
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The values here are not defined by macros, because I'd rather we
 * include a single header file which defines them, which is
 * maintained by the SMU FW team, so that we're impervious to firmware
 * changes. At the moment those values are defined in various header
 * files, one for each ASIC, yet here we're a single ASIC-agnostic
 * interface. Such a change can be followed up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32(smu->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		msg_idx = RREG32(smu->msg_reg);
		prm     = RREG32(smu->param_reg);
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command, but the command's
		 * result status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

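	/*
	 * Clear the response register first so that a subsequent poll
	 * sees a fresh status, then latch the parameter; the write to
	 * the message register is what actually kicks off the request,
	 * so it goes last.
	 */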
	WREG32(smu->resp_reg, 0);
	WREG32(smu->param_reg, param);
	WREG32(smu->msg_reg, msg);
}

static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
					       enum smu_message_type msg)
{
	return smu->message_map[msg].flags;
}

static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
				    enum smu_message_type msg, bool *poll)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t flags, resp;
	bool fed_status;

	flags = __smu_cmn_get_msg_flags(smu, msg);
	*poll = true;

	/* When there is a RAS fatal error, the FW won't process
	 * non-RAS-priority messages. Don't allow any messages other
	 * than RAS priority messages.
	 */
	fed_status = amdgpu_ras_get_fed_status(adev);
	if (fed_status) {
		if (!(flags & SMU_MSG_RAS_PRI)) {
			dev_dbg(adev->dev,
				"RAS error detected, skip sending %s",
				smu_get_message_name(smu, msg));
			return -EACCES;
		}

		/* FW will ignore non-priority messages when a RAS fatal
		 * error is detected. Hence it is possible that a previous
		 * message wouldn't have gotten a response. Allow to continue
		 * without polling for response status for priority messages.
		 */
		resp = RREG32(smu->resp_reg);
		dev_dbg(adev->dev,
			"Sending RAS priority message %s response status: %x",
			smu_get_message_name(smu, msg), resp);
		if (resp == 0)
			*poll = false;
	}

	return 0;
}

static int __smu_cmn_send_debug_msg(struct smu_context *smu,
				    u32 msg,
				    u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->debug_param_reg, param);
	WREG32(smu->debug_msg_reg, msg);
	WREG32(smu->debug_resp_reg, 0);

	return 0;
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}
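
/*
 * A caller that needs a fire-then-poll sequence typically pairs this
 * with smu_cmn_wait_for_response(); a sketch only, with "index" being
 * whatever ASIC-specific message index the caller has resolved:
 *
 *	res = smu_cmn_send_msg_without_waiting(smu, index, param);
 *	if (!res)
 *		res = smu_cmn_wait_for_response(smu);
 */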

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 in which to return a value from the SMU
 *            back to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If there is a PCI bus
 * recovery or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is also
 * returned. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The value in @read_arg is read back regardless, to give back more
 * information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	bool poll = true;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);

	if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
		res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
		if (res)
			goto Out;
	}

	if (poll) {
		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);
		if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
			__smu_cmn_reg_print_error(smu, reg, index, param, msg);
			goto Out;
		}
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg) {
		smu_cmn_read_arg(smu, read_arg);
		dev_dbg(adev->dev,
			"smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg);
	}
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}
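
/*
 * For example, a caller that wants a feature mask word back from the
 * firmware does (taken from smu_cmn_get_enabled_mask() below):
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu,
 *					      SMU_MSG_GetEnabledSmuFeatures,
 *					      0,
 *					      feature_mask_low);
 */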

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(smu, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(smu, msg, param);
}

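/**
 * smu_cmn_to_asic_specific_index -- map a common index to an ASIC-specific one
 * @smu: pointer to an SMU context
 * @type: which mapping table to consult (message, clock, feature, ...)
 * @index: common (ASIC-agnostic) index to translate
 *
 * Return the ASIC-specific value on success, -EINVAL for an
 * out-of-range index or a missing/invalid mapping, and -EACCES when a
 * message is not allowed for an SR-IOV VF. An invalid workload mapping
 * is reported as -ENOTSUPP instead.
 */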
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

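/**
 * smu_cmn_feature_is_supported -- test whether a dpm feature is supported
 * @smu: pointer to an SMU context
 * @mask: the common feature bit to test
 *
 * Return a non-zero value if the feature maps to a valid ASIC feature
 * id and its bit is set in the supported-features bitmap, 0 otherwise.
 */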
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Also, considering they have no feature_map available,
	 * the check here can avoid the unwanted feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

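/**
 * smu_cmn_get_enabled_mask -- query the firmware for the enabled features
 * @smu: pointer to an SMU context
 * @feature_mask: pointer to a u64 in which to return the 64-bit mask
 *
 * Ask the SMU which features are currently enabled, using the single
 * two-argument GetEnabledSmuFeatures message when the ASIC maps it,
 * and falling back to the older separate Low/High query messages
 * otherwise.
 *
 * Return 0 on success, -EINVAL for a NULL @feature_mask, or the -errno
 * of the failed message.
 */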
int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

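/**
 * smu_cmn_get_indep_throttler_status -- translate a throttler bitmask
 * @dep_status: ASIC-dependent 32-bit throttler status word
 * @throttler_map: per-ASIC table mapping each dependent bit to its
 *                 ASIC-independent bit position
 *
 * Walk every set bit of @dep_status and set the corresponding
 * ASIC-independent bit given by @throttler_map, yielding a unified
 * 64-bit throttler status.
 */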
uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

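/**
 * smu_cmn_get_pp_feature_mask -- print the enabled feature mask into a buffer
 * @smu: pointer to an SMU context
 * @buf: sysfs buffer to fill
 *
 * Emit the raw 64-bit feature mask followed by one line per supported
 * feature, sorted by the ASIC-specific bit position, each marked
 * "enabled" or "disabled".
 *
 * Return the number of bytes written, or 0 if the enabled features
 * could not be queried.
 */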
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 * @smu: smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   false);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

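/**
 * smu_cmn_update_table -- copy a driver table to or from SMU-visible memory
 * @smu: pointer to an SMU context
 * @table_index: common table id to transfer
 * @argument: extra 16-bit argument packed into the message parameter
 * @table_data: CPU buffer to copy from (driver to SMU) or into (SMU to driver)
 * @drv2smu: true for driver-to-SMU transfers, false for the reverse
 *
 * Copy @table_data into the shared driver table and ask the firmware
 * to pull it in, or ask the firmware to dump a table and copy it back
 * out, flushing/invalidating the HDP cache around the transfer so the
 * CPU and GPU views stay coherent.
 *
 * Return 0 on success, -EINVAL for a bad table id or NULL @table_data,
 * or the -errno of the failed transfer message.
 */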
int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the hdp cache: to guarantee that the content
		 * seen by the GPU is consistent with the CPU's.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

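/**
 * smu_cmn_get_metrics_table -- fetch (and optionally cache) the metrics table
 * @smu: pointer to an SMU context
 * @metrics_table: destination buffer, or NULL to only refresh the cache
 * @bypass_cache: true to force a fresh read from the firmware
 *
 * Re-read the SMU metrics table if the cached copy is stale (older
 * than one millisecond), missing, or explicitly bypassed, then copy
 * the cached table into @metrics_table when one is supplied.
 *
 * Return 0 on success, or the -errno of the failed table transfer.
 */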
int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

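/**
 * smu_cmn_init_soft_gpu_metrics -- initialize a gpu_metrics table header
 * @table: buffer holding the gpu_metrics structure to initialize
 * @frev: format revision of the structure
 * @crev: content revision of the structure
 *
 * Look up the structure size for the given format/content revision
 * pair, fill the table with 0xFF (the "unknown value" pattern), and
 * stamp the header with the revision and size. Unknown revision pairs
 * are silently ignored.
 */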
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	(((a) << 16) | (b))

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(1, 4):
		structure_size = sizeof(struct gpu_metrics_v1_4);
		break;
	case METRICS_VERSION(1, 5):
		structure_size = sizeof(struct gpu_metrics_v1_5);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	case METRICS_VERSION(2, 3):
		structure_size = sizeof(struct gpu_metrics_v2_3);
		break;
	case METRICS_VERSION(2, 4):
		structure_size = sizeof(struct gpu_metrics_v2_4);
		break;
	case METRICS_VERSION(3, 0):
		structure_size = sizeof(struct gpu_metrics_v3_0);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

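/**
 * smu_cmn_is_audio_func_enabled -- check whether the audio function is up
 * @adev: pointer to the amdgpu device
 *
 * Look up PCI function 1 on the GPU's bus/slot (the audio function)
 * and report whether it is enabled.
 *
 * Return true if an audio driver has the function enabled, or if the
 * ASIC has no audio function at all.
 */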
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p);

	pci_dev_put(p);

	return snd_driver_loaded;
}