1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Rafał Miłecki <zajec5@gmail.com>
23 * Alex Deucher <alexdeucher@gmail.com>
24 */
25
26 #include "amdgpu.h"
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
30 #include "atom.h"
31 #include <linux/pci.h>
32 #include <linux/hwmon.h>
33 #include <linux/hwmon-sysfs.h>
34 #include <linux/nospec.h>
35 #include <linux/pm_runtime.h>
36 #include <asm/processor.h>
37 #include "hwmgr.h"
38
39 static const struct cg_flag_name clocks[] = {
40 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
41 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
42 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
43 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
44 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
45 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
46 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
47 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
48 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
49 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
50 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
51 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
52 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
53 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
54 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
55 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
56 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
57 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
58 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
59 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
60 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
61 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
62 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
63 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
64 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
65 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
66 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
67 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
68 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
69 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
70
71 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
72 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
73 {0, NULL},
74 };
75
76 static const struct hwmon_temp_label {
77 enum PP_HWMON_TEMP channel;
78 const char *label;
79 } temp_label[] = {
80 {PP_TEMP_EDGE, "edge"},
81 {PP_TEMP_JUNCTION, "junction"},
82 {PP_TEMP_MEM, "mem"},
83 };
84
85 /**
86 * DOC: power_dpm_state
87 *
88 * The power_dpm_state file is a legacy interface and is only provided for
89 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
90 * certain power related parameters. The file power_dpm_state is used for this.
91 * It accepts the following arguments:
92 *
93 * - battery
94 *
95 * - balanced
96 *
97 * - performance
98 *
99 * battery
100 *
101 * On older GPUs, the vbios provided a special power state for battery
102 * operation. Selecting battery switched to this state. This is no
103 * longer provided on newer GPUs so the option does nothing in that case.
104 *
105 * balanced
106 *
107 * On older GPUs, the vbios provided a special power state for balanced
108 * operation. Selecting balanced switched to this state. This is no
109 * longer provided on newer GPUs so the option does nothing in that case.
110 *
111 * performance
112 *
113 * On older GPUs, the vbios provided a special power state for performance
114 * operation. Selecting performance switched to this state. This is no
115 * longer provided on newer GPUs so the option does nothing in that case.
116 *
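 * Example (a minimal usage sketch; the /sys/class/drm/card0 path is an
 * assumption, the actual card index varies per system):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/power_dpm_state
 *    echo "battery" > /sys/class/drm/card0/device/power_dpm_state
 *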
117 */
118
119 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
120 struct device_attribute *attr,
121 char *buf)
122 {
123 struct drm_device *ddev = dev_get_drvdata(dev);
124 struct amdgpu_device *adev = drm_to_adev(ddev);
125 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
126 enum amd_pm_state_type pm;
127 int ret;
128
129 if (amdgpu_in_reset(adev))
130 return -EPERM;
131 if (adev->in_suspend && !adev->in_runpm)
132 return -EPERM;
133
134 ret = pm_runtime_get_sync(ddev->dev);
135 if (ret < 0) {
136 pm_runtime_put_autosuspend(ddev->dev);
137 return ret;
138 }
139
140 if (pp_funcs->get_current_power_state) {
141 pm = amdgpu_dpm_get_current_power_state(adev);
142 } else {
143 pm = adev->pm.dpm.user_state;
144 }
145
146 pm_runtime_mark_last_busy(ddev->dev);
147 pm_runtime_put_autosuspend(ddev->dev);
148
149 return sysfs_emit(buf, "%s\n",
150 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
151 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
152 }
153
154 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
155 struct device_attribute *attr,
156 const char *buf,
157 size_t count)
158 {
159 struct drm_device *ddev = dev_get_drvdata(dev);
160 struct amdgpu_device *adev = drm_to_adev(ddev);
161 enum amd_pm_state_type state;
162 int ret;
163
164 if (amdgpu_in_reset(adev))
165 return -EPERM;
166 if (adev->in_suspend && !adev->in_runpm)
167 return -EPERM;
168
169 if (strncmp("battery", buf, strlen("battery")) == 0)
170 state = POWER_STATE_TYPE_BATTERY;
171 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
172 state = POWER_STATE_TYPE_BALANCED;
173 else if (strncmp("performance", buf, strlen("performance")) == 0)
174 state = POWER_STATE_TYPE_PERFORMANCE;
175 else
176 return -EINVAL;
177
178 ret = pm_runtime_get_sync(ddev->dev);
179 if (ret < 0) {
180 pm_runtime_put_autosuspend(ddev->dev);
181 return ret;
182 }
183
184 if (is_support_sw_smu(adev)) {
185 mutex_lock(&adev->pm.mutex);
186 adev->pm.dpm.user_state = state;
187 mutex_unlock(&adev->pm.mutex);
188 } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
189 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
190 } else {
191 mutex_lock(&adev->pm.mutex);
192 adev->pm.dpm.user_state = state;
193 mutex_unlock(&adev->pm.mutex);
194
195 amdgpu_pm_compute_clocks(adev);
196 }
197 pm_runtime_mark_last_busy(ddev->dev);
198 pm_runtime_put_autosuspend(ddev->dev);
199
200 return count;
201 }
202
203
204 /**
205 * DOC: power_dpm_force_performance_level
206 *
207 * The amdgpu driver provides a sysfs API for adjusting certain power
208 * related parameters. The file power_dpm_force_performance_level is
209 * used for this. It accepts the following arguments:
210 *
211 * - auto
212 *
213 * - low
214 *
215 * - high
216 *
217 * - manual
218 *
219 * - profile_standard
220 *
221 * - profile_min_sclk
222 *
223 * - profile_min_mclk
224 *
225 * - profile_peak
226 *
227 * auto
228 *
229 * When auto is selected, the driver will attempt to dynamically select
230 * the optimal power profile for current conditions in the driver.
231 *
232 * low
233 *
234 * When low is selected, the clocks are forced to the lowest power state.
235 *
236 * high
237 *
238 * When high is selected, the clocks are forced to the highest power state.
239 *
240 * manual
241 *
242 * When manual is selected, the user can manually adjust which power states
243 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
244 * and pp_dpm_pcie files and adjust the power state transition heuristics
245 * via the pp_power_profile_mode sysfs file.
246 *
247 * profile_standard
248 * profile_min_sclk
249 * profile_min_mclk
250 * profile_peak
251 *
252 * When the profiling modes are selected, clock and power gating are
253 * disabled and the clocks are set for different profiling cases. This
254 * mode is recommended for profiling specific work loads where you do
255 * not want clock or power gating for clock fluctuation to interfere
256 * with your results. profile_standard sets the clocks to a fixed clock
257 * level which varies from asic to asic. profile_min_sclk forces the sclk
258 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
259 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
260 *
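 * Example (a minimal usage sketch; the card0 sysfs path is an assumption and
 * varies per system):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "profile_peak" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "auto" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *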
261 */
262
263 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
264 struct device_attribute *attr,
265 char *buf)
266 {
267 struct drm_device *ddev = dev_get_drvdata(dev);
268 struct amdgpu_device *adev = drm_to_adev(ddev);
269 enum amd_dpm_forced_level level = 0xff;
270 int ret;
271
272 if (amdgpu_in_reset(adev))
273 return -EPERM;
274 if (adev->in_suspend && !adev->in_runpm)
275 return -EPERM;
276
277 ret = pm_runtime_get_sync(ddev->dev);
278 if (ret < 0) {
279 pm_runtime_put_autosuspend(ddev->dev);
280 return ret;
281 }
282
283 if (adev->powerplay.pp_funcs->get_performance_level)
284 level = amdgpu_dpm_get_performance_level(adev);
285 else
286 level = adev->pm.dpm.forced_level;
287
288 pm_runtime_mark_last_busy(ddev->dev);
289 pm_runtime_put_autosuspend(ddev->dev);
290
291 return sysfs_emit(buf, "%s\n",
292 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
293 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
294 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
295 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
296 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
297 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
298 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
299 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
300 (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
301 "unknown");
302 }
303
304 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
305 struct device_attribute *attr,
306 const char *buf,
307 size_t count)
308 {
309 struct drm_device *ddev = dev_get_drvdata(dev);
310 struct amdgpu_device *adev = drm_to_adev(ddev);
311 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
312 enum amd_dpm_forced_level level;
313 enum amd_dpm_forced_level current_level = 0xff;
314 int ret = 0;
315
316 if (amdgpu_in_reset(adev))
317 return -EPERM;
318 if (adev->in_suspend && !adev->in_runpm)
319 return -EPERM;
320
321 if (strncmp("low", buf, strlen("low")) == 0) {
322 level = AMD_DPM_FORCED_LEVEL_LOW;
323 } else if (strncmp("high", buf, strlen("high")) == 0) {
324 level = AMD_DPM_FORCED_LEVEL_HIGH;
325 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
326 level = AMD_DPM_FORCED_LEVEL_AUTO;
327 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
328 level = AMD_DPM_FORCED_LEVEL_MANUAL;
329 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
330 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
331 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
332 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
333 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
334 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
335 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
336 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
337 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
338 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
339 } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
340 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
341 } else {
342 return -EINVAL;
343 }
344
345 ret = pm_runtime_get_sync(ddev->dev);
346 if (ret < 0) {
347 pm_runtime_put_autosuspend(ddev->dev);
348 return ret;
349 }
350
351 if (pp_funcs->get_performance_level)
352 current_level = amdgpu_dpm_get_performance_level(adev);
353
354 if (current_level == level) {
355 pm_runtime_mark_last_busy(ddev->dev);
356 pm_runtime_put_autosuspend(ddev->dev);
357 return count;
358 }
359
360 if (adev->asic_type == CHIP_RAVEN) {
361 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
362 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
363 amdgpu_gfx_off_ctrl(adev, false);
364 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
365 amdgpu_gfx_off_ctrl(adev, true);
366 }
367 }
368
369 /* profile_exit setting is valid only when current mode is in profile mode */
370 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
371 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
372 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
373 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
374 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
375 pr_err("Currently not in any profile mode!\n");
376 pm_runtime_mark_last_busy(ddev->dev);
377 pm_runtime_put_autosuspend(ddev->dev);
378 return -EINVAL;
379 }
380
381 if (pp_funcs->force_performance_level) {
382 mutex_lock(&adev->pm.mutex);
383 if (adev->pm.dpm.thermal_active) {
384 mutex_unlock(&adev->pm.mutex);
385 pm_runtime_mark_last_busy(ddev->dev);
386 pm_runtime_put_autosuspend(ddev->dev);
387 return -EINVAL;
388 }
389 ret = amdgpu_dpm_force_performance_level(adev, level);
390 if (ret) {
391 mutex_unlock(&adev->pm.mutex);
392 pm_runtime_mark_last_busy(ddev->dev);
393 pm_runtime_put_autosuspend(ddev->dev);
394 return -EINVAL;
395 } else {
396 adev->pm.dpm.forced_level = level;
397 }
398 mutex_unlock(&adev->pm.mutex);
399 }
400 pm_runtime_mark_last_busy(ddev->dev);
401 pm_runtime_put_autosuspend(ddev->dev);
402
403 return count;
404 }
405
406 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
409 {
410 struct drm_device *ddev = dev_get_drvdata(dev);
411 struct amdgpu_device *adev = drm_to_adev(ddev);
412 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
413 struct pp_states_info data;
414 int i, buf_len, ret;
415
416 if (amdgpu_in_reset(adev))
417 return -EPERM;
418 if (adev->in_suspend && !adev->in_runpm)
419 return -EPERM;
420
421 ret = pm_runtime_get_sync(ddev->dev);
422 if (ret < 0) {
423 pm_runtime_put_autosuspend(ddev->dev);
424 return ret;
425 }
426
427 if (pp_funcs->get_pp_num_states) {
428 amdgpu_dpm_get_pp_num_states(adev, &data);
429 } else {
430 memset(&data, 0, sizeof(data));
431 }
432
433 pm_runtime_mark_last_busy(ddev->dev);
434 pm_runtime_put_autosuspend(ddev->dev);
435
436 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
437 for (i = 0; i < data.nums; i++)
438 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
439 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
440 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
441 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
442 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
443
444 return buf_len;
445 }
446
447 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
448 struct device_attribute *attr,
449 char *buf)
450 {
451 struct drm_device *ddev = dev_get_drvdata(dev);
452 struct amdgpu_device *adev = drm_to_adev(ddev);
453 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
454 struct pp_states_info data = {0};
455 enum amd_pm_state_type pm = 0;
456 int i = 0, ret = 0;
457
458 if (amdgpu_in_reset(adev))
459 return -EPERM;
460 if (adev->in_suspend && !adev->in_runpm)
461 return -EPERM;
462
463 ret = pm_runtime_get_sync(ddev->dev);
464 if (ret < 0) {
465 pm_runtime_put_autosuspend(ddev->dev);
466 return ret;
467 }
468
469 if (pp_funcs->get_current_power_state
470 && pp_funcs->get_pp_num_states) {
471 pm = amdgpu_dpm_get_current_power_state(adev);
472 amdgpu_dpm_get_pp_num_states(adev, &data);
473 }
474
475 pm_runtime_mark_last_busy(ddev->dev);
476 pm_runtime_put_autosuspend(ddev->dev);
477
478 for (i = 0; i < data.nums; i++) {
479 if (pm == data.states[i])
480 break;
481 }
482
483 if (i == data.nums)
484 i = -EINVAL;
485
486 return sysfs_emit(buf, "%d\n", i);
487 }
488
489 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
490 struct device_attribute *attr,
491 char *buf)
492 {
493 struct drm_device *ddev = dev_get_drvdata(dev);
494 struct amdgpu_device *adev = drm_to_adev(ddev);
495
496 if (amdgpu_in_reset(adev))
497 return -EPERM;
498 if (adev->in_suspend && !adev->in_runpm)
499 return -EPERM;
500
501 if (adev->pp_force_state_enabled)
502 return amdgpu_get_pp_cur_state(dev, attr, buf);
503 else
504 return sysfs_emit(buf, "\n");
505 }
506
507 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
508 struct device_attribute *attr,
509 const char *buf,
510 size_t count)
511 {
512 struct drm_device *ddev = dev_get_drvdata(dev);
513 struct amdgpu_device *adev = drm_to_adev(ddev);
514 enum amd_pm_state_type state = 0;
515 unsigned long idx;
516 int ret;
517
518 if (amdgpu_in_reset(adev))
519 return -EPERM;
520 if (adev->in_suspend && !adev->in_runpm)
521 return -EPERM;
522
523 if (strlen(buf) == 1)
524 adev->pp_force_state_enabled = false;
525 else if (is_support_sw_smu(adev))
526 adev->pp_force_state_enabled = false;
527 else if (adev->powerplay.pp_funcs->dispatch_tasks &&
528 adev->powerplay.pp_funcs->get_pp_num_states) {
529 struct pp_states_info data;
530
531 ret = kstrtoul(buf, 0, &idx);
532 if (ret || idx >= ARRAY_SIZE(data.states))
533 return -EINVAL;
534
535 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
536
537 amdgpu_dpm_get_pp_num_states(adev, &data);
538 state = data.states[idx];
539
540 ret = pm_runtime_get_sync(ddev->dev);
541 if (ret < 0) {
542 pm_runtime_put_autosuspend(ddev->dev);
543 return ret;
544 }
545
546 /* only set user selected power states */
547 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
548 state != POWER_STATE_TYPE_DEFAULT) {
549 amdgpu_dpm_dispatch_task(adev,
550 AMD_PP_TASK_ENABLE_USER_STATE, &state);
551 adev->pp_force_state_enabled = true;
552 }
553 pm_runtime_mark_last_busy(ddev->dev);
554 pm_runtime_put_autosuspend(ddev->dev);
555 }
556
557 return count;
558 }
559
560 /**
561 * DOC: pp_table
562 *
563 * The amdgpu driver provides a sysfs API for uploading new powerplay
564 * tables. The file pp_table is used for this. Reading the file
565 * will dump the current power play table. Writing to the file
566 * will attempt to upload a new powerplay table and re-initialize
567 * powerplay using that new table.
568 *
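 * Example (a minimal sketch; the card0 path and the table.bin file name are
 * assumptions used only for illustration):
 *
 * .. code-block:: bash
 *
 *    # dump the current powerplay table
 *    cat /sys/class/drm/card0/device/pp_table > table.bin
 *    # upload a (modified) table and re-initialize powerplay with it
 *    cat table.bin > /sys/class/drm/card0/device/pp_table
 *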
569 */
570
571 static ssize_t amdgpu_get_pp_table(struct device *dev,
572 struct device_attribute *attr,
573 char *buf)
574 {
575 struct drm_device *ddev = dev_get_drvdata(dev);
576 struct amdgpu_device *adev = drm_to_adev(ddev);
577 char *table = NULL;
578 int size, ret;
579
580 if (amdgpu_in_reset(adev))
581 return -EPERM;
582 if (adev->in_suspend && !adev->in_runpm)
583 return -EPERM;
584
585 ret = pm_runtime_get_sync(ddev->dev);
586 if (ret < 0) {
587 pm_runtime_put_autosuspend(ddev->dev);
588 return ret;
589 }
590
591 if (adev->powerplay.pp_funcs->get_pp_table) {
592 size = amdgpu_dpm_get_pp_table(adev, &table);
593 pm_runtime_mark_last_busy(ddev->dev);
594 pm_runtime_put_autosuspend(ddev->dev);
595 if (size < 0)
596 return size;
597 } else {
598 pm_runtime_mark_last_busy(ddev->dev);
599 pm_runtime_put_autosuspend(ddev->dev);
600 return 0;
601 }
602
603 if (size >= PAGE_SIZE)
604 size = PAGE_SIZE - 1;
605
606 memcpy(buf, table, size);
607
608 return size;
609 }
610
611 static ssize_t amdgpu_set_pp_table(struct device *dev,
612 struct device_attribute *attr,
613 const char *buf,
614 size_t count)
615 {
616 struct drm_device *ddev = dev_get_drvdata(dev);
617 struct amdgpu_device *adev = drm_to_adev(ddev);
618 int ret = 0;
619
620 if (amdgpu_in_reset(adev))
621 return -EPERM;
622 if (adev->in_suspend && !adev->in_runpm)
623 return -EPERM;
624
625 ret = pm_runtime_get_sync(ddev->dev);
626 if (ret < 0) {
627 pm_runtime_put_autosuspend(ddev->dev);
628 return ret;
629 }
630
631 ret = amdgpu_dpm_set_pp_table(adev, buf, count);
632 if (ret) {
633 pm_runtime_mark_last_busy(ddev->dev);
634 pm_runtime_put_autosuspend(ddev->dev);
635 return ret;
636 }
637
638 pm_runtime_mark_last_busy(ddev->dev);
639 pm_runtime_put_autosuspend(ddev->dev);
640
641 return count;
642 }
643
644 /**
645 * DOC: pp_od_clk_voltage
646 *
647 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
648 * in each power level within a power state. The pp_od_clk_voltage is used for
649 * this.
650 *
651 * Note that the actual memory controller clock rate is exposed, not
652 * the effective memory clock of the DRAMs. To translate it, use the
653 * following formula:
654 *
655 * Clock conversion (Mhz):
656 *
657 * HBM: effective_memory_clock = memory_controller_clock * 1
658 *
659 * G5: effective_memory_clock = memory_controller_clock * 1
660 *
661 * G6: effective_memory_clock = memory_controller_clock * 2
662 *
663 * DRAM data rate (MT/s):
664 *
665 * HBM: effective_memory_clock * 2 = data_rate
666 *
667 * G5: effective_memory_clock * 4 = data_rate
668 *
669 * G6: effective_memory_clock * 8 = data_rate
670 *
671 * Bandwidth (MB/s):
672 *
673 * data_rate * vram_bit_width / 8 = memory_bandwidth
674 *
675 * Some examples:
676 *
677 * G5 on RX460:
678 *
679 * memory_controller_clock = 1750 Mhz
680 *
681 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
682 *
683 * data rate = 1750 * 4 = 7000 MT/s
684 *
685 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
686 *
687 * G6 on RX5700:
688 *
689 * memory_controller_clock = 875 Mhz
690 *
691 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
692 *
693 * data rate = 1750 * 8 = 14000 MT/s
694 *
695 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
696 *
697 * < For Vega10 and previous ASICs >
698 *
699 * Reading the file will display:
700 *
701 * - a list of engine clock levels and voltages labeled OD_SCLK
702 *
703 * - a list of memory clock levels and voltages labeled OD_MCLK
704 *
705 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
706 *
707 * To manually adjust these settings, first select manual using
708 * power_dpm_force_performance_level. Enter a new value for each
709 * level by writing a string that contains "s/m level clock voltage" to
710 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
711 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
712 * 810 mV. When you have edited all of the states as needed, write
713 * "c" (commit) to the file to commit your changes. If you want to reset to the
714 * default power levels, write "r" (reset) to the file to reset them.
715 *
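 * Example for the above (a sketch only; the clock/voltage values are the
 * illustrative ones from this description and the card0 path is an
 * assumption):
 *
 * .. code-block:: bash
 *
 *    echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *    echo "m 0 350 810" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *    echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *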
716 *
717 * < For Vega20 and newer ASICs >
718 *
719 * Reading the file will display:
720 *
721 * - minimum and maximum engine clock labeled OD_SCLK
722 *
723 * - minimum (not available for Vega20 and Navi1x) and maximum memory
724 * clock labeled OD_MCLK
725 *
726 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
727 * They can be used to calibrate the sclk voltage curve.
728 *
729 * - voltage offset (in mV) applied on target voltage calculation.
730 * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
731 * Cavefish. For these ASICs, the target voltage calculation can be
732 * illustrated by "voltage = voltage calculated from v/f curve +
733 * overdrive vddgfx offset"
734 *
735 * - a list of valid ranges for sclk, mclk, and voltage curve points
736 * labeled OD_RANGE
737 *
738 * To manually adjust these settings:
739 *
740 * - First select manual using power_dpm_force_performance_level
741 *
742 * - For clock frequency setting, enter a new value by writing a
743 * string that contains "s/m index clock" to the file. The index
744 * should be 0 to set the minimum clock and 1 to set the maximum
745 * clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz.
746 * "m 1 800" will update the maximum mclk to 800 MHz.
747 *
748 * For sclk voltage curve, enter the new values by writing a
749 * string that contains "vc point clock voltage" to the file. The
750 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
751 * update point1 with the clock set to 300 MHz and the voltage to
752 * 600 mV. "vc 2 1000 1000" will update point3 with the clock set
753 * to 1000 MHz and the voltage to 1000 mV.
754 *
755 * To update the voltage offset applied for gfxclk/voltage calculation,
756 * enter the new value by writing a string that contains "vo offset".
757 * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
758 * The offset can be a positive or negative value.
759 *
760 * - When you have edited all of the states as needed, write "c" (commit)
761 * to the file to commit your changes
762 *
763 * - If you want to reset to the default power levels, write "r" (reset)
764 * to the file to reset them
765 *
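 * Example for the above (a sketch only; the values are illustrative, "vo" is
 * only accepted on ASICs that support the voltage offset, and the card0 path
 * is an assumption):
 *
 * .. code-block:: bash
 *
 *    echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "s 0 500" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *    echo "m 1 800" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *    echo "vc 0 300 600" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *    echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *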
766 */
767
768 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
769 struct device_attribute *attr,
770 const char *buf,
771 size_t count)
772 {
773 struct drm_device *ddev = dev_get_drvdata(dev);
774 struct amdgpu_device *adev = drm_to_adev(ddev);
775 int ret;
776 uint32_t parameter_size = 0;
777 long parameter[64];
778 char buf_cpy[128];
779 char *tmp_str;
780 char *sub_str;
781 const char delimiter[3] = {' ', '\n', '\0'};
782 uint32_t type;
783
784 if (amdgpu_in_reset(adev))
785 return -EPERM;
786 if (adev->in_suspend && !adev->in_runpm)
787 return -EPERM;
788
789 if (count > 127)
790 return -EINVAL;
791
792 if (*buf == 's')
793 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
794 else if (*buf == 'p')
795 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
796 else if (*buf == 'm')
797 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
798 else if (*buf == 'r')
799 type = PP_OD_RESTORE_DEFAULT_TABLE;
800 else if (*buf == 'c')
801 type = PP_OD_COMMIT_DPM_TABLE;
802 else if (!strncmp(buf, "vc", 2))
803 type = PP_OD_EDIT_VDDC_CURVE;
804 else if (!strncmp(buf, "vo", 2))
805 type = PP_OD_EDIT_VDDGFX_OFFSET;
806 else
807 return -EINVAL;
808
809 memcpy(buf_cpy, buf, count+1);
810
811 tmp_str = buf_cpy;
812
813 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
814 (type == PP_OD_EDIT_VDDGFX_OFFSET))
815 tmp_str++;
816 while (isspace(*++tmp_str));
817
818 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
819 if (strlen(sub_str) == 0)
820 continue;
821 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
822 if (ret)
823 return -EINVAL;
824 parameter_size++;
825
826 while (isspace(*tmp_str))
827 tmp_str++;
828 }
829
830 ret = pm_runtime_get_sync(ddev->dev);
831 if (ret < 0) {
832 pm_runtime_put_autosuspend(ddev->dev);
833 return ret;
834 }
835
836 if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
837 ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
838 parameter,
839 parameter_size);
840 if (ret) {
841 pm_runtime_mark_last_busy(ddev->dev);
842 pm_runtime_put_autosuspend(ddev->dev);
843 return -EINVAL;
844 }
845 }
846
847 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
848 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
849 parameter, parameter_size);
850 if (ret) {
851 pm_runtime_mark_last_busy(ddev->dev);
852 pm_runtime_put_autosuspend(ddev->dev);
853 return -EINVAL;
854 }
855 }
856
857 if (type == PP_OD_COMMIT_DPM_TABLE) {
858 if (adev->powerplay.pp_funcs->dispatch_tasks) {
859 amdgpu_dpm_dispatch_task(adev,
860 AMD_PP_TASK_READJUST_POWER_STATE,
861 NULL);
862 pm_runtime_mark_last_busy(ddev->dev);
863 pm_runtime_put_autosuspend(ddev->dev);
864 return count;
865 } else {
866 pm_runtime_mark_last_busy(ddev->dev);
867 pm_runtime_put_autosuspend(ddev->dev);
868 return -EINVAL;
869 }
870 }
871
872 pm_runtime_mark_last_busy(ddev->dev);
873 pm_runtime_put_autosuspend(ddev->dev);
874
875 return count;
876 }
877
878 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
879 struct device_attribute *attr,
880 char *buf)
881 {
882 struct drm_device *ddev = dev_get_drvdata(dev);
883 struct amdgpu_device *adev = drm_to_adev(ddev);
884 ssize_t size;
885 int ret;
886
887 if (amdgpu_in_reset(adev))
888 return -EPERM;
889 if (adev->in_suspend && !adev->in_runpm)
890 return -EPERM;
891
892 ret = pm_runtime_get_sync(ddev->dev);
893 if (ret < 0) {
894 pm_runtime_put_autosuspend(ddev->dev);
895 return ret;
896 }
897
898 if (adev->powerplay.pp_funcs->print_clock_levels) {
899 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
900 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
901 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
902 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
903 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
904 size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
905 } else {
906 size = snprintf(buf, PAGE_SIZE, "\n");
907 }
908 pm_runtime_mark_last_busy(ddev->dev);
909 pm_runtime_put_autosuspend(ddev->dev);
910
911 return size;
912 }
913
914 /**
915 * DOC: pp_features
916 *
917 * The amdgpu driver provides a sysfs API for adjusting which powerplay
918 * features are enabled. The file pp_features is used for this, and it is
919 * only available for Vega10 and later dGPUs.
920 *
921 * Reading back the file will show you the following:
922 * - Current ppfeature masks
923 * - List of all the supported powerplay features with their names,
924 * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
925 *
926 * To manually enable or disable a specific feature, just set or clear
927 * the corresponding bit from original ppfeature masks and input the
928 * new ppfeature masks.
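 *
 * Example (a sketch only; the mask value shown is purely illustrative and the
 * card0 path is an assumption - read the file first to learn the real feature
 * bit layout for your ASIC):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pp_features
 *    echo 0x3fff > /sys/class/drm/card0/device/pp_features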
929 */
930 static ssize_t amdgpu_set_pp_features(struct device *dev,
931 struct device_attribute *attr,
932 const char *buf,
933 size_t count)
934 {
935 struct drm_device *ddev = dev_get_drvdata(dev);
936 struct amdgpu_device *adev = drm_to_adev(ddev);
937 uint64_t featuremask;
938 int ret;
939
940 if (amdgpu_in_reset(adev))
941 return -EPERM;
942 if (adev->in_suspend && !adev->in_runpm)
943 return -EPERM;
944
945 ret = kstrtou64(buf, 0, &featuremask);
946 if (ret)
947 return -EINVAL;
948
949 ret = pm_runtime_get_sync(ddev->dev);
950 if (ret < 0) {
951 pm_runtime_put_autosuspend(ddev->dev);
952 return ret;
953 }
954
955 if (adev->powerplay.pp_funcs->set_ppfeature_status) {
956 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
957 if (ret) {
958 pm_runtime_mark_last_busy(ddev->dev);
959 pm_runtime_put_autosuspend(ddev->dev);
960 return -EINVAL;
961 }
962 }
963 pm_runtime_mark_last_busy(ddev->dev);
964 pm_runtime_put_autosuspend(ddev->dev);
965
966 return count;
967 }
968
969 static ssize_t amdgpu_get_pp_features(struct device *dev,
970 struct device_attribute *attr,
971 char *buf)
972 {
973 struct drm_device *ddev = dev_get_drvdata(dev);
974 struct amdgpu_device *adev = drm_to_adev(ddev);
975 ssize_t size;
976 int ret;
977
978 if (amdgpu_in_reset(adev))
979 return -EPERM;
980 if (adev->in_suspend && !adev->in_runpm)
981 return -EPERM;
982
983 ret = pm_runtime_get_sync(ddev->dev);
984 if (ret < 0) {
985 pm_runtime_put_autosuspend(ddev->dev);
986 return ret;
987 }
988
989 if (adev->powerplay.pp_funcs->get_ppfeature_status)
990 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
991 else
992 size = snprintf(buf, PAGE_SIZE, "\n");
993
994 pm_runtime_mark_last_busy(ddev->dev);
995 pm_runtime_put_autosuspend(ddev->dev);
996
997 return size;
998 }
999
1000 /**
1001 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
1002 *
1003 * The amdgpu driver provides a sysfs API for adjusting what power levels
1004 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
1005 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
1006 * this.
1007 *
1008 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
1009 * Vega10 and later ASICs.
1010 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
1011 *
1012 * Reading back the files will show you the available power levels within
1013 * the power state and the clock information for those levels.
1014 *
1015 * To manually adjust these states, first select manual using
1016 * power_dpm_force_performance_level.
1017 * Secondly, enter a new value for each level by writing a string that
1018 * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie"
1019 * E.g.,
1020 *
1021 * .. code-block:: bash
1022 *
1023 * echo "4 5 6" > pp_dpm_sclk
1024 *
1025 * will enable sclk levels 4, 5, and 6.
1026 *
1027 * NOTE: changing the dcefclk max dpm level is not supported now
1028 */
1029
1030 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
1031 enum pp_clock_type type,
1032 char *buf)
1033 {
1034 struct drm_device *ddev = dev_get_drvdata(dev);
1035 struct amdgpu_device *adev = drm_to_adev(ddev);
1036 ssize_t size;
1037 int ret;
1038
1039 if (amdgpu_in_reset(adev))
1040 return -EPERM;
1041 if (adev->in_suspend && !adev->in_runpm)
1042 return -EPERM;
1043
1044 ret = pm_runtime_get_sync(ddev->dev);
1045 if (ret < 0) {
1046 pm_runtime_put_autosuspend(ddev->dev);
1047 return ret;
1048 }
1049
1050 if (adev->powerplay.pp_funcs->print_clock_levels)
1051 size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1052 else
1053 size = snprintf(buf, PAGE_SIZE, "\n");
1054
1055 pm_runtime_mark_last_busy(ddev->dev);
1056 pm_runtime_put_autosuspend(ddev->dev);
1057
1058 return size;
1059 }
1060
1061 /*
1062 * Worst case: 32 bits individually specified, in octal at 12 characters
1063 * per line (+1 for \n).
1064 */
1065 #define AMDGPU_MASK_BUF_MAX (32 * 13)
1066
1067 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1068 {
1069 int ret;
1070 unsigned long level;
1071 char *sub_str = NULL;
1072 char *tmp;
1073 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1074 const char delimiter[3] = {' ', '\n', '\0'};
1075 size_t bytes;
1076
1077 *mask = 0;
1078
1079 bytes = min(count, sizeof(buf_cpy) - 1);
1080 memcpy(buf_cpy, buf, bytes);
1081 buf_cpy[bytes] = '\0';
1082 tmp = buf_cpy;
1083 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1084 if (strlen(sub_str)) {
1085 ret = kstrtoul(sub_str, 0, &level);
1086 if (ret || level > 31)
1087 return -EINVAL;
1088 *mask |= 1 << level;
1089 } else
1090 break;
1091 }
1092
1093 return 0;
1094 }
1095
1096 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1097 enum pp_clock_type type,
1098 const char *buf,
1099 size_t count)
1100 {
1101 struct drm_device *ddev = dev_get_drvdata(dev);
1102 struct amdgpu_device *adev = drm_to_adev(ddev);
1103 int ret;
1104 uint32_t mask = 0;
1105
1106 if (amdgpu_in_reset(adev))
1107 return -EPERM;
1108 if (adev->in_suspend && !adev->in_runpm)
1109 return -EPERM;
1110
1111 ret = amdgpu_read_mask(buf, count, &mask);
1112 if (ret)
1113 return ret;
1114
1115 ret = pm_runtime_get_sync(ddev->dev);
1116 if (ret < 0) {
1117 pm_runtime_put_autosuspend(ddev->dev);
1118 return ret;
1119 }
1120
1121 if (adev->powerplay.pp_funcs->force_clock_level)
1122 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1123 else
1124 ret = 0;
1125
1126 pm_runtime_mark_last_busy(ddev->dev);
1127 pm_runtime_put_autosuspend(ddev->dev);
1128
1129 if (ret)
1130 return -EINVAL;
1131
1132 return count;
1133 }
1134
1135 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1136 struct device_attribute *attr,
1137 char *buf)
1138 {
1139 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1140 }
1141
1142 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1143 struct device_attribute *attr,
1144 const char *buf,
1145 size_t count)
1146 {
1147 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1148 }
1149
1150 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1151 struct device_attribute *attr,
1152 char *buf)
1153 {
1154 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1155 }
1156
1157 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1158 struct device_attribute *attr,
1159 const char *buf,
1160 size_t count)
1161 {
1162 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1163 }
1164
1165 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1166 struct device_attribute *attr,
1167 char *buf)
1168 {
1169 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1170 }
1171
1172 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1173 struct device_attribute *attr,
1174 const char *buf,
1175 size_t count)
1176 {
1177 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1178 }
1179
1180 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1181 struct device_attribute *attr,
1182 char *buf)
1183 {
1184 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1185 }
1186
1187 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1188 struct device_attribute *attr,
1189 const char *buf,
1190 size_t count)
1191 {
1192 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1193 }
1194
1195 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1196 struct device_attribute *attr,
1197 char *buf)
1198 {
1199 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1200 }
1201
1202 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1203 struct device_attribute *attr,
1204 const char *buf,
1205 size_t count)
1206 {
1207 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1208 }
1209
1210 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1211 struct device_attribute *attr,
1212 char *buf)
1213 {
1214 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1215 }
1216
1217 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1218 struct device_attribute *attr,
1219 const char *buf,
1220 size_t count)
1221 {
1222 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1223 }
1224
1225 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1226 struct device_attribute *attr,
1227 char *buf)
1228 {
1229 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1230 }
1231
1232 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1233 struct device_attribute *attr,
1234 const char *buf,
1235 size_t count)
1236 {
1237 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1238 }
1239
1240 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1241 struct device_attribute *attr,
1242 char *buf)
1243 {
1244 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1245 }
1246
1247 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1248 struct device_attribute *attr,
1249 const char *buf,
1250 size_t count)
1251 {
1252 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1253 }
1254
1255 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1256 struct device_attribute *attr,
1257 char *buf)
1258 {
1259 struct drm_device *ddev = dev_get_drvdata(dev);
1260 struct amdgpu_device *adev = drm_to_adev(ddev);
1261 uint32_t value = 0;
1262 int ret;
1263
1264 if (amdgpu_in_reset(adev))
1265 return -EPERM;
1266 if (adev->in_suspend && !adev->in_runpm)
1267 return -EPERM;
1268
1269 ret = pm_runtime_get_sync(ddev->dev);
1270 if (ret < 0) {
1271 pm_runtime_put_autosuspend(ddev->dev);
1272 return ret;
1273 }
1274
1275 if (is_support_sw_smu(adev))
1276 value = 0;
1277 else if (adev->powerplay.pp_funcs->get_sclk_od)
1278 value = amdgpu_dpm_get_sclk_od(adev);
1279
1280 pm_runtime_mark_last_busy(ddev->dev);
1281 pm_runtime_put_autosuspend(ddev->dev);
1282
1283 return sysfs_emit(buf, "%d\n", value);
1284 }
1285
1286 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1287 struct device_attribute *attr,
1288 const char *buf,
1289 size_t count)
1290 {
1291 struct drm_device *ddev = dev_get_drvdata(dev);
1292 struct amdgpu_device *adev = drm_to_adev(ddev);
1293 int ret;
1294 long int value;
1295
1296 if (amdgpu_in_reset(adev))
1297 return -EPERM;
1298 if (adev->in_suspend && !adev->in_runpm)
1299 return -EPERM;
1300
1301 ret = kstrtol(buf, 0, &value);
1302
1303 if (ret)
1304 return -EINVAL;
1305
1306 ret = pm_runtime_get_sync(ddev->dev);
1307 if (ret < 0) {
1308 pm_runtime_put_autosuspend(ddev->dev);
1309 return ret;
1310 }
1311
1312 if (is_support_sw_smu(adev)) {
1313 value = 0;
1314 } else {
1315 if (adev->powerplay.pp_funcs->set_sclk_od)
1316 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1317
1318 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1319 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1320 } else {
1321 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1322 amdgpu_pm_compute_clocks(adev);
1323 }
1324 }
1325
1326 pm_runtime_mark_last_busy(ddev->dev);
1327 pm_runtime_put_autosuspend(ddev->dev);
1328
1329 return count;
1330 }
1331
1332 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1333 struct device_attribute *attr,
1334 char *buf)
1335 {
1336 struct drm_device *ddev = dev_get_drvdata(dev);
1337 struct amdgpu_device *adev = drm_to_adev(ddev);
1338 uint32_t value = 0;
1339 int ret;
1340
1341 if (amdgpu_in_reset(adev))
1342 return -EPERM;
1343 if (adev->in_suspend && !adev->in_runpm)
1344 return -EPERM;
1345
1346 ret = pm_runtime_get_sync(ddev->dev);
1347 if (ret < 0) {
1348 pm_runtime_put_autosuspend(ddev->dev);
1349 return ret;
1350 }
1351
1352 if (is_support_sw_smu(adev))
1353 value = 0;
1354 else if (adev->powerplay.pp_funcs->get_mclk_od)
1355 value = amdgpu_dpm_get_mclk_od(adev);
1356
1357 pm_runtime_mark_last_busy(ddev->dev);
1358 pm_runtime_put_autosuspend(ddev->dev);
1359
1360 return sysfs_emit(buf, "%d\n", value);
1361 }
1362
1363 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1364 struct device_attribute *attr,
1365 const char *buf,
1366 size_t count)
1367 {
1368 struct drm_device *ddev = dev_get_drvdata(dev);
1369 struct amdgpu_device *adev = drm_to_adev(ddev);
1370 int ret;
1371 long int value;
1372
1373 if (amdgpu_in_reset(adev))
1374 return -EPERM;
1375 if (adev->in_suspend && !adev->in_runpm)
1376 return -EPERM;
1377
1378 ret = kstrtol(buf, 0, &value);
1379
1380 if (ret)
1381 return -EINVAL;
1382
1383 ret = pm_runtime_get_sync(ddev->dev);
1384 if (ret < 0) {
1385 pm_runtime_put_autosuspend(ddev->dev);
1386 return ret;
1387 }
1388
1389 if (is_support_sw_smu(adev)) {
1390 value = 0;
1391 } else {
1392 if (adev->powerplay.pp_funcs->set_mclk_od)
1393 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1394
1395 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1396 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1397 } else {
1398 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1399 amdgpu_pm_compute_clocks(adev);
1400 }
1401 }
1402
1403 pm_runtime_mark_last_busy(ddev->dev);
1404 pm_runtime_put_autosuspend(ddev->dev);
1405
1406 return count;
1407 }
1408
1409 /**
1410 * DOC: pp_power_profile_mode
1411 *
1412 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1413 * related to switching between power levels in a power state. The file
1414 * pp_power_profile_mode is used for this.
1415 *
1416 * Reading this file outputs a list of all of the predefined power profiles
1417 * and the relevant heuristics settings for that profile.
1418 *
1419 * To select a profile or create a custom profile, first select manual using
1420 * power_dpm_force_performance_level. Writing the number of a predefined
1421 * profile to pp_power_profile_mode will enable those heuristics. To
1422 * create a custom set of heuristics, write a string of numbers to the file
1423 * starting with the number of the custom profile along with a setting
1424 * for each heuristic parameter. Due to differences across asic families
1425 * the heuristic parameters vary from family to family.
1426 *
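 * Example (a sketch only; the available profile numbers differ per ASIC, so
 * list them first; the card0 path is an assumption):
 *
 * .. code-block:: bash
 *
 *    echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    cat /sys/class/drm/card0/device/pp_power_profile_mode
 *    # select one of the listed predefined profiles by its number, e.g. 1
 *    echo "1" > /sys/class/drm/card0/device/pp_power_profile_mode
 *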
1427 */
1428
1429 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1430 struct device_attribute *attr,
1431 char *buf)
1432 {
1433 struct drm_device *ddev = dev_get_drvdata(dev);
1434 struct amdgpu_device *adev = drm_to_adev(ddev);
1435 ssize_t size;
1436 int ret;
1437
1438 if (amdgpu_in_reset(adev))
1439 return -EPERM;
1440 if (adev->in_suspend && !adev->in_runpm)
1441 return -EPERM;
1442
1443 ret = pm_runtime_get_sync(ddev->dev);
1444 if (ret < 0) {
1445 pm_runtime_put_autosuspend(ddev->dev);
1446 return ret;
1447 }
1448
1449 if (adev->powerplay.pp_funcs->get_power_profile_mode)
1450 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1451 else
1452 size = snprintf(buf, PAGE_SIZE, "\n");
1453
1454 pm_runtime_mark_last_busy(ddev->dev);
1455 pm_runtime_put_autosuspend(ddev->dev);
1456
1457 return size;
1458 }
1459
1460
1461 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1462 struct device_attribute *attr,
1463 const char *buf,
1464 size_t count)
1465 {
1466 int ret;
1467 struct drm_device *ddev = dev_get_drvdata(dev);
1468 struct amdgpu_device *adev = drm_to_adev(ddev);
1469 uint32_t parameter_size = 0;
1470 long parameter[64];
1471 char *sub_str, buf_cpy[128];
1472 char *tmp_str;
1473 uint32_t i = 0;
1474 char tmp[2];
1475 long int profile_mode = 0;
1476 const char delimiter[3] = {' ', '\n', '\0'};
1477
1478 if (amdgpu_in_reset(adev))
1479 return -EPERM;
1480 if (adev->in_suspend && !adev->in_runpm)
1481 return -EPERM;
1482
1483 tmp[0] = *(buf);
1484 tmp[1] = '\0';
1485 ret = kstrtol(tmp, 0, &profile_mode);
1486 if (ret)
1487 return -EINVAL;
1488
1489 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1490 if (count < 2 || count > 127)
1491 return -EINVAL;
1492 while (isspace(*++buf))
1493 i++;
1494 memcpy(buf_cpy, buf, count-i);
1495 tmp_str = buf_cpy;
1496 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1497 if (strlen(sub_str) == 0)
1498 continue;
1499 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1500 if (ret)
1501 return -EINVAL;
1502 parameter_size++;
1503 while (isspace(*tmp_str))
1504 tmp_str++;
1505 }
1506 }
1507 parameter[parameter_size] = profile_mode;
1508
1509 ret = pm_runtime_get_sync(ddev->dev);
1510 if (ret < 0) {
1511 pm_runtime_put_autosuspend(ddev->dev);
1512 return ret;
1513 }
1514
1515 if (adev->powerplay.pp_funcs->set_power_profile_mode)
1516 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1517
1518 pm_runtime_mark_last_busy(ddev->dev);
1519 pm_runtime_put_autosuspend(ddev->dev);
1520
1521 if (!ret)
1522 return count;
1523
1524 return -EINVAL;
1525 }
1526
1527 /**
1528 * DOC: gpu_busy_percent
1529 *
1530 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1531 * is as a percentage. The file gpu_busy_percent is used for this.
1532 * The SMU firmware computes a percentage of load based on the
1533 * aggregate activity level in the IP cores.
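 *
 * Example (a minimal sketch; the card0 path is an assumption):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/gpu_busy_percent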
1534 */
1535 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1536 struct device_attribute *attr,
1537 char *buf)
1538 {
1539 struct drm_device *ddev = dev_get_drvdata(dev);
1540 struct amdgpu_device *adev = drm_to_adev(ddev);
1541 int r, value, size = sizeof(value);
1542
1543 if (amdgpu_in_reset(adev))
1544 return -EPERM;
1545 if (adev->in_suspend && !adev->in_runpm)
1546 return -EPERM;
1547
1548 r = pm_runtime_get_sync(ddev->dev);
1549 if (r < 0) {
1550 pm_runtime_put_autosuspend(ddev->dev);
1551 return r;
1552 }
1553
1554 /* read the IP busy sensor */
1555 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1556 (void *)&value, &size);
1557
1558 pm_runtime_mark_last_busy(ddev->dev);
1559 pm_runtime_put_autosuspend(ddev->dev);
1560
1561 if (r)
1562 return r;
1563
1564 return sysfs_emit(buf, "%d\n", value);
1565 }
1566
1567 /**
1568 * DOC: mem_busy_percent
1569 *
1570 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1571 * is as a percentage. The file mem_busy_percent is used for this.
1572 * The SMU firmware computes a percentage of load based on the
1573 * aggregate activity level in the IP cores.
1574 */
1575 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1576 struct device_attribute *attr,
1577 char *buf)
1578 {
1579 struct drm_device *ddev = dev_get_drvdata(dev);
1580 struct amdgpu_device *adev = drm_to_adev(ddev);
1581 int r, value, size = sizeof(value);
1582
1583 if (amdgpu_in_reset(adev))
1584 return -EPERM;
1585 if (adev->in_suspend && !adev->in_runpm)
1586 return -EPERM;
1587
1588 r = pm_runtime_get_sync(ddev->dev);
1589 if (r < 0) {
1590 pm_runtime_put_autosuspend(ddev->dev);
1591 return r;
1592 }
1593
1594 /* read the IP busy sensor */
1595 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1596 (void *)&value, &size);
1597
1598 pm_runtime_mark_last_busy(ddev->dev);
1599 pm_runtime_put_autosuspend(ddev->dev);
1600
1601 if (r)
1602 return r;
1603
1604 return sysfs_emit(buf, "%d\n", value);
1605 }
1606
1607 /**
1608 * DOC: pcie_bw
1609 *
1610 * The amdgpu driver provides a sysfs API for estimating how much data
1611 * has been received and sent by the GPU in the last second through PCIe.
1612 * The file pcie_bw is used for this.
1613 * The Perf counters count the number of received and sent messages and return
1614 * those values, as well as the maximum payload size of a PCIe packet (mps).
1615 * Note that it is not possible to easily and quickly obtain the size of each
1616 * packet transmitted, so we output the max payload size (mps) to allow for
1617 * quick estimation of the PCIe bandwidth usage.
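 *
 * Example (a sketch only; the card0 path is an assumption). The file prints
 * "received_count sent_count mps"; since each counted message carries at most
 * mps payload bytes, (received_count + sent_count) * mps gives a rough upper
 * bound on the payload bytes moved in the last second:
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pcie_bw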
1618 */
1619 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1620 struct device_attribute *attr,
1621 char *buf)
1622 {
1623 struct drm_device *ddev = dev_get_drvdata(dev);
1624 struct amdgpu_device *adev = drm_to_adev(ddev);
1625 uint64_t count0 = 0, count1 = 0;
1626 int ret;
1627
1628 if (amdgpu_in_reset(adev))
1629 return -EPERM;
1630 if (adev->in_suspend && !adev->in_runpm)
1631 return -EPERM;
1632
1633 if (adev->flags & AMD_IS_APU)
1634 return -ENODATA;
1635
1636 if (!adev->asic_funcs->get_pcie_usage)
1637 return -ENODATA;
1638
1639 ret = pm_runtime_get_sync(ddev->dev);
1640 if (ret < 0) {
1641 pm_runtime_put_autosuspend(ddev->dev);
1642 return ret;
1643 }
1644
1645 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1646
1647 pm_runtime_mark_last_busy(ddev->dev);
1648 pm_runtime_put_autosuspend(ddev->dev);
1649
1650 return sysfs_emit(buf, "%llu %llu %i\n",
1651 count0, count1, pcie_get_mps(adev->pdev));
1652 }
1653
1654 /**
1655 * DOC: unique_id
1656 *
1657 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1658 * The file unique_id is used for this.
1659 * This will provide a Unique ID that will persist from machine to machine.
1660 *
1661 * NOTE: This will only work for GFX9 and newer. This file will be absent
1662 * on unsupported ASICs (GFX8 and older)
1663 */
1664 static ssize_t amdgpu_get_unique_id(struct device *dev,
1665 struct device_attribute *attr,
1666 char *buf)
1667 {
1668 struct drm_device *ddev = dev_get_drvdata(dev);
1669 struct amdgpu_device *adev = drm_to_adev(ddev);
1670
1671 if (amdgpu_in_reset(adev))
1672 return -EPERM;
1673 if (adev->in_suspend && !adev->in_runpm)
1674 return -EPERM;
1675
1676 if (adev->unique_id)
1677 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1678
1679 return 0;
1680 }
1681
1682 /**
1683 * DOC: thermal_throttling_logging
1684 *
1685 * Thermal throttling pulls down the clock frequency and thus the performance.
1686 * It's a useful mechanism to protect the chip from overheating. Since it
1687 * impacts performance, the user controls whether it is enabled and if so,
1688 * the log frequency.
1689 *
1690 * Reading back the file shows you the status (enabled or disabled) and
1691 * the interval (in seconds) between each thermal logging.
1692 *
1693 * Writing an integer to the file sets a new logging interval, in seconds.
1694 * The value should be between 1 and 3600. If the value is less than 1,
1695 * thermal logging is disabled. Values greater than 3600 are ignored.
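 *
 * Example (a minimal sketch; the card0 path is an assumption):
 *
 * .. code-block:: bash
 *
 *    # set the thermal throttling logging interval to 60 seconds
 *    echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *    # disable thermal throttling logging
 *    echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging
 *    cat /sys/class/drm/card0/device/thermal_throttling_logging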
1696 */
1697 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1698 struct device_attribute *attr,
1699 char *buf)
1700 {
1701 struct drm_device *ddev = dev_get_drvdata(dev);
1702 struct amdgpu_device *adev = drm_to_adev(ddev);
1703
1704 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1705 adev_to_drm(adev)->unique,
1706 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1707 adev->throttling_logging_rs.interval / HZ + 1);
1708 }
1709
1710 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1711 struct device_attribute *attr,
1712 const char *buf,
1713 size_t count)
1714 {
1715 struct drm_device *ddev = dev_get_drvdata(dev);
1716 struct amdgpu_device *adev = drm_to_adev(ddev);
1717 long throttling_logging_interval;
1718 unsigned long flags;
1719 int ret = 0;
1720
1721 ret = kstrtol(buf, 0, &throttling_logging_interval);
1722 if (ret)
1723 return ret;
1724
1725 if (throttling_logging_interval > 3600)
1726 return -EINVAL;
1727
1728 if (throttling_logging_interval > 0) {
1729 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1730 /*
1731 * Reset the ratelimit timer internals.
1732 * This can effectively restart the timer.
1733 */
1734 adev->throttling_logging_rs.interval =
1735 (throttling_logging_interval - 1) * HZ;
1736 adev->throttling_logging_rs.begin = 0;
1737 adev->throttling_logging_rs.printed = 0;
1738 adev->throttling_logging_rs.missed = 0;
1739 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1740
1741 atomic_set(&adev->throttling_logging_enabled, 1);
1742 } else {
1743 atomic_set(&adev->throttling_logging_enabled, 0);
1744 }
1745
1746 return count;
1747 }
1748
1749 /**
1750 * DOC: gpu_metrics
1751 *
1752 * The amdgpu driver provides a sysfs API for retrieving current gpu
1753 * metrics data. The file gpu_metrics is used for this. Reading the
1754 * file will dump all the current gpu metrics data.
1755 *
1756 * These data include temperature, frequency, engine utilization,
1757 * power consumption, throttler status, fan speed and CPU core statistics
1758 * (available for APUs only). That is, it gives a snapshot of all sensors
1759 * at the same time.
1760 */
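/*
 * Note: the data is returned as a binary metrics table rather than plain
 * text, so the file is intended to be parsed by tools; the handler below
 * simply copies the table provided by the powerplay backend into the sysfs
 * buffer.
 */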
1761 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1762 struct device_attribute *attr,
1763 char *buf)
1764 {
1765 struct drm_device *ddev = dev_get_drvdata(dev);
1766 struct amdgpu_device *adev = drm_to_adev(ddev);
1767 void *gpu_metrics;
1768 ssize_t size = 0;
1769 int ret;
1770
1771 if (amdgpu_in_reset(adev))
1772 return -EPERM;
1773 if (adev->in_suspend && !adev->in_runpm)
1774 return -EPERM;
1775
1776 ret = pm_runtime_get_sync(ddev->dev);
1777 if (ret < 0) {
1778 pm_runtime_put_autosuspend(ddev->dev);
1779 return ret;
1780 }
1781
1782 if (adev->powerplay.pp_funcs->get_gpu_metrics)
1783 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1784
1785 if (size <= 0)
1786 goto out;
1787
1788 if (size >= PAGE_SIZE)
1789 size = PAGE_SIZE - 1;
1790
1791 memcpy(buf, gpu_metrics, size);
1792
1793 out:
1794 pm_runtime_mark_last_busy(ddev->dev);
1795 pm_runtime_put_autosuspend(ddev->dev);
1796
1797 return size;
1798 }
1799
1800 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
1801 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1802 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1803 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
1804 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
1805 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
1806 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
1807 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1808 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1809 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1810 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1811 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1812 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1813 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
1814 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
1815 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
1816 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
1817 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
1818 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
1819 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
1820 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
1821 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
1822 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
1823 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
1824 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
1825 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
1826 };
1827
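/*
 * Default per-attribute fixup: mark an attribute unsupported when the ASIC or
 * the powerplay backend lacks the corresponding feature, and drop write
 * permission where only reading is meaningful (e.g. some clock levels).
 */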
1828 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1829 uint32_t mask, enum amdgpu_device_attr_states *states)
1830 {
1831 struct device_attribute *dev_attr = &attr->dev_attr;
1832 const char *attr_name = dev_attr->attr.name;
1833 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1834 enum amd_asic_type asic_type = adev->asic_type;
1835
1836 if (!(attr->flags & mask)) {
1837 *states = ATTR_STATE_UNSUPPORTED;
1838 return 0;
1839 }
1840
1841 #define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
1842
1843 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1844 if (asic_type < CHIP_VEGA10)
1845 *states = ATTR_STATE_UNSUPPORTED;
1846 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
1847 if (asic_type < CHIP_VEGA10 ||
1848 asic_type == CHIP_ARCTURUS ||
1849 asic_type == CHIP_ALDEBARAN)
1850 *states = ATTR_STATE_UNSUPPORTED;
1851 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1852 if (asic_type < CHIP_VEGA20)
1853 *states = ATTR_STATE_UNSUPPORTED;
1854 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
1855 *states = ATTR_STATE_UNSUPPORTED;
1856 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
1857 (is_support_sw_smu(adev) && adev->smu.is_apu) ||
1858 (!is_support_sw_smu(adev) && hwmgr->od_enabled))
1859 *states = ATTR_STATE_SUPPORTED;
1860 } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
1861 if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
1862 *states = ATTR_STATE_UNSUPPORTED;
1863 } else if (DEVICE_ATTR_IS(pcie_bw)) {
1864 /* PCIe Perf counters won't work on APU nodes */
1865 if (adev->flags & AMD_IS_APU)
1866 *states = ATTR_STATE_UNSUPPORTED;
1867 } else if (DEVICE_ATTR_IS(unique_id)) {
1868 if (asic_type != CHIP_VEGA10 &&
1869 asic_type != CHIP_VEGA20 &&
1870 asic_type != CHIP_ARCTURUS)
1871 *states = ATTR_STATE_UNSUPPORTED;
1872 } else if (DEVICE_ATTR_IS(pp_features)) {
1873 if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
1874 *states = ATTR_STATE_UNSUPPORTED;
1875 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
1876 if (asic_type < CHIP_VEGA12)
1877 *states = ATTR_STATE_UNSUPPORTED;
1878 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
1879 if (!(asic_type == CHIP_VANGOGH))
1880 *states = ATTR_STATE_UNSUPPORTED;
1881 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
1882 if (!(asic_type == CHIP_VANGOGH))
1883 *states = ATTR_STATE_UNSUPPORTED;
1884 }
1885
1886 if (asic_type == CHIP_ARCTURUS) {
1887 /* Arcturus does not support standalone mclk/socclk/fclk level setting */
1888 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
1889 DEVICE_ATTR_IS(pp_dpm_socclk) ||
1890 DEVICE_ATTR_IS(pp_dpm_fclk)) {
1891 dev_attr->attr.mode &= ~S_IWUGO;
1892 dev_attr->store = NULL;
1893 }
1894 }
1895
1896 if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
1897 /* SMU MP1 does not support dcefclk level setting */
1898 if (asic_type >= CHIP_NAVI10) {
1899 dev_attr->attr.mode &= ~S_IWUGO;
1900 dev_attr->store = NULL;
1901 }
1902 }
1903
1904 #undef DEVICE_ATTR_IS
1905
1906 return 0;
1907 }
1908
1909
1910 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
1911 struct amdgpu_device_attr *attr,
1912 uint32_t mask, struct list_head *attr_list)
1913 {
1914 int ret = 0;
1915 struct device_attribute *dev_attr = &attr->dev_attr;
1916 const char *name = dev_attr->attr.name;
1917 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
1918 struct amdgpu_device_attr_entry *attr_entry;
1919
1920 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1921 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
1922
1923 BUG_ON(!attr);
1924
1925 attr_update = attr->attr_update ? attr_update : default_attr_update;
1926
1927 ret = attr_update(adev, attr, mask, &attr_states);
1928 if (ret) {
1929 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
1930 name, ret);
1931 return ret;
1932 }
1933
1934 if (attr_states == ATTR_STATE_UNSUPPORTED)
1935 return 0;
1936
1937 ret = device_create_file(adev->dev, dev_attr);
1938 if (ret) {
1939 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
1940 name, ret);
1941 }
1942
1943 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
1944 if (!attr_entry)
1945 return -ENOMEM;
1946
1947 attr_entry->attr = attr;
1948 INIT_LIST_HEAD(&attr_entry->entry);
1949
1950 list_add_tail(&attr_entry->entry, attr_list);
1951
1952 return ret;
1953 }
1954
1955 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
1956 {
1957 struct device_attribute *dev_attr = &attr->dev_attr;
1958
1959 device_remove_file(adev->dev, dev_attr);
1960 }
1961
1962 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
1963 struct list_head *attr_list);
1964
1965 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
1966 struct amdgpu_device_attr *attrs,
1967 uint32_t counts,
1968 uint32_t mask,
1969 struct list_head *attr_list)
1970 {
1971 int ret = 0;
1972 uint32_t i = 0;
1973
1974 for (i = 0; i < counts; i++) {
1975 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
1976 if (ret)
1977 goto failed;
1978 }
1979
1980 return 0;
1981
1982 failed:
1983 amdgpu_device_attr_remove_groups(adev, attr_list);
1984
1985 return ret;
1986 }
1987
1988 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
1989 struct list_head *attr_list)
1990 {
1991 struct amdgpu_device_attr_entry *entry, *entry_tmp;
1992
1993 if (list_empty(attr_list))
1994 return;
1995
1996 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
1997 amdgpu_device_attr_remove(adev, entry->attr);
1998 list_del(&entry->entry);
1999 kfree(entry);
2000 }
2001 }
2002
2003 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2004 struct device_attribute *attr,
2005 char *buf)
2006 {
2007 struct amdgpu_device *adev = dev_get_drvdata(dev);
2008 int channel = to_sensor_dev_attr(attr)->index;
2009 int r, temp = 0, size = sizeof(temp);
2010
2011 if (amdgpu_in_reset(adev))
2012 return -EPERM;
2013 if (adev->in_suspend && !adev->in_runpm)
2014 return -EPERM;
2015
2016 if (channel >= PP_TEMP_MAX)
2017 return -EINVAL;
2018
2019 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2020 if (r < 0) {
2021 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2022 return r;
2023 }
2024
2025 switch (channel) {
2026 case PP_TEMP_JUNCTION:
2027 /* get current junction temperature */
2028 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2029 (void *)&temp, &size);
2030 break;
2031 case PP_TEMP_EDGE:
2032 /* get current edge temperature */
2033 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2034 (void *)&temp, &size);
2035 break;
2036 case PP_TEMP_MEM:
2037 /* get current memory temperature */
2038 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2039 (void *)&temp, &size);
2040 break;
2041 default:
2042 r = -EINVAL;
2043 break;
2044 }
2045
2046 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2047 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2048
2049 if (r)
2050 return r;
2051
2052 return sysfs_emit(buf, "%d\n", temp);
2053 }
2054
2055 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2056 struct device_attribute *attr,
2057 char *buf)
2058 {
2059 struct amdgpu_device *adev = dev_get_drvdata(dev);
2060 int hyst = to_sensor_dev_attr(attr)->index;
2061 int temp;
2062
2063 if (hyst)
2064 temp = adev->pm.dpm.thermal.min_temp;
2065 else
2066 temp = adev->pm.dpm.thermal.max_temp;
2067
2068 return sysfs_emit(buf, "%d\n", temp);
2069 }
2070
2071 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2072 struct device_attribute *attr,
2073 char *buf)
2074 {
2075 struct amdgpu_device *adev = dev_get_drvdata(dev);
2076 int hyst = to_sensor_dev_attr(attr)->index;
2077 int temp;
2078
2079 if (hyst)
2080 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2081 else
2082 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2083
2084 return sysfs_emit(buf, "%d\n", temp);
2085 }
2086
2087 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2088 struct device_attribute *attr,
2089 char *buf)
2090 {
2091 struct amdgpu_device *adev = dev_get_drvdata(dev);
2092 int hyst = to_sensor_dev_attr(attr)->index;
2093 int temp;
2094
2095 if (hyst)
2096 temp = adev->pm.dpm.thermal.min_mem_temp;
2097 else
2098 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2099
2100 return sysfs_emit(buf, "%d\n", temp);
2101 }
2102
2103 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2104 struct device_attribute *attr,
2105 char *buf)
2106 {
2107 int channel = to_sensor_dev_attr(attr)->index;
2108
2109 if (channel >= PP_TEMP_MAX)
2110 return -EINVAL;
2111
2112 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2113 }
2114
2115 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2116 struct device_attribute *attr,
2117 char *buf)
2118 {
2119 struct amdgpu_device *adev = dev_get_drvdata(dev);
2120 int channel = to_sensor_dev_attr(attr)->index;
2121 int temp = 0;
2122
2123 if (channel >= PP_TEMP_MAX)
2124 return -EINVAL;
2125
2126 switch (channel) {
2127 case PP_TEMP_JUNCTION:
2128 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2129 break;
2130 case PP_TEMP_EDGE:
2131 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2132 break;
2133 case PP_TEMP_MEM:
2134 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2135 break;
2136 }
2137
2138 return sysfs_emit(buf, "%d\n", temp);
2139 }
2140
2141 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2142 struct device_attribute *attr,
2143 char *buf)
2144 {
2145 struct amdgpu_device *adev = dev_get_drvdata(dev);
2146 u32 pwm_mode = 0;
2147 int ret;
2148
2149 if (amdgpu_in_reset(adev))
2150 return -EPERM;
2151 if (adev->in_suspend && !adev->in_runpm)
2152 return -EPERM;
2153
2154 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2155 if (ret < 0) {
2156 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2157 return ret;
2158 }
2159
2160 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2161 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2162 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2163 return -EINVAL;
2164 }
2165
2166 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2167
2168 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2169 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2170
2171 return sprintf(buf, "%u\n", pwm_mode);
2172 }
2173
2174 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2175 struct device_attribute *attr,
2176 const char *buf,
2177 size_t count)
2178 {
2179 struct amdgpu_device *adev = dev_get_drvdata(dev);
2180 int err, ret;
2181 int value;
2182
2183 if (amdgpu_in_reset(adev))
2184 return -EPERM;
2185 if (adev->in_suspend && !adev->in_runpm)
2186 return -EPERM;
2187
2188 err = kstrtoint(buf, 10, &value);
2189 if (err)
2190 return err;
2191
2192 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2193 if (ret < 0) {
2194 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2195 return ret;
2196 }
2197
2198 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2199 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2200 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2201 return -EINVAL;
2202 }
2203
2204 amdgpu_dpm_set_fan_control_mode(adev, value);
2205
2206 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2207 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2208
2209 return count;
2210 }
2211
2212 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2213 struct device_attribute *attr,
2214 char *buf)
2215 {
2216 return sprintf(buf, "%i\n", 0);
2217 }
2218
2219 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2220 struct device_attribute *attr,
2221 char *buf)
2222 {
2223 return sprintf(buf, "%i\n", 255);
2224 }
2225
2226 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2227 struct device_attribute *attr,
2228 const char *buf, size_t count)
2229 {
2230 struct amdgpu_device *adev = dev_get_drvdata(dev);
2231 int err;
2232 u32 value;
2233 u32 pwm_mode;
2234
2235 if (amdgpu_in_reset(adev))
2236 return -EPERM;
2237 if (adev->in_suspend && !adev->in_runpm)
2238 return -EPERM;
2239
2240 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2241 if (err < 0) {
2242 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2243 return err;
2244 }
2245
2246 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2247 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2248 pr_info("manual fan speed control should be enabled first\n");
2249 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2250 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2251 return -EINVAL;
2252 }
2253
2254 err = kstrtou32(buf, 10, &value);
2255 if (err) {
2256 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2257 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2258 return err;
2259 }
2260
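	/* scale the 0-255 pwm value from sysfs to the percentage based dpm API */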
2261 value = (value * 100) / 255;
2262
2263 if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2264 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2265 else
2266 err = -EINVAL;
2267
2268 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2269 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2270
2271 if (err)
2272 return err;
2273
2274 return count;
2275 }
2276
2277 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2278 struct device_attribute *attr,
2279 char *buf)
2280 {
2281 struct amdgpu_device *adev = dev_get_drvdata(dev);
2282 int err;
2283 u32 speed = 0;
2284
2285 if (amdgpu_in_reset(adev))
2286 return -EPERM;
2287 if (adev->in_suspend && !adev->in_runpm)
2288 return -EPERM;
2289
2290 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2291 if (err < 0) {
2292 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2293 return err;
2294 }
2295
2296 if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2297 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2298 else
2299 err = -EINVAL;
2300
2301 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2302 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2303
2304 if (err)
2305 return err;
2306
2307 speed = (speed * 255) / 100;
2308
2309 return sprintf(buf, "%i\n", speed);
2310 }
2311
2312 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2313 struct device_attribute *attr,
2314 char *buf)
2315 {
2316 struct amdgpu_device *adev = dev_get_drvdata(dev);
2317 int err;
2318 u32 speed = 0;
2319
2320 if (amdgpu_in_reset(adev))
2321 return -EPERM;
2322 if (adev->in_suspend && !adev->in_runpm)
2323 return -EPERM;
2324
2325 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2326 if (err < 0) {
2327 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2328 return err;
2329 }
2330
2331 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2332 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2333 else
2334 err = -EINVAL;
2335
2336 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2337 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2338
2339 if (err)
2340 return err;
2341
2342 return sprintf(buf, "%i\n", speed);
2343 }
2344
2345 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2346 struct device_attribute *attr,
2347 char *buf)
2348 {
2349 struct amdgpu_device *adev = dev_get_drvdata(dev);
2350 u32 min_rpm = 0;
2351 u32 size = sizeof(min_rpm);
2352 int r;
2353
2354 if (amdgpu_in_reset(adev))
2355 return -EPERM;
2356 if (adev->in_suspend && !adev->in_runpm)
2357 return -EPERM;
2358
2359 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2360 if (r < 0) {
2361 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2362 return r;
2363 }
2364
2365 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2366 (void *)&min_rpm, &size);
2367
2368 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2369 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2370
2371 if (r)
2372 return r;
2373
2374 return sysfs_emit(buf, "%d\n", min_rpm);
2375 }
2376
2377 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2378 struct device_attribute *attr,
2379 char *buf)
2380 {
2381 struct amdgpu_device *adev = dev_get_drvdata(dev);
2382 u32 max_rpm = 0;
2383 u32 size = sizeof(max_rpm);
2384 int r;
2385
2386 if (amdgpu_in_reset(adev))
2387 return -EPERM;
2388 if (adev->in_suspend && !adev->in_runpm)
2389 return -EPERM;
2390
2391 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2392 if (r < 0) {
2393 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2394 return r;
2395 }
2396
2397 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2398 (void *)&max_rpm, &size);
2399
2400 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2401 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2402
2403 if (r)
2404 return r;
2405
2406 return sysfs_emit(buf, "%d\n", max_rpm);
2407 }
2408
2409 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2410 struct device_attribute *attr,
2411 char *buf)
2412 {
2413 struct amdgpu_device *adev = dev_get_drvdata(dev);
2414 int err;
2415 u32 rpm = 0;
2416
2417 if (amdgpu_in_reset(adev))
2418 return -EPERM;
2419 if (adev->in_suspend && !adev->in_runpm)
2420 return -EPERM;
2421
2422 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2423 if (err < 0) {
2424 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2425 return err;
2426 }
2427
2428 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2429 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2430 else
2431 err = -EINVAL;
2432
2433 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2434 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2435
2436 if (err)
2437 return err;
2438
2439 return sprintf(buf, "%i\n", rpm);
2440 }
2441
2442 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2443 struct device_attribute *attr,
2444 const char *buf, size_t count)
2445 {
2446 struct amdgpu_device *adev = dev_get_drvdata(dev);
2447 int err;
2448 u32 value;
2449 u32 pwm_mode;
2450
2451 if (amdgpu_in_reset(adev))
2452 return -EPERM;
2453 if (adev->in_suspend && !adev->in_runpm)
2454 return -EPERM;
2455
2456 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2457 if (err < 0) {
2458 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2459 return err;
2460 }
2461
2462 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2463
2464 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2465 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2466 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2467 return -ENODATA;
2468 }
2469
2470 err = kstrtou32(buf, 10, &value);
2471 if (err) {
2472 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2473 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2474 return err;
2475 }
2476
2477 if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2478 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2479 else
2480 err = -EINVAL;
2481
2482 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2483 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2484
2485 if (err)
2486 return err;
2487
2488 return count;
2489 }
2490
2491 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2492 struct device_attribute *attr,
2493 char *buf)
2494 {
2495 struct amdgpu_device *adev = dev_get_drvdata(dev);
2496 u32 pwm_mode = 0;
2497 int ret;
2498
2499 if (amdgpu_in_reset(adev))
2500 return -EPERM;
2501 if (adev->in_suspend && !adev->in_runpm)
2502 return -EPERM;
2503
2504 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2505 if (ret < 0) {
2506 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2507 return ret;
2508 }
2509
2510 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2511 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2512 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2513 return -EINVAL;
2514 }
2515
2516 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2517
2518 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2519 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2520
2521 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2522 }
2523
2524 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2525 struct device_attribute *attr,
2526 const char *buf,
2527 size_t count)
2528 {
2529 struct amdgpu_device *adev = dev_get_drvdata(dev);
2530 int err;
2531 int value;
2532 u32 pwm_mode;
2533
2534 if (amdgpu_in_reset(adev))
2535 return -EPERM;
2536 if (adev->in_suspend && !adev->in_runpm)
2537 return -EPERM;
2538
2539 err = kstrtoint(buf, 10, &value);
2540 if (err)
2541 return err;
2542
2543 if (value == 0)
2544 pwm_mode = AMD_FAN_CTRL_AUTO;
2545 else if (value == 1)
2546 pwm_mode = AMD_FAN_CTRL_MANUAL;
2547 else
2548 return -EINVAL;
2549
2550 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2551 if (err < 0) {
2552 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2553 return err;
2554 }
2555
2556 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2557 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2558 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2559 return -EINVAL;
2560 }
2561 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2562
2563 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2564 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2565
2566 return count;
2567 }
2568
2569 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2570 struct device_attribute *attr,
2571 char *buf)
2572 {
2573 struct amdgpu_device *adev = dev_get_drvdata(dev);
2574 u32 vddgfx;
2575 int r, size = sizeof(vddgfx);
2576
2577 if (amdgpu_in_reset(adev))
2578 return -EPERM;
2579 if (adev->in_suspend && !adev->in_runpm)
2580 return -EPERM;
2581
2582 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2583 if (r < 0) {
2584 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2585 return r;
2586 }
2587
2588 /* get the voltage */
2589 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2590 (void *)&vddgfx, &size);
2591
2592 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2593 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2594
2595 if (r)
2596 return r;
2597
2598 return sysfs_emit(buf, "%d\n", vddgfx);
2599 }
2600
2601 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2602 struct device_attribute *attr,
2603 char *buf)
2604 {
2605 return sysfs_emit(buf, "vddgfx\n");
2606 }
2607
2608 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2609 struct device_attribute *attr,
2610 char *buf)
2611 {
2612 struct amdgpu_device *adev = dev_get_drvdata(dev);
2613 u32 vddnb;
2614 int r, size = sizeof(vddnb);
2615
2616 if (amdgpu_in_reset(adev))
2617 return -EPERM;
2618 if (adev->in_suspend && !adev->in_runpm)
2619 return -EPERM;
2620
2621 /* only APUs have vddnb */
2622 if (!(adev->flags & AMD_IS_APU))
2623 return -EINVAL;
2624
2625 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2626 if (r < 0) {
2627 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2628 return r;
2629 }
2630
2631 /* get the voltage */
2632 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2633 (void *)&vddnb, &size);
2634
2635 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2636 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2637
2638 if (r)
2639 return r;
2640
2641 return sysfs_emit(buf, "%d\n", vddnb);
2642 }
2643
2644 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2645 struct device_attribute *attr,
2646 char *buf)
2647 {
2648 return sysfs_emit(buf, "vddnb\n");
2649 }
2650
2651 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2652 struct device_attribute *attr,
2653 char *buf)
2654 {
2655 struct amdgpu_device *adev = dev_get_drvdata(dev);
2656 u32 query = 0;
2657 int r, size = sizeof(u32);
2658 unsigned uw;
2659
2660 if (amdgpu_in_reset(adev))
2661 return -EPERM;
2662 if (adev->in_suspend && !adev->in_runpm)
2663 return -EPERM;
2664
2665 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2666 if (r < 0) {
2667 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2668 return r;
2669 }
2670
2671 /* get the power */
2672 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2673 (void *)&query, &size);
2674
2675 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2676 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2677
2678 if (r)
2679 return r;
2680
2681 /* convert to microwatts */
2682 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2683
2684 return sysfs_emit(buf, "%u\n", uw);
2685 }
2686
2687 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2688 struct device_attribute *attr,
2689 char *buf)
2690 {
2691 return sprintf(buf, "%i\n", 0);
2692 }
2693
2694 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2695 struct device_attribute *attr,
2696 char *buf)
2697 {
2698 struct amdgpu_device *adev = dev_get_drvdata(dev);
2699 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2700 int limit_type = to_sensor_dev_attr(attr)->index;
2701 uint32_t limit = limit_type << 24;
2702 uint32_t max_limit = 0;
2703 ssize_t size;
2704 int r;
2705
2706 if (amdgpu_in_reset(adev))
2707 return -EPERM;
2708 if (adev->in_suspend && !adev->in_runpm)
2709 return -EPERM;
2710
2711 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2712 if (r < 0) {
2713 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2714 return r;
2715 }
2716
2717 if (is_support_sw_smu(adev)) {
2718 smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
2719 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2720 } else if (pp_funcs && pp_funcs->get_power_limit) {
2721 pp_funcs->get_power_limit(adev->powerplay.pp_handle,
2722 &limit, &max_limit, true);
2723 size = snprintf(buf, PAGE_SIZE, "%u\n", max_limit * 1000000);
2724 } else {
2725 size = snprintf(buf, PAGE_SIZE, "\n");
2726 }
2727
2728 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2729 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2730
2731 return size;
2732 }
2733
2734 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2735 struct device_attribute *attr,
2736 char *buf)
2737 {
2738 struct amdgpu_device *adev = dev_get_drvdata(dev);
2739 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2740 int limit_type = to_sensor_dev_attr(attr)->index;
2741 uint32_t limit = limit_type << 24;
2742 ssize_t size;
2743 int r;
2744
2745 if (amdgpu_in_reset(adev))
2746 return -EPERM;
2747 if (adev->in_suspend && !adev->in_runpm)
2748 return -EPERM;
2749
2750 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2751 if (r < 0) {
2752 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2753 return r;
2754 }
2755
2756 if (is_support_sw_smu(adev)) {
2757 smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
2758 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2759 } else if (pp_funcs && pp_funcs->get_power_limit) {
2760 pp_funcs->get_power_limit(adev->powerplay.pp_handle,
2761 &limit, NULL, false);
2762 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2763 } else {
2764 size = snprintf(buf, PAGE_SIZE, "\n");
2765 }
2766
2767 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2768 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2769
2770 return size;
2771 }
2772
2773 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2774 struct device_attribute *attr,
2775 char *buf)
2776 {
2777 struct amdgpu_device *adev = dev_get_drvdata(dev);
2778 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2779 int limit_type = to_sensor_dev_attr(attr)->index;
2780 uint32_t limit = limit_type << 24;
2781 ssize_t size;
2782 int r;
2783
2784 if (amdgpu_in_reset(adev))
2785 return -EPERM;
2786 if (adev->in_suspend && !adev->in_runpm)
2787 return -EPERM;
2788
2789 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2790 if (r < 0) {
2791 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2792 return r;
2793 }
2794
2795 if (is_support_sw_smu(adev)) {
2796 smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT);
2797 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2798 } else if (pp_funcs && pp_funcs->get_power_limit) {
2799 pp_funcs->get_power_limit(adev->powerplay.pp_handle,
2800 &limit, NULL, true);
2801 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2802 } else {
2803 size = snprintf(buf, PAGE_SIZE, "\n");
2804 }
2805
2806 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2807 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2808
2809 return size;
2810 }
2811 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2812 struct device_attribute *attr,
2813 char *buf)
2814 {
2815 int limit_type = to_sensor_dev_attr(attr)->index;
2816
2817 return sysfs_emit(buf, "%s\n",
2818 limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
2819 }
2820
2821 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2822 struct device_attribute *attr,
2823 const char *buf,
2824 size_t count)
2825 {
2826 struct amdgpu_device *adev = dev_get_drvdata(dev);
2827 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2828 int limit_type = to_sensor_dev_attr(attr)->index;
2829 int err;
2830 u32 value;
2831
2832 if (amdgpu_in_reset(adev))
2833 return -EPERM;
2834 if (adev->in_suspend && !adev->in_runpm)
2835 return -EPERM;
2836
2837 if (amdgpu_sriov_vf(adev))
2838 return -EINVAL;
2839
2840 err = kstrtou32(buf, 10, &value);
2841 if (err)
2842 return err;
2843
2844 value = value / 1000000; /* convert to Watt */
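	/* the PPT limit type (fast/slow) is carried in the top byte of the value */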
2845 value |= limit_type << 24;
2846
2847 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2848 if (err < 0) {
2849 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2850 return err;
2851 }
2852
2853 if (pp_funcs && pp_funcs->set_power_limit)
2854 err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
2855 else
2856 err = -EINVAL;
2857
2858 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2859 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2860
2861 if (err)
2862 return err;
2863
2864 return count;
2865 }
2866
2867 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2868 struct device_attribute *attr,
2869 char *buf)
2870 {
2871 struct amdgpu_device *adev = dev_get_drvdata(dev);
2872 uint32_t sclk;
2873 int r, size = sizeof(sclk);
2874
2875 if (amdgpu_in_reset(adev))
2876 return -EPERM;
2877 if (adev->in_suspend && !adev->in_runpm)
2878 return -EPERM;
2879
2880 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2881 if (r < 0) {
2882 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2883 return r;
2884 }
2885
2886 /* get the sclk */
2887 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2888 (void *)&sclk, &size);
2889
2890 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2891 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2892
2893 if (r)
2894 return r;
2895
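	/* the sensor reports sclk in 10 kHz units; hwmon expects hertz */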
2896 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
2897 }
2898
2899 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2900 struct device_attribute *attr,
2901 char *buf)
2902 {
2903 return sysfs_emit(buf, "sclk\n");
2904 }
2905
2906 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2907 struct device_attribute *attr,
2908 char *buf)
2909 {
2910 struct amdgpu_device *adev = dev_get_drvdata(dev);
2911 uint32_t mclk;
2912 int r, size = sizeof(mclk);
2913
2914 if (amdgpu_in_reset(adev))
2915 return -EPERM;
2916 if (adev->in_suspend && !adev->in_runpm)
2917 return -EPERM;
2918
2919 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2920 if (r < 0) {
2921 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2922 return r;
2923 }
2924
2925 /* get the mclk */
2926 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2927 (void *)&mclk, &size);
2928
2929 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2930 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2931
2932 if (r)
2933 return r;
2934
2935 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
2936 }
2937
2938 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
2939 struct device_attribute *attr,
2940 char *buf)
2941 {
2942 return sysfs_emit(buf, "mclk\n");
2943 }
2944
2945 /**
2946 * DOC: hwmon
2947 *
2948 * The amdgpu driver exposes the following sensor interfaces:
2949 *
2950 * - GPU temperature (via the on-die sensor)
2951 *
2952 * - GPU voltage
2953 *
2954 * - Northbridge voltage (APUs only)
2955 *
2956 * - GPU power
2957 *
2958 * - GPU fan
2959 *
2960 * - GPU gfx/compute engine clock
2961 *
2962 * - GPU memory clock (dGPU only)
2963 *
2964 * hwmon interfaces for GPU temperature:
2965 *
2966 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
2967 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
2968 *
2969 * - temp[1-3]_label: temperature channel label
2970 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
2971 *
2972 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
2973 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
2974 *
2975 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
2976 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
2977 *
2978 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
2979 * - these are supported on SOC15 dGPUs only
2980 *
2981 * hwmon interfaces for GPU voltage:
2982 *
2983 * - in0_input: the voltage on the GPU in millivolts
2984 *
2985 * - in1_input: the voltage on the Northbridge in millivolts
2986 *
2987 * hwmon interfaces for GPU power:
2988 *
2989 * - power1_average: average power used by the GPU in microWatts
2990 *
2991 * - power1_cap_min: minimum cap supported in microWatts
2992 *
2993 * - power1_cap_max: maximum cap supported in microWatts
2994 *
2995 * - power1_cap: selected power cap in microWatts
2996 *
2997 * hwmon interfaces for GPU fan:
2998 *
2999 * - pwm1: pulse width modulation fan level (0-255)
3000 *
3001 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3002 *
3003 * - pwm1_min: pulse width modulation fan control minimum level (0)
3004 *
3005 * - pwm1_max: pulse width modulation fan control maximum level (255)
3006 *
3007 * - fan1_min: the minimum fan speed supported, in revolutions per minute (RPM)
3008 *
3009 * - fan1_max: the maximum fan speed supported, in revolutions per minute (RPM)
3010 *
3011 * - fan1_input: fan speed in RPM
3012 *
3013 * - fan[1-\*]_target: the desired fan speed, in revolutions per minute (RPM)
3014 *
3015 * - fan[1-\*]_enable: enable or disable the sensors. 1: Enable, 0: Disable
3016 *
3017 * hwmon interfaces for GPU clocks:
3018 *
3019 * - freq1_input: the gfx/compute clock in hertz
3020 *
3021 * - freq2_input: the memory clock in hertz
3022 *
3023 * You can use hwmon tools like sensors to view this information on your system.
3024 *
3025 */
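/*
 * Illustrative example of the interfaces above: assuming the GPU's hwmon
 * directory is something like /sys/class/drm/card0/device/hwmon/hwmon0,
 * reading temp1_input returns the edge temperature in millidegrees Celsius,
 * and writing 1 to pwm1_enable followed by a value between 0 and 255 to pwm1
 * selects a manual fan speed; writing 2 to pwm1_enable restores automatic
 * fan control.
 */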
3026
3027 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3028 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3029 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3030 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3031 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3032 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3033 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3034 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3035 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3036 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3037 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3038 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3039 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3040 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3041 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3042 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3043 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3044 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3045 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3046 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3047 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3048 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3049 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3050 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3051 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3052 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3053 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3054 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3055 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3056 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3057 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3058 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3059 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3060 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3061 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3062 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3063 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3064 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3065 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3066 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3067 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3068 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3069 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3070 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3071
3072 static struct attribute *hwmon_attributes[] = {
3073 &sensor_dev_attr_temp1_input.dev_attr.attr,
3074 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3075 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3076 &sensor_dev_attr_temp2_input.dev_attr.attr,
3077 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3078 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3079 &sensor_dev_attr_temp3_input.dev_attr.attr,
3080 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3081 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3082 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3083 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3084 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3085 &sensor_dev_attr_temp1_label.dev_attr.attr,
3086 &sensor_dev_attr_temp2_label.dev_attr.attr,
3087 &sensor_dev_attr_temp3_label.dev_attr.attr,
3088 &sensor_dev_attr_pwm1.dev_attr.attr,
3089 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3090 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3091 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3092 &sensor_dev_attr_fan1_input.dev_attr.attr,
3093 &sensor_dev_attr_fan1_min.dev_attr.attr,
3094 &sensor_dev_attr_fan1_max.dev_attr.attr,
3095 &sensor_dev_attr_fan1_target.dev_attr.attr,
3096 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3097 &sensor_dev_attr_in0_input.dev_attr.attr,
3098 &sensor_dev_attr_in0_label.dev_attr.attr,
3099 &sensor_dev_attr_in1_input.dev_attr.attr,
3100 &sensor_dev_attr_in1_label.dev_attr.attr,
3101 &sensor_dev_attr_power1_average.dev_attr.attr,
3102 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3103 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3104 &sensor_dev_attr_power1_cap.dev_attr.attr,
3105 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3106 &sensor_dev_attr_power1_label.dev_attr.attr,
3107 &sensor_dev_attr_power2_average.dev_attr.attr,
3108 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3109 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3110 &sensor_dev_attr_power2_cap.dev_attr.attr,
3111 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3112 &sensor_dev_attr_power2_label.dev_attr.attr,
3113 &sensor_dev_attr_freq1_input.dev_attr.attr,
3114 &sensor_dev_attr_freq1_label.dev_attr.attr,
3115 &sensor_dev_attr_freq2_input.dev_attr.attr,
3116 &sensor_dev_attr_freq2_label.dev_attr.attr,
3117 NULL
3118 };
3119
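/*
 * Decide the effective mode of each hwmon attribute for this device:
 * attributes are hidden (mode 0) or made read-only depending on the
 * virtualization mode, the ASIC family/type and the callbacks provided by
 * the powerplay backend.
 */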
3120 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3121 struct attribute *attr, int index)
3122 {
3123 struct device *dev = kobj_to_dev(kobj);
3124 struct amdgpu_device *adev = dev_get_drvdata(dev);
3125 umode_t effective_mode = attr->mode;
3126
3127 /* under multi-vf mode, the hwmon attributes are all not supported */
3128 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3129 return 0;
3130
3131 /* there is no fan under pp one vf mode */
3132 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3133 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3134 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3135 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3136 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3137 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3138 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3139 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3140 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3141 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3142 return 0;
3143
3144 /* Skip fan attributes if fan is not present */
3145 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3146 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3147 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3148 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3149 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3150 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3151 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3152 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3153 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3154 return 0;
3155
3156 /* Skip fan attributes on APU */
3157 if ((adev->flags & AMD_IS_APU) &&
3158 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3159 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3160 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3161 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3162 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3163 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3164 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3165 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3166 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3167 return 0;
3168
3169 /* Skip crit temp on APU */
3170 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3171 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3172 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3173 return 0;
3174
3175 /* Skip limit attributes if DPM is not enabled */
3176 if (!adev->pm.dpm_enabled &&
3177 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3178 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3179 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3180 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3181 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3182 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3183 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3184 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3185 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3186 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3187 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3188 return 0;
3189
3190 if (!is_support_sw_smu(adev)) {
3191 /* mask fan attributes if we have no bindings for this asic to expose */
3192 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
3193 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3194 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3195 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3196 effective_mode &= ~S_IRUGO;
3197
3198 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3199 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3200 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3201 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3202 effective_mode &= ~S_IWUSR;
3203 }
3204
3205 if (((adev->family == AMDGPU_FAMILY_SI) ||
3206 ((adev->flags & AMD_IS_APU) &&
3207 (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
3208 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3209 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
3210 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3211 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3212 return 0;
3213
3214 if (((adev->family == AMDGPU_FAMILY_SI) ||
3215 ((adev->flags & AMD_IS_APU) &&
3216 (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */
3217 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3218 return 0;
3219
3220 if (!is_support_sw_smu(adev)) {
3221 /* hide max/min values if we can't both query and manage the fan */
3222 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3223 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
3224 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3225 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3226 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3227 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3228 return 0;
3229
3230 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3231 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3232 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3233 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3234 return 0;
3235 }
3236
3237 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3238 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
3239 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3240 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3241 return 0;
3242
3243 /* only APUs have vddnb */
3244 if (!(adev->flags & AMD_IS_APU) &&
3245 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3246 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3247 return 0;
3248
3249 /* no mclk on APUs */
3250 if ((adev->flags & AMD_IS_APU) &&
3251 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3252 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3253 return 0;
3254
3255 /* only SOC15 dGPUs support hotspot and mem temperatures */
3256 if (((adev->flags & AMD_IS_APU) ||
3257 adev->asic_type < CHIP_VEGA10) &&
3258 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3259 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3260 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3261 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3262 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3263 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3264 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3265 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3266 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3267 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3268 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3269 return 0;
3270
3271 /* only Vangogh has fast PPT limit and power labels */
3272 if (!(adev->asic_type == CHIP_VANGOGH) &&
3273 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3274 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3275 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3276 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3277 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3278 attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
3279 attr == &sensor_dev_attr_power1_label.dev_attr.attr))
3280 return 0;
3281
3282 return effective_mode;
3283 }
3284
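/*
 * The visibility callback above is consulted for every attribute in
 * hwmon_attributes when the hwmon device is registered, so sensors that a
 * given ASIC does not support are simply never created in sysfs.
 */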
3285 static const struct attribute_group hwmon_attrgroup = {
3286 .attrs = hwmon_attributes,
3287 .is_visible = hwmon_attributes_visible,
3288 };
3289
3290 static const struct attribute_group *hwmon_groups[] = {
3291 &hwmon_attrgroup,
3292 NULL
3293 };
3294
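/*
 * amdgpu_pm_sysfs_init - register the power-management sysfs interface.
 *
 * Registers the hwmon device with the attribute groups above and creates the
 * amdgpu-specific device attributes, filtered by the SR-IOV mode mask below.
 */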
3295 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3296 {
3297 int ret;
3298 uint32_t mask = 0;
3299
3300 if (adev->pm.sysfs_initialized)
3301 return 0;
3302
3303 if (adev->pm.dpm_enabled == 0)
3304 return 0;
3305
3306 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3307
3308 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3309 DRIVER_NAME, adev,
3310 hwmon_groups);
3311 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3312 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3313 dev_err(adev->dev,
3314 "Unable to register hwmon device: %d\n", ret);
3315 return ret;
3316 }
3317
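/*
 * Restrict which device attributes are exposed depending on how the GPU is
 * used: a single-VF SR-IOV guest gets only the one-VF attributes, a multi-VF
 * guest gets none, and bare metal (or an unknown mode) gets everything.
 */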
3318 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3319 case SRIOV_VF_MODE_ONE_VF:
3320 mask = ATTR_FLAG_ONEVF;
3321 break;
3322 case SRIOV_VF_MODE_MULTI_VF:
3323 mask = 0;
3324 break;
3325 case SRIOV_VF_MODE_BARE_METAL:
3326 default:
3327 mask = ATTR_FLAG_MASK_ALL;
3328 break;
3329 }
3330
3331 ret = amdgpu_device_attr_create_groups(adev,
3332 amdgpu_device_attrs,
3333 ARRAY_SIZE(amdgpu_device_attrs),
3334 mask,
3335 &adev->pm.pm_attr_list);
3336 if (ret)
3337 return ret;
3338
3339 adev->pm.sysfs_initialized = true;
3340
3341 return 0;
3342 }
3343
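/*
 * amdgpu_pm_sysfs_fini - tear down what amdgpu_pm_sysfs_init() created: the
 * hwmon device and the device attribute groups. A no-op when DPM was never
 * enabled.
 */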
3344 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3345 {
3346 if (adev->pm.dpm_enabled == 0)
3347 return;
3348
3349 if (adev->pm.int_hwmon_dev)
3350 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3351
3352 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3353 }
3354
3355 /*
3356 * Debugfs info
3357 */
3358 #if defined(CONFIG_DEBUG_FS)
3359
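/*
 * On APUs that support CPU clock DPM, read the per-core CPU clocks through
 * the CPU_CLK sensor and print one line per core.
 */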
3360 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
3361 struct amdgpu_device *adev) {
3362 uint16_t *p_val;
3363 uint32_t size;
3364 int i;
3365
3366 if (is_support_cclk_dpm(adev)) {
3367 p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
3368 GFP_KERNEL);
/* nothing to print if the per-core clock buffer could not be allocated */
if (!p_val)
return;
3369
3370 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
3371 (void *)p_val, &size)) {
3372 for (i = 0; i < adev->smu.cpu_core_num; i++)
3373 seq_printf(m, "\t%u MHz (CPU%d)\n",
3374 *(p_val + i), i);
3375 }
3376
3377 kfree(p_val);
3378 }
3379 }
3380
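/*
 * Dump the current clocks, voltages, power, temperature, load and multimedia
 * engine state through the powerplay sensor interface. Every read is
 * optional: a sensor the ASIC does not implement is silently skipped.
 */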
3381 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3382 {
3383 uint32_t value;
3384 uint64_t value64 = 0;
3385 uint32_t query = 0;
3386 int size;
3387
3388 /* GPU Clocks */
3389 size = sizeof(value);
3390 seq_printf(m, "GFX Clocks and Power:\n");
3391
3392 amdgpu_debugfs_prints_cpu_info(m, adev);
3393
3394 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3395 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3396 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3397 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3398 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3399 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3400 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3401 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3402 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3403 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3404 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3405 seq_printf(m, "\t%u mV (VDDNB)\n", value);
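/* The GPU power sensor appears to report an 8.8 fixed-point value in watts;
* the integer part is printed and the low byte is shown as the fraction. */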
3406 size = sizeof(uint32_t);
3407 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3408 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3409 size = sizeof(value);
3410 seq_printf(m, "\n");
3411
3412 /* GPU Temp */
3413 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3414 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3415
3416 /* GPU Load */
3417 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3418 seq_printf(m, "GPU Load: %u %%\n", value);
3419 /* MEM Load */
3420 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3421 seq_printf(m, "MEM Load: %u %%\n", value);
3422
3423 seq_printf(m, "\n");
3424
3425 /* SMC feature mask */
3426 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3427 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3428
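/* ASICs newer than Vega20 expose VCN; older parts report UVD and VCE. */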
3429 if (adev->asic_type > CHIP_VEGA20) {
3430 /* VCN clocks */
3431 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3432 if (!value) {
3433 seq_printf(m, "VCN: Disabled\n");
3434 } else {
3435 seq_printf(m, "VCN: Enabled\n");
3436 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3437 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3438 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3439 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3440 }
3441 }
3442 seq_printf(m, "\n");
3443 } else {
3444 /* UVD clocks */
3445 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3446 if (!value) {
3447 seq_printf(m, "UVD: Disabled\n");
3448 } else {
3449 seq_printf(m, "UVD: Enabled\n");
3450 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3451 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3452 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3453 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3454 }
3455 }
3456 seq_printf(m, "\n");
3457
3458 /* VCE clocks */
3459 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3460 if (!value) {
3461 seq_printf(m, "VCE: Disabled\n");
3462 } else {
3463 seq_printf(m, "VCE: Enabled\n");
3464 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3465 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3466 }
3467 }
3468 }
3469
3470 return 0;
3471 }
3472
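/*
 * Walk the clocks[] table and print whether each clock-gating feature is
 * currently enabled according to the flags mask.
 */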
3473 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3474 {
3475 int i;
3476
3477 for (i = 0; clocks[i].flag; i++)
3478 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3479 (flags & clocks[i].flag) ? "On" : "Off");
3480 }
3481
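/*
 * debugfs show handler for amdgpu_pm_info: refuses access during GPU reset
 * or suspend, holds a runtime-PM reference while reading, then prints the
 * performance data followed by the clock-gating flags.
 */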
3482 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
3483 {
3484 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3485 struct drm_device *dev = adev_to_drm(adev);
3486 u32 flags = 0;
3487 int r;
3488
3489 if (amdgpu_in_reset(adev))
3490 return -EPERM;
3491 if (adev->in_suspend && !adev->in_runpm)
3492 return -EPERM;
3493
3494 r = pm_runtime_get_sync(dev->dev);
3495 if (r < 0) {
3496 pm_runtime_put_autosuspend(dev->dev);
3497 return r;
3498 }
3499
3500 if (!adev->pm.dpm_enabled) {
3501 seq_printf(m, "dpm not enabled\n");
3502 pm_runtime_mark_last_busy(dev->dev);
3503 pm_runtime_put_autosuspend(dev->dev);
3504 return 0;
3505 }
3506
3507 if (!is_support_sw_smu(adev) &&
3508 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3509 mutex_lock(&adev->pm.mutex);
3510 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
3511 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3512 else
3513 seq_printf(m, "Debugfs support not implemented for this asic\n");
3514 mutex_unlock(&adev->pm.mutex);
3515 r = 0;
3516 } else {
3517 r = amdgpu_debugfs_pm_info_pp(m, adev);
3518 }
3519 if (r)
3520 goto out;
3521
3522 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3523
3524 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3525 amdgpu_parse_cg_state(m, flags);
3526 seq_printf(m, "\n");
3527
3528 out:
3529 pm_runtime_mark_last_busy(dev->dev);
3530 pm_runtime_put_autosuspend(dev->dev);
3531
3532 return r;
3533 }
3534
3535 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3536
3537 #endif
3538
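/*
 * Create the amdgpu_pm_info debugfs file under the DRM primary minor's
 * debugfs root. Compiled out when CONFIG_DEBUG_FS is not set.
 */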
3539 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3540 {
3541 #if defined(CONFIG_DEBUG_FS)
3542 struct drm_minor *minor = adev_to_drm(adev)->primary;
3543 struct dentry *root = minor->debugfs_root;
3544
3545 debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
3546 &amdgpu_debugfs_pm_info_fops);
3547
3548 #endif
3549 }
3550