/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21 * 22 * Authors: Rafał Miłecki <zajec5@gmail.com> 23 * Alex Deucher <alexdeucher@gmail.com> 24 */ 25 #include <drm/drmP.h> 26 #include "amdgpu.h" 27 #include "amdgpu_drv.h" 28 #include "amdgpu_pm.h" 29 #include "amdgpu_dpm.h" 30 #include "atom.h" 31 #include <linux/power_supply.h> 32 #include <linux/hwmon.h> 33 #if 0 34 #include <linux/hwmon-sysfs.h> 35 #include <linux/nospec.h> 36 #endif 37 38 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); 39 40 #if 0 41 static const struct cg_flag_name clocks[] = { 42 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 43 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 44 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 45 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 46 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 47 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 48 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 49 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 50 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 51 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 52 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 53 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 54 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 55 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 56 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 57 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 58 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 59 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 60 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 61 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 62 
{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 63 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 64 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 65 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 66 {0, NULL}, 67 }; 68 #endif 69 70 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 71 { 72 if (adev->pm.dpm_enabled) { 73 mutex_lock(&adev->pm.mutex); 74 if (power_supply_is_system_supplied() > 0) 75 adev->pm.ac_power = true; 76 else 77 adev->pm.ac_power = false; 78 if (adev->powerplay.pp_funcs && 79 adev->powerplay.pp_funcs->enable_bapm) 80 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 81 mutex_unlock(&adev->pm.mutex); 82 } 83 } 84 85 /** 86 * DOC: power_dpm_state 87 * 88 * The power_dpm_state file is a legacy interface and is only provided for 89 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting 90 * certain power related parameters. The file power_dpm_state is used for this. 91 * It accepts the following arguments: 92 * 93 * - battery 94 * 95 * - balanced 96 * 97 * - performance 98 * 99 * battery 100 * 101 * On older GPUs, the vbios provided a special power state for battery 102 * operation. Selecting battery switched to this state. This is no 103 * longer provided on newer GPUs so the option does nothing in that case. 104 * 105 * balanced 106 * 107 * On older GPUs, the vbios provided a special power state for balanced 108 * operation. Selecting balanced switched to this state. This is no 109 * longer provided on newer GPUs so the option does nothing in that case. 110 * 111 * performance 112 * 113 * On older GPUs, the vbios provided a special power state for performance 114 * operation. Selecting performance switched to this state. This is no 115 * longer provided on newer GPUs so the option does nothing in that case. 
116 * 117 */ 118 #if 0 119 static ssize_t amdgpu_get_dpm_state(struct device *dev, 120 struct device_attribute *attr, 121 char *buf) 122 { 123 struct drm_device *ddev = dev_get_drvdata(dev); 124 struct amdgpu_device *adev = ddev->dev_private; 125 enum amd_pm_state_type pm; 126 127 if (adev->powerplay.pp_funcs->get_current_power_state) 128 pm = amdgpu_dpm_get_current_power_state(adev); 129 else 130 pm = adev->pm.dpm.user_state; 131 132 return snprintf(buf, PAGE_SIZE, "%s\n", 133 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 134 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 135 } 136 137 static ssize_t amdgpu_set_dpm_state(struct device *dev, 138 struct device_attribute *attr, 139 const char *buf, 140 size_t count) 141 { 142 struct drm_device *ddev = dev_get_drvdata(dev); 143 struct amdgpu_device *adev = ddev->dev_private; 144 enum amd_pm_state_type state; 145 146 if (strncmp("battery", buf, strlen("battery")) == 0) 147 state = POWER_STATE_TYPE_BATTERY; 148 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 149 state = POWER_STATE_TYPE_BALANCED; 150 else if (strncmp("performance", buf, strlen("performance")) == 0) 151 state = POWER_STATE_TYPE_PERFORMANCE; 152 else { 153 count = -EINVAL; 154 goto fail; 155 } 156 157 if (adev->powerplay.pp_funcs->dispatch_tasks) { 158 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 159 } else { 160 mutex_lock(&adev->pm.mutex); 161 adev->pm.dpm.user_state = state; 162 mutex_unlock(&adev->pm.mutex); 163 164 /* Can't set dpm state when the card is off */ 165 if (!(adev->flags & AMD_IS_PX) || 166 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 167 amdgpu_pm_compute_clocks(adev); 168 } 169 fail: 170 return count; 171 } 172 173 174 /** 175 * DOC: power_dpm_force_performance_level 176 * 177 * The amdgpu driver provides a sysfs API for adjusting certain power 178 * related parameters. The file power_dpm_force_performance_level is 179 * used for this. 
It accepts the following arguments: 180 * 181 * - auto 182 * 183 * - low 184 * 185 * - high 186 * 187 * - manual 188 * 189 * - profile_standard 190 * 191 * - profile_min_sclk 192 * 193 * - profile_min_mclk 194 * 195 * - profile_peak 196 * 197 * auto 198 * 199 * When auto is selected, the driver will attempt to dynamically select 200 * the optimal power profile for current conditions in the driver. 201 * 202 * low 203 * 204 * When low is selected, the clocks are forced to the lowest power state. 205 * 206 * high 207 * 208 * When high is selected, the clocks are forced to the highest power state. 209 * 210 * manual 211 * 212 * When manual is selected, the user can manually adjust which power states 213 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 214 * and pp_dpm_pcie files and adjust the power state transition heuristics 215 * via the pp_power_profile_mode sysfs file. 216 * 217 * profile_standard 218 * profile_min_sclk 219 * profile_min_mclk 220 * profile_peak 221 * 222 * When the profiling modes are selected, clock and power gating are 223 * disabled and the clocks are set for different profiling cases. This 224 * mode is recommended for profiling specific work loads where you do 225 * not want clock or power gating for clock fluctuation to interfere 226 * with your results. profile_standard sets the clocks to a fixed clock 227 * level which varies from asic to asic. profile_min_sclk forces the sclk 228 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 229 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 
230 * 231 */ 232 233 static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 234 struct device_attribute *attr, 235 char *buf) 236 { 237 struct drm_device *ddev = dev_get_drvdata(dev); 238 struct amdgpu_device *adev = ddev->dev_private; 239 enum amd_dpm_forced_level level = 0xff; 240 241 if ((adev->flags & AMD_IS_PX) && 242 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 243 return snprintf(buf, PAGE_SIZE, "off\n"); 244 245 if (adev->powerplay.pp_funcs->get_performance_level) 246 level = amdgpu_dpm_get_performance_level(adev); 247 else 248 level = adev->pm.dpm.forced_level; 249 250 return snprintf(buf, PAGE_SIZE, "%s\n", 251 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 252 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 253 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 254 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 255 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 256 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 257 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 258 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? 
"profile_peak" : 259 "unknown"); 260 } 261 262 static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 263 struct device_attribute *attr, 264 const char *buf, 265 size_t count) 266 { 267 struct drm_device *ddev = dev_get_drvdata(dev); 268 struct amdgpu_device *adev = ddev->dev_private; 269 enum amd_dpm_forced_level level; 270 enum amd_dpm_forced_level current_level = 0xff; 271 int ret = 0; 272 273 /* Can't force performance level when the card is off */ 274 if ((adev->flags & AMD_IS_PX) && 275 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 276 return -EINVAL; 277 278 if (adev->powerplay.pp_funcs->get_performance_level) 279 current_level = amdgpu_dpm_get_performance_level(adev); 280 281 if (strncmp("low", buf, strlen("low")) == 0) { 282 level = AMD_DPM_FORCED_LEVEL_LOW; 283 } else if (strncmp("high", buf, strlen("high")) == 0) { 284 level = AMD_DPM_FORCED_LEVEL_HIGH; 285 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 286 level = AMD_DPM_FORCED_LEVEL_AUTO; 287 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 288 level = AMD_DPM_FORCED_LEVEL_MANUAL; 289 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 290 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 291 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 292 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 293 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 294 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 295 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 296 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 297 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 298 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 299 } else { 300 count = -EINVAL; 301 goto fail; 302 } 303 304 if (current_level == level) 305 return count; 306 307 if (adev->powerplay.pp_funcs->force_performance_level) { 308 mutex_lock(&adev->pm.mutex); 309 if 
(adev->pm.dpm.thermal_active) { 310 count = -EINVAL; 311 mutex_unlock(&adev->pm.mutex); 312 goto fail; 313 } 314 ret = amdgpu_dpm_force_performance_level(adev, level); 315 if (ret) 316 count = -EINVAL; 317 else 318 adev->pm.dpm.forced_level = level; 319 mutex_unlock(&adev->pm.mutex); 320 } 321 322 fail: 323 return count; 324 } 325 326 static ssize_t amdgpu_get_pp_num_states(struct device *dev, 327 struct device_attribute *attr, 328 char *buf) 329 { 330 struct drm_device *ddev = dev_get_drvdata(dev); 331 struct amdgpu_device *adev = ddev->dev_private; 332 struct pp_states_info data; 333 int i, buf_len; 334 335 if (adev->powerplay.pp_funcs->get_pp_num_states) 336 amdgpu_dpm_get_pp_num_states(adev, &data); 337 338 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 339 for (i = 0; i < data.nums; i++) 340 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 341 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 342 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 343 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 344 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? 
"performance" : "default"); 345 346 return buf_len; 347 } 348 349 static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 350 struct device_attribute *attr, 351 char *buf) 352 { 353 struct drm_device *ddev = dev_get_drvdata(dev); 354 struct amdgpu_device *adev = ddev->dev_private; 355 struct pp_states_info data; 356 enum amd_pm_state_type pm = 0; 357 int i = 0; 358 359 if (adev->powerplay.pp_funcs->get_current_power_state 360 && adev->powerplay.pp_funcs->get_pp_num_states) { 361 pm = amdgpu_dpm_get_current_power_state(adev); 362 amdgpu_dpm_get_pp_num_states(adev, &data); 363 364 for (i = 0; i < data.nums; i++) { 365 if (pm == data.states[i]) 366 break; 367 } 368 369 if (i == data.nums) 370 i = -EINVAL; 371 } 372 373 return snprintf(buf, PAGE_SIZE, "%d\n", i); 374 } 375 376 static ssize_t amdgpu_get_pp_force_state(struct device *dev, 377 struct device_attribute *attr, 378 char *buf) 379 { 380 struct drm_device *ddev = dev_get_drvdata(dev); 381 struct amdgpu_device *adev = ddev->dev_private; 382 383 if (adev->pp_force_state_enabled) 384 return amdgpu_get_pp_cur_state(dev, attr, buf); 385 else 386 return snprintf(buf, PAGE_SIZE, "\n"); 387 } 388 389 static ssize_t amdgpu_set_pp_force_state(struct device *dev, 390 struct device_attribute *attr, 391 const char *buf, 392 size_t count) 393 { 394 #if 0 395 struct drm_device *ddev = dev_get_drvdata(dev); 396 struct amdgpu_device *adev = ddev->dev_private; 397 enum amd_pm_state_type state = 0; 398 unsigned long idx; 399 int ret; 400 401 if (strlen(buf) == 1) 402 adev->pp_force_state_enabled = false; 403 else if (adev->powerplay.pp_funcs->dispatch_tasks && 404 adev->powerplay.pp_funcs->get_pp_num_states) { 405 struct pp_states_info data; 406 407 ret = kstrtoul(buf, 0, &idx); 408 if (ret || idx >= ARRAY_SIZE(data.states)) { 409 count = -EINVAL; 410 goto fail; 411 } 412 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 413 414 amdgpu_dpm_get_pp_num_states(adev, &data); 415 state = data.states[idx]; 416 /* only set user 
selected power states */ 417 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 418 state != POWER_STATE_TYPE_DEFAULT) { 419 amdgpu_dpm_dispatch_task(adev, 420 AMD_PP_TASK_ENABLE_USER_STATE, &state); 421 adev->pp_force_state_enabled = true; 422 } 423 } 424 fail: 425 return count; 426 #endif 427 return -EINVAL; 428 } 429 430 /** 431 * DOC: pp_table 432 * 433 * The amdgpu driver provides a sysfs API for uploading new powerplay 434 * tables. The file pp_table is used for this. Reading the file 435 * will dump the current power play table. Writing to the file 436 * will attempt to upload a new powerplay table and re-initialize 437 * powerplay using that new table. 438 * 439 */ 440 441 static ssize_t amdgpu_get_pp_table(struct device *dev, 442 struct device_attribute *attr, 443 char *buf) 444 { 445 struct drm_device *ddev = dev_get_drvdata(dev); 446 struct amdgpu_device *adev = ddev->dev_private; 447 char *table = NULL; 448 int size; 449 450 if (adev->powerplay.pp_funcs->get_pp_table) 451 size = amdgpu_dpm_get_pp_table(adev, &table); 452 else 453 return 0; 454 455 if (size >= PAGE_SIZE) 456 size = PAGE_SIZE - 1; 457 458 memcpy(buf, table, size); 459 460 return size; 461 } 462 463 static ssize_t amdgpu_set_pp_table(struct device *dev, 464 struct device_attribute *attr, 465 const char *buf, 466 size_t count) 467 { 468 struct drm_device *ddev = dev_get_drvdata(dev); 469 struct amdgpu_device *adev = ddev->dev_private; 470 471 if (adev->powerplay.pp_funcs->set_pp_table) 472 amdgpu_dpm_set_pp_table(adev, buf, count); 473 474 return count; 475 } 476 477 /** 478 * DOC: pp_od_clk_voltage 479 * 480 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 481 * in each power level within a power state. The pp_od_clk_voltage is used for 482 * this. 
483 * 484 * Reading the file will display: 485 * 486 * - a list of engine clock levels and voltages labeled OD_SCLK 487 * 488 * - a list of memory clock levels and voltages labeled OD_MCLK 489 * 490 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 491 * 492 * To manually adjust these settings, first select manual using 493 * power_dpm_force_performance_level. Enter a new value for each 494 * level by writing a string that contains "s/m level clock voltage" to 495 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 496 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 497 * 810 mV. When you have edited all of the states as needed, write 498 * "c" (commit) to the file to commit your changes. If you want to reset to the 499 * default power levels, write "r" (reset) to the file to reset them. 500 * 501 */ 502 503 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 504 struct device_attribute *attr, 505 const char *buf, 506 size_t count) 507 { 508 struct drm_device *ddev = dev_get_drvdata(dev); 509 struct amdgpu_device *adev = ddev->dev_private; 510 int ret; 511 uint32_t parameter_size = 0; 512 long parameter[64]; 513 char buf_cpy[128]; 514 char *tmp_str; 515 char *sub_str; 516 const char delimiter[3] = {' ', '\n', '\0'}; 517 uint32_t type; 518 519 if (count > 127) 520 return -EINVAL; 521 522 if (*buf == 's') 523 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 524 else if (*buf == 'm') 525 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 526 else if(*buf == 'r') 527 type = PP_OD_RESTORE_DEFAULT_TABLE; 528 else if (*buf == 'c') 529 type = PP_OD_COMMIT_DPM_TABLE; 530 else 531 return -EINVAL; 532 533 memcpy(buf_cpy, buf, count+1); 534 535 tmp_str = buf_cpy; 536 537 while (isspace(*++tmp_str)); 538 539 while (tmp_str[0]) { 540 sub_str = strsep(&tmp_str, delimiter); 541 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 542 if (ret) 543 return -EINVAL; 544 parameter_size++; 545 546 while (isspace(*tmp_str)) 547 tmp_str++; 548 } 
549 550 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) 551 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 552 parameter, parameter_size); 553 554 if (ret) 555 return -EINVAL; 556 557 if (type == PP_OD_COMMIT_DPM_TABLE) { 558 if (adev->powerplay.pp_funcs->dispatch_tasks) { 559 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 560 return count; 561 } else { 562 return -EINVAL; 563 } 564 } 565 566 return count; 567 } 568 569 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 570 struct device_attribute *attr, 571 char *buf) 572 { 573 struct drm_device *ddev = dev_get_drvdata(dev); 574 struct amdgpu_device *adev = ddev->dev_private; 575 uint32_t size = 0; 576 577 if (adev->powerplay.pp_funcs->print_clock_levels) { 578 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 579 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 580 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 581 return size; 582 } else { 583 return snprintf(buf, PAGE_SIZE, "\n"); 584 } 585 586 } 587 588 /** 589 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie 590 * 591 * The amdgpu driver provides a sysfs API for adjusting what power levels 592 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 593 * and pp_dpm_pcie are used for this. 594 * 595 * Reading back the files will show you the available power levels within 596 * the power state and the clock information for those levels. 597 * 598 * To manually adjust these states, first select manual using 599 * power_dpm_force_performance_level. 600 * Secondly,Enter a new value for each level by inputing a string that 601 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 602 * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. 
603 */ 604 605 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 606 struct device_attribute *attr, 607 char *buf) 608 { 609 struct drm_device *ddev = dev_get_drvdata(dev); 610 struct amdgpu_device *adev = ddev->dev_private; 611 612 if (adev->powerplay.pp_funcs->print_clock_levels) 613 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 614 else 615 return snprintf(buf, PAGE_SIZE, "\n"); 616 } 617 618 /* 619 * Worst case: 32 bits individually specified, in octal at 12 characters 620 * per line (+1 for \n). 621 */ 622 #define AMDGPU_MASK_BUF_MAX (32 * 13) 623 624 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 625 { 626 int ret; 627 unsigned long level; 628 char *sub_str = NULL; 629 char *tmp; 630 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 631 const char delimiter[3] = {' ', '\n', '\0'}; 632 size_t bytes; 633 634 *mask = 0; 635 636 bytes = min(count, sizeof(buf_cpy) - 1); 637 memcpy(buf_cpy, buf, bytes); 638 buf_cpy[bytes] = '\0'; 639 tmp = buf_cpy; 640 while (tmp[0]) { 641 sub_str = strsep(&tmp, delimiter); 642 if (strlen(sub_str)) { 643 ret = kstrtoul(sub_str, 0, &level); 644 if (ret || level > 31) 645 return -EINVAL; 646 *mask |= 1 << level; 647 } else 648 break; 649 } 650 651 return 0; 652 } 653 654 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 655 struct device_attribute *attr, 656 const char *buf, 657 size_t count) 658 { 659 struct drm_device *ddev = dev_get_drvdata(dev); 660 struct amdgpu_device *adev = ddev->dev_private; 661 int ret; 662 uint32_t mask = 0; 663 664 ret = amdgpu_read_mask(buf, count, &mask); 665 if (ret) 666 return ret; 667 668 if (adev->powerplay.pp_funcs->force_clock_level) 669 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 670 671 return count; 672 } 673 674 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 675 struct device_attribute *attr, 676 char *buf) 677 { 678 struct drm_device *ddev = dev_get_drvdata(dev); 679 struct amdgpu_device *adev = ddev->dev_private; 680 681 if 
(adev->powerplay.pp_funcs->print_clock_levels) 682 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 683 else 684 return snprintf(buf, PAGE_SIZE, "\n"); 685 } 686 687 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 688 struct device_attribute *attr, 689 const char *buf, 690 size_t count) 691 { 692 struct drm_device *ddev = dev_get_drvdata(dev); 693 struct amdgpu_device *adev = ddev->dev_private; 694 int ret; 695 uint32_t mask = 0; 696 697 ret = amdgpu_read_mask(buf, count, &mask); 698 if (ret) 699 return ret; 700 701 if (adev->powerplay.pp_funcs->force_clock_level) 702 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 703 704 return count; 705 } 706 707 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 708 struct device_attribute *attr, 709 char *buf) 710 { 711 struct drm_device *ddev = dev_get_drvdata(dev); 712 struct amdgpu_device *adev = ddev->dev_private; 713 714 if (adev->powerplay.pp_funcs->print_clock_levels) 715 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 716 else 717 return snprintf(buf, PAGE_SIZE, "\n"); 718 } 719 720 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 721 struct device_attribute *attr, 722 const char *buf, 723 size_t count) 724 { 725 struct drm_device *ddev = dev_get_drvdata(dev); 726 struct amdgpu_device *adev = ddev->dev_private; 727 int ret; 728 uint32_t mask = 0; 729 730 ret = amdgpu_read_mask(buf, count, &mask); 731 if (ret) 732 return ret; 733 734 if (adev->powerplay.pp_funcs->force_clock_level) 735 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 736 737 return count; 738 } 739 740 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 741 struct device_attribute *attr, 742 char *buf) 743 { 744 struct drm_device *ddev = dev_get_drvdata(dev); 745 struct amdgpu_device *adev = ddev->dev_private; 746 uint32_t value = 0; 747 748 if (adev->powerplay.pp_funcs->get_sclk_od) 749 value = amdgpu_dpm_get_sclk_od(adev); 750 751 return snprintf(buf, PAGE_SIZE, "%d\n", value); 752 } 753 754 
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 755 struct device_attribute *attr, 756 const char *buf, 757 size_t count) 758 { 759 struct drm_device *ddev = dev_get_drvdata(dev); 760 struct amdgpu_device *adev = ddev->dev_private; 761 int ret; 762 long int value; 763 764 ret = kstrtol(buf, 0, &value); 765 766 if (ret) { 767 count = -EINVAL; 768 goto fail; 769 } 770 if (adev->powerplay.pp_funcs->set_sclk_od) 771 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 772 773 if (adev->powerplay.pp_funcs->dispatch_tasks) { 774 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 775 } else { 776 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 777 amdgpu_pm_compute_clocks(adev); 778 } 779 780 fail: 781 return count; 782 } 783 784 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 785 struct device_attribute *attr, 786 char *buf) 787 { 788 struct drm_device *ddev = dev_get_drvdata(dev); 789 struct amdgpu_device *adev = ddev->dev_private; 790 uint32_t value = 0; 791 792 if (adev->pp_enabled) 793 value = amdgpu_dpm_get_sclk_od(adev); 794 795 return snprintf(buf, PAGE_SIZE, "%d\n", value); 796 } 797 798 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 799 struct device_attribute *attr, 800 const char *buf, 801 size_t count) 802 { 803 struct drm_device *ddev = dev_get_drvdata(dev); 804 struct amdgpu_device *adev = ddev->dev_private; 805 int ret; 806 long int value; 807 808 ret = kstrtol(buf, 0, &value); 809 810 if (ret) { 811 count = -EINVAL; 812 goto fail; 813 } 814 815 if (adev->pp_enabled) 816 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 817 818 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); 819 820 fail: 821 return count; 822 } 823 824 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 825 struct device_attribute *attr, 826 char *buf) 827 { 828 struct drm_device *ddev = dev_get_drvdata(dev); 829 struct amdgpu_device *adev = ddev->dev_private; 830 uint32_t value = 0; 831 832 if 
(adev->powerplay.pp_funcs->get_mclk_od) 833 value = amdgpu_dpm_get_mclk_od(adev); 834 835 return snprintf(buf, PAGE_SIZE, "%d\n", value); 836 } 837 838 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 839 struct device_attribute *attr, 840 const char *buf, 841 size_t count) 842 { 843 struct drm_device *ddev = dev_get_drvdata(dev); 844 struct amdgpu_device *adev = ddev->dev_private; 845 int ret; 846 long int value; 847 848 ret = kstrtol(buf, 0, &value); 849 850 if (ret) { 851 count = -EINVAL; 852 goto fail; 853 } 854 if (adev->powerplay.pp_funcs->set_mclk_od) 855 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 856 857 if (adev->powerplay.pp_funcs->dispatch_tasks) { 858 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 859 } else { 860 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 861 amdgpu_pm_compute_clocks(adev); 862 } 863 864 fail: 865 return count; 866 } 867 868 /** 869 * DOC: pp_power_profile_mode 870 * 871 * The amdgpu driver provides a sysfs API for adjusting the heuristics 872 * related to switching between power levels in a power state. The file 873 * pp_power_profile_mode is used for this. 874 * 875 * Reading this file outputs a list of all of the predefined power profiles 876 * and the relevant heuristics settings for that profile. 877 * 878 * To select a profile or create a custom profile, first select manual using 879 * power_dpm_force_performance_level. Writing the number of a predefined 880 * profile to pp_power_profile_mode will enable those heuristics. To 881 * create a custom set of heuristics, write a string of numbers to the file 882 * starting with the number of the custom profile along with a setting 883 * for each heuristic parameter. Due to differences across asic families 884 * the heuristic parameters vary from family to family. 
885 * 886 */ 887 888 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 889 struct device_attribute *attr, 890 char *buf) 891 { 892 struct drm_device *ddev = dev_get_drvdata(dev); 893 struct amdgpu_device *adev = ddev->dev_private; 894 895 if (adev->powerplay.pp_funcs->get_power_profile_mode) 896 return amdgpu_dpm_get_power_profile_mode(adev, buf); 897 898 return snprintf(buf, PAGE_SIZE, "\n"); 899 } 900 901 902 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 903 struct device_attribute *attr, 904 const char *buf, 905 size_t count) 906 { 907 int ret = 0xff; 908 struct drm_device *ddev = dev_get_drvdata(dev); 909 struct amdgpu_device *adev = ddev->dev_private; 910 uint32_t parameter_size = 0; 911 long parameter[64]; 912 char *sub_str, buf_cpy[128]; 913 char *tmp_str; 914 uint32_t i = 0; 915 char tmp[2]; 916 long int profile_mode = 0; 917 const char delimiter[3] = {' ', '\n', '\0'}; 918 919 tmp[0] = *(buf); 920 tmp[1] = '\0'; 921 ret = kstrtol(tmp, 0, &profile_mode); 922 if (ret) 923 goto fail; 924 925 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 926 if (count < 2 || count > 127) 927 return -EINVAL; 928 while (isspace(*++buf)) 929 i++; 930 memcpy(buf_cpy, buf, count-i); 931 tmp_str = buf_cpy; 932 while (tmp_str[0]) { 933 sub_str = strsep(&tmp_str, delimiter); 934 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 935 if (ret) { 936 count = -EINVAL; 937 goto fail; 938 } 939 parameter_size++; 940 while (isspace(*tmp_str)) 941 tmp_str++; 942 } 943 } 944 parameter[parameter_size] = profile_mode; 945 if (adev->powerplay.pp_funcs->set_power_profile_mode) 946 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 947 948 if (!ret) 949 return count; 950 fail: 951 return -EINVAL; 952 } 953 954 /** 955 * DOC: busy_percent 956 * 957 * The amdgpu driver provides a sysfs API for reading how busy the GPU 958 * is as a percentage. The file gpu_busy_percent is used for this. 
959 * The SMU firmware computes a percentage of load based on the 960 * aggregate activity level in the IP cores. 961 */ 962 static ssize_t amdgpu_get_busy_percent(struct device *dev, 963 struct device_attribute *attr, 964 char *buf) 965 { 966 struct drm_device *ddev = dev_get_drvdata(dev); 967 struct amdgpu_device *adev = ddev->dev_private; 968 int r, value, size = sizeof(value); 969 970 /* sanity check PP is enabled */ 971 if (!(adev->powerplay.pp_funcs && 972 adev->powerplay.pp_funcs->read_sensor)) 973 return -EINVAL; 974 975 /* read the IP busy sensor */ 976 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 977 (void *)&value, &size); 978 if (r) 979 return r; 980 981 return snprintf(buf, PAGE_SIZE, "%d\n", value); 982 } 983 984 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 985 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 986 amdgpu_get_dpm_forced_performance_level, 987 amdgpu_set_dpm_forced_performance_level); 988 static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 989 static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 990 static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 991 amdgpu_get_pp_force_state, 992 amdgpu_set_pp_force_state); 993 static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 994 amdgpu_get_pp_table, 995 amdgpu_set_pp_table); 996 static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 997 amdgpu_get_pp_dpm_sclk, 998 amdgpu_set_pp_dpm_sclk); 999 static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 1000 amdgpu_get_pp_dpm_mclk, 1001 amdgpu_set_pp_dpm_mclk); 1002 static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 1003 amdgpu_get_pp_dpm_pcie, 1004 amdgpu_set_pp_dpm_pcie); 1005 static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, 1006 amdgpu_get_pp_sclk_od, 1007 amdgpu_set_pp_sclk_od); 1008 static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 1009 amdgpu_get_pp_mclk_od, 1010 amdgpu_set_pp_mclk_od); 1011 static 
DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_power_profile_mode,
		amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_od_clk_voltage,
		amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
		amdgpu_get_busy_percent, NULL);

/* hwmon show: on-die GPU temperature in millidegrees Celsius */
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int r, temp, size = sizeof(temp);

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* get the temperature */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

/* hwmon show: critical temp (index 0) or its hysteresis (index 1) */
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

/* hwmon show: current fan control mode (manual/auto) */
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	return sprintf(buf, "%i\n", pwm_mode);
}

/* hwmon store: set the fan control mode (value passed through to powerplay) */
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	amdgpu_dpm_set_fan_control_mode(adev, value);

	return count;
}

/* hwmon show: minimum pwm value is always 0 */
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

/* hwmon show: maximum pwm value is always 255 */
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

/* hwmon store: set fan speed; input 0-255 is rescaled to a percentage */
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* rescale 0-255 pwm range to 0-100 percent */
	value = (value * 100) / 255;

	if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
		if (err)
			return err;
	}

	return count;
}

/* hwmon show: current fan speed, rescaled from percent back to 0-255 */
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
		if (err)
			return err;
	}

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

/* hwmon show: fan speed in RPM */
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
		if (err)
			return err;
	}

	return sprintf(buf, "%i\n", speed);
}

/* hwmon show: GPU core voltage in millivolts */
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

	/* Can't get voltage when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

/* hwmon show: label for the vddgfx channel */
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}

/* hwmon show: northbridge voltage in millivolts (APUs only) */
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	u32 vddnb;
	int r, size = sizeof(vddnb);

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;

	/* Can't get voltage when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

/* hwmon show: label for the vddnb channel */
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddnb\n");
}

/* hwmon show: average GPU power in microwatts */
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	u32 query = 0;
	int r, size = sizeof(u32);
	unsigned uw;

	/* Can't get power when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* get the power; sensor value is 8.8 fixed-point watts */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);
	if (r)
		return r;

	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}

/* hwmon show: minimum supported power cap is always 0 */
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

/* hwmon show: maximum supported power cap in microwatts */
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		/* "true" selects the default (max) limit; limit is in watts */
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		return snprintf(buf, PAGE_SIZE, "\n");
	}
}

/* hwmon show: currently selected power cap in microwatts */
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		/* "false" selects the current limit; limit is in watts */
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		return snprintf(buf, PAGE_SIZE, "\n");
	}
}


/* hwmon store: set the power cap; input is microwatts, powerplay takes watts */
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = value / 1000000; /* convert to Watt */
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	return count;
}


/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following
sensor interfaces: 1366 * 1367 * - GPU temperature (via the on-die sensor) 1368 * 1369 * - GPU voltage 1370 * 1371 * - Northbridge voltage (APUs only) 1372 * 1373 * - GPU power 1374 * 1375 * - GPU fan 1376 * 1377 * hwmon interfaces for GPU temperature: 1378 * 1379 * - temp1_input: the on die GPU temperature in millidegrees Celsius 1380 * 1381 * - temp1_crit: temperature critical max value in millidegrees Celsius 1382 * 1383 * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 1384 * 1385 * hwmon interfaces for GPU voltage: 1386 * 1387 * - in0_input: the voltage on the GPU in millivolts 1388 * 1389 * - in1_input: the voltage on the Northbridge in millivolts 1390 * 1391 * hwmon interfaces for GPU power: 1392 * 1393 * - power1_average: average power used by the GPU in microWatts 1394 * 1395 * - power1_cap_min: minimum cap supported in microWatts 1396 * 1397 * - power1_cap_max: maximum cap supported in microWatts 1398 * 1399 * - power1_cap: selected power cap in microWatts 1400 * 1401 * hwmon interfaces for GPU fan: 1402 * 1403 * - pwm1: pulse width modulation fan level (0-255) 1404 * 1405 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 1406 * 1407 * - pwm1_min: pulse width modulation fan control minimum level (0) 1408 * 1409 * - pwm1_max: pulse width modulation fan control maximum level (255) 1410 * 1411 * - fan1_input: fan speed in RPM 1412 * 1413 * You can use hwmon tools like sensors to view this information on your system. 
 *
 */

/* hwmon sensor attribute declarations wiring the show/store helpers above */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);

/* all hwmon attributes; visibility is filtered per-asic at registration */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	NULL
};

/*
 * Decide whether a given hwmon attribute is visible on this device,
 * based on DPM state, fan presence, asic callbacks, and APU vs dGPU.
 * Returns 0 to hide the attribute, otherwise the (possibly reduced) mode.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;


	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* APUs do not expose discrete power readings/caps */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
#endif

/*
 * Deferred thermal interrupt handler: switch to the internal thermal
 * power state when the GPU is too hot, and back to the user state once
 * the temperature drops below the threshold again.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->read_sensor &&
	    !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void
*)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

/*
 * Select the best matching power state for the requested dpm_state,
 * honoring single-display-only states and falling back through related
 * states (e.g. UVD SD -> HD -> performance) when no exact match exists.
 * Returns NULL only if no usable state is found at all.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

/*
 * Apply the currently requested power state if it differs from the
 * active one. Caller must hold adev->pm.mutex.
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* skip the switch entirely if the new state equals the current one */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

/*
 * Power-gate or un-gate the UVD block. With SMU powergating support the
 * request goes straight to the SMU; otherwise it is handled by switching
 * to/from the internal UVD power state.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
		/* enable/disable UVD */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_pm_compute_clocks(adev);
	}
}

/*
 * Power-gate or un-gate the VCE block, mirroring amdgpu_dpm_enable_uvd()
 * but also toggling clockgating/powergating IP states on the legacy path.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
		/* enable/disable VCE */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
			amdgpu_pm_compute_clocks(adev);
		} else {
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_pm_compute_clocks(adev);
		}

	}
}

/* Dump all parsed power states through the asic's print callback. */
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);

}

/*
 * Register the pm sysfs/debugfs interfaces. The sysfs/hwmon registration
 * is currently compiled out (#if 0); only the debugfs file is created.
 * Returns 0 on success or a negative error code.
 */
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

#if 0
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}


	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
	if (ret) {
		DRM_ERROR("failed to create device file pp_num_states\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_cur_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_force_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_table);
	if (ret) {
		DRM_ERROR("failed to create device file pp_table\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
				 &dev_attr_pp_power_profile_mode);
	if (ret) {
		DRM_ERROR("failed to create device file "
			  "pp_power_profile_mode\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
				 &dev_attr_pp_od_clk_voltage);
	if (ret) {
		DRM_ERROR("failed to create device file "
			  "pp_od_clk_voltage\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
				 &dev_attr_gpu_busy_percent);
	if (ret) {
		/* NOTE(review): message says "gpu_busy_level" but the attr is
		 * gpu_busy_percent — left as-is (dead #if 0 code) */
		DRM_ERROR("failed to create device file "
			  "gpu_busy_level\n");
		return ret;
	}
#endif

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}
#if 0
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
#endif
	adev->pm.sysfs_initialized = true;

	return 0;
}

/*
 * Tear down the pm sysfs interfaces; currently a no-op since the
 * corresponding registration in amdgpu_pm_sysfs_init() is compiled out.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
#if 0
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			   &dev_attr_pp_power_profile_mode);
	device_remove_file(adev->dev,
			   &dev_attr_pp_od_clk_voltage);
	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
#endif
}

/*
 * Recompute clocks/power state after a display or engine configuration
 * change: updates bandwidth, waits for ring idle, then either dispatches
 * a powerplay task or drives the legacy dpm state machine.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if
(adev->mode_info.num_crtc) 1988 amdgpu_display_bandwidth_update(adev); 1989 1990 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1991 struct amdgpu_ring *ring = adev->rings[i]; 1992 if (ring && ring->ready) 1993 amdgpu_fence_wait_empty(ring); 1994 } 1995 1996 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1997 if (!amdgpu_device_has_dc_support(adev)) { 1998 mutex_lock(&adev->pm.mutex); 1999 amdgpu_dpm_get_active_displays(adev); 2000 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 2001 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 2002 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 2003 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ 2004 if (adev->pm.pm_display_cfg.vrefresh > 120) 2005 adev->pm.pm_display_cfg.min_vblank_time = 0; 2006 if (adev->powerplay.pp_funcs->display_configuration_change) 2007 adev->powerplay.pp_funcs->display_configuration_change( 2008 adev->powerplay.pp_handle, 2009 &adev->pm.pm_display_cfg); 2010 mutex_unlock(&adev->pm.mutex); 2011 } 2012 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 2013 } else { 2014 mutex_lock(&adev->pm.mutex); 2015 amdgpu_dpm_get_active_displays(adev); 2016 amdgpu_dpm_change_power_state_locked(adev); 2017 mutex_unlock(&adev->pm.mutex); 2018 } 2019 } 2020 2021 /* 2022 * Debugfs info 2023 */ 2024 #if defined(CONFIG_DEBUG_FS) 2025 2026 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 2027 { 2028 uint32_t value; 2029 uint32_t query = 0; 2030 int size; 2031 2032 /* sanity check PP is enabled */ 2033 if (!(adev->powerplay.pp_funcs && 2034 adev->powerplay.pp_funcs->read_sensor)) 2035 return -EINVAL; 2036 2037 /* GPU Clocks */ 2038 size = sizeof(value); 2039 seq_printf(m, "GFX Clocks and Power:\n"); 2040 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 2041 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 2042 if 
(!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 2043 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 2044 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 2045 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 2046 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 2047 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 2048 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 2049 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 2050 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 2051 seq_printf(m, "\t%u mV (VDDNB)\n", value); 2052 size = sizeof(uint32_t); 2053 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) 2054 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); 2055 size = sizeof(value); 2056 seq_printf(m, "\n"); 2057 2058 /* GPU Temp */ 2059 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 2060 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 2061 2062 /* GPU Load */ 2063 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 2064 seq_printf(m, "GPU Load: %u %%\n", value); 2065 seq_printf(m, "\n"); 2066 2067 /* UVD clocks */ 2068 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 2069 if (!value) { 2070 seq_printf(m, "UVD: Disabled\n"); 2071 } else { 2072 seq_printf(m, "UVD: Enabled\n"); 2073 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 2074 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 2075 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 2076 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 2077 } 2078 } 2079 seq_printf(m, "\n"); 2080 2081 /* VCE clocks */ 2082 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, 
&size)) { 2083 if (!value) { 2084 seq_printf(m, "VCE: Disabled\n"); 2085 } else { 2086 seq_printf(m, "VCE: Enabled\n"); 2087 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 2088 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 2089 } 2090 } 2091 2092 return 0; 2093 } 2094 2095 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 2096 { 2097 int i; 2098 2099 for (i = 0; clocks[i].flag; i++) 2100 seq_printf(m, "\t%s: %s\n", clocks[i].name, 2101 (flags & clocks[i].flag) ? "On" : "Off"); 2102 } 2103 2104 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 2105 { 2106 struct drm_info_node *node = (struct drm_info_node *) m->private; 2107 struct drm_device *dev = node->minor->dev; 2108 struct amdgpu_device *adev = dev->dev_private; 2109 struct drm_device *ddev = adev->ddev; 2110 u32 flags = 0; 2111 2112 amdgpu_device_ip_get_clockgating_state(adev, &flags); 2113 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 2114 amdgpu_parse_cg_state(m, flags); 2115 seq_printf(m, "\n"); 2116 2117 if (!adev->pm.dpm_enabled) { 2118 seq_printf(m, "dpm not enabled\n"); 2119 return 0; 2120 } 2121 if ((adev->flags & AMD_IS_PX) && 2122 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 2123 seq_printf(m, "PX asic powered off\n"); 2124 } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 2125 mutex_lock(&adev->pm.mutex); 2126 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 2127 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 2128 else 2129 seq_printf(m, "Debugfs support not implemented for this asic\n"); 2130 mutex_unlock(&adev->pm.mutex); 2131 } else { 2132 return amdgpu_debugfs_pm_info_pp(m, adev); 2133 } 2134 2135 return 0; 2136 } 2137 2138 static const struct drm_info_list amdgpu_pm_info_list[] = { 2139 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 2140 }; 2141 #endif 2142 2143 static int amdgpu_debugfs_pm_init(struct amdgpu_device 
*adev) 2144 { 2145 #if defined(CONFIG_DEBUG_FS) 2146 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 2147 #else 2148 return 0; 2149 #endif 2150 } 2151