1 /* 2 * Permission is hereby granted, free of charge, to any person obtaining a 3 * copy of this software and associated documentation files (the "Software"), 4 * to deal in the Software without restriction, including without limitation 5 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 6 * and/or sell copies of the Software, and to permit persons to whom the 7 * Software is furnished to do so, subject to the following conditions: 8 * 9 * The above copyright notice and this permission notice shall be included in 10 * all copies or substantial portions of the Software. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 18 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_pm.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <sys/power.h>
#include <drm/drmP.h>
#include <sys/sensors.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#include "r600_dpm.h"

/* Timing parameters (milliseconds) for the dynamic power-management logic. */
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
/* How long to wait for a vblank event before giving up (milliseconds). */
#define RADEON_WAIT_VBLANK_TIMEOUT 200

/* Printable names indexed by a power state's type value (used by
 * radeon_pm_print_states()); index 0 is intentionally empty. */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

#define DUMBBELL_PM 1

#ifdef DUMBBELL_PM
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
#endif /* DUMBBELL_PM */
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

/*
 * Return the index into rdev->pm.power_state[] of the instance'th power
 * state whose type matches ps_type.  Falls back to the default power
 * state index when no such state exists.
 */
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;	/* count of matching states seen so far */

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

/*
 * React to a host power-profile change (the analogue of the ACPI
 * AC/battery event).  For DPM, mirror the host profile into
 * dpm.ac_power (true only for the "performance" profile) and, on ARUBA
 * parts, re-apply BAPM with the new AC state.  For the PROFILE method
 * in AUTO mode, re-select the profile and reprogram the clocks.
 */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}

/*
 * Translate the user-selected profile (rdev->pm.profile) into a concrete
 * profile table index and then into the requested power-state and
 * clock-mode indices.  AUTO selects HIGH or MID depending on the host
 * power profile; the single-head (SH) vs. multi-head (MH) variant is
 * chosen by the number of active CRTCs.  With no active CRTCs the
 * profile's dpms-off entries are used instead of the dpms-on ones.
 * Caller must hold rdev->pm.mutex.
 */
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	/* no displays lit: use the profile's dpms-off state/clock mode */
	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

/*
 * Drop any CPU virtual mappings of BOs currently resident in VRAM so
 * nothing touches VRAM through a stale mapping while clocks change;
 * the mappings fault back in afterwards.
 */
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	/* _safe: ttm_bo_unmap_virtual may trigger list manipulation */
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

/*
 * If any CRTC is active, wait (bounded by RADEON_WAIT_VBLANK_TIMEOUT)
 * for the vblank IRQ handler to set pm.vblank_sync, so the reclock
 * starts aligned with a vertical blank.  No-op when no CRTC is active.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
#ifdef DUMBBELL_PM
		wait_event_timeout(
		    rdev->irq.vblank_queue, rdev->pm.vblank_sync,
		    msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif /* DUMBBELL_PM */
	}
}

/*
 * Program the hardware to the requested power state / clock mode
 * (engine clock, memory clock, voltage, PCIe lanes).  Only acts when
 * the requested indices differ from the current ones and the GUI engine
 * is idle.  Ordering: voltage changes ("misc") are applied before
 * raising clocks and after lowering them.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		/* clamp the engine clock to the default (boot-time) value */
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* clamp the memory clock to the default (boot-time) value */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm only reclocks inside the vblank interval; bail if
		 * we missed it */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

/*
 * Quiesce the GPU and apply the requested power state: drain all rings,
 * unmap VRAM BOs, hold vblank references on active CRTCs while
 * reprogramming, then refresh the display watermarks.  Takes
 * pm.mclk_lock exclusively ("write") plus the ring lock.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc.
	   if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE); // down_write
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
			lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* pin vblank interrupts on the active CRTCs so the reclock can be
	 * synchronized to the blanking interval */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				/* This can fail if a modeset is in progress */
				if (drm_vblank_get(rdev->ddev, i) == 0)
					rdev->pm.req_vblank |= (1 << i);
				else
					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
							 i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* release the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
}

/*
 * Dump every power state and its clock modes to the debug log.
 * Clocks are stored in units of 10 kHz, hence the "* 10" when printing
 * (IGP parts report engine clock only, no mclk/voltage).
 */
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

#ifdef DUMBBELL_WIP
/*
 * sysfs "power_profile" show handler: print the current legacy PM
 * profile as one of auto/low/mid/high/default.
 */
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return ksnprintf(buf, PAGE_SIZE, "%s\n",
			 (cp == PM_PROFILE_AUTO) ? "auto" :
			 (cp == PM_PROFILE_LOW) ? "low" :
			 (cp == PM_PROFILE_MID) ? "mid" :
			 (cp == PM_PROFILE_HIGH) ?
			 "high" : "default");
}

/*
 * sysfs "power_profile" store handler: parse one of
 * default/auto/low/mid/high and apply it immediately.  Only valid when
 * the PROFILE method is active; returns -EINVAL otherwise, or when the
 * PX card is powered off.
 */
static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		/* strncmp-by-keyword: matches on the keyword prefix only */
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

/*
 * sysfs "power_method" show handler: print the active PM method as
 * dynpm/profile/dpm.
 */
static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return ksnprintf(buf, PAGE_SIZE, "%s\n",
			 (pm == PM_METHOD_DYNPM) ? "dynpm" :
			 (pm == PM_METHOD_PROFILE) ?
			"balanced" : "performance");
}

/*
 * sysfs "power_dpm_state" store handler: set the user-requested DPM
 * state (battery/balanced/performance) and recompute clocks, unless the
 * PX card is currently powered off (the new state then takes effect on
 * the next clock recomputation).
 */
static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}

/*
 * sysfs "power_dpm_force_performance_level" show handler: print the
 * forced performance level (auto/low/high), or "off" when the PX card
 * is powered down.
 */
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ?
		       1 : 2);
}

/*
 * hwmon "pwm1_enable" store handler: 1 selects manual (static) PWM
 * control; any other value re-enables automatic fan control.
 */
static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	int value;

	if(!rdev->asic->dpm.fan_ctrl_set_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
		break;
	}

	return count;
}

/* hwmon "pwm1_min": the PWM duty-cycle range is fixed at 0..255 */
static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

/* hwmon "pwm1_max": see radeon_hwmon_get_pwm1_min */
static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

/*
 * hwmon "pwm1" store handler: convert the 0..255 PWM value to a
 * percentage and hand it to the ASIC fan-speed callback.
 */
static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* scale 0..255 -> 0..100 percent */
	value = (value * 100) / 255;

	err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
	if (err)
		return err;

	return count;
}

/*
 * hwmon "pwm1" show handler: read the fan speed percentage from the
 * ASIC and scale it back to the 0..255 PWM range.
 */
static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
	if (err)
		return err;

	/* scale 0..100 percent -> 0..255 */
	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR,
		   radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);

/*
 * hwmon "temp1_input" show handler: current GPU temperature, or 0 when
 * the ASIC has no temperature sensor callback.  -EINVAL when the PX
 * card is powered off.
 */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
}

/*
 * hwmon temp1_crit/temp1_crit_hyst show handler: the sensor attribute
 * index selects which DPM thermal limit to report (0 = max_temp for
 * the critical threshold, 1 = min_temp for the hysteresis point).
 */
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = rdev->pm.dpm.thermal.min_temp;
	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);

/* All hwmon attributes; hwmon_attributes_visible() decides which of
 * these are actually exposed for a given device. */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

/*
 * sysfs is_visible callback for the hwmon group: hide or read/write
 * restrict attributes depending on the PM method, fan presence, and
 * which fan-control callbacks this ASIC provides.  Returns the
 * effective mode bits, or 0 to hide the attribute entirely.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (rdev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!rdev->asic->dpm.get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     !rdev->asic->dpm.get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
#endif /* DUMBBELL_WIP */

/*
 * Periodic sensors(9) refresh callback: read the GPU temperature and
 * publish it through the kernel sensor framework.  The sensor is set
 * to "unknown" while a PX card is powered off and "invalid" when the
 * ASIC cannot report a temperature.  Status escalates to WARN/CRIT
 * against the DPM thermal min/max limits.
 */
static void
radeon_hwmon_refresh(void *arg)
{
	struct radeon_device *rdev = (struct radeon_device *)arg;
	struct drm_device *ddev = rdev->ddev;
	struct ksensor *s = rdev->pm.int_sensor;
	int temp;
	enum sensor_status stat;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		sensor_set_unknown(s);
		s->status = SENSOR_S_OK;
		return;
	}

	if (rdev->asic->pm.get_temperature == NULL) {
		sensor_set_invalid(s);
		return;
	}

	temp = radeon_get_temperature(rdev);
	if (temp >= rdev->pm.dpm.thermal.max_temp)
		stat = SENSOR_S_CRIT;
	else if (temp >= rdev->pm.dpm.thermal.min_temp)
		stat = SENSOR_S_WARN;
	else
		stat = SENSOR_S_OK;

	/* convert to the sensors framework scale (assumes temp is in
	 * millidegrees Celsius, yielding microkelvin — TODO confirm) */
	sensor_set(s, temp * 1000 + 273150000, stat);
}

/*
 * Register a kernel temperature sensor for ASICs with an internal
 * thermal sensor.  Allocates the ksensor/ksensordev pair, attaches it
 * under the device's name, and schedules radeon_hwmon_refresh via
 * sensor_task_register (period argument 5 — presumably seconds).
 * Returns 0; silently does nothing for thermal types without support.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_sensor = NULL;
	rdev->pm.int_sensordev = NULL;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		rdev->pm.int_sensor = kmalloc(sizeof(*rdev->pm.int_sensor),
					      M_DRM, M_ZERO | M_WAITOK);
		rdev->pm.int_sensordev = kmalloc(
		    sizeof(*rdev->pm.int_sensordev), M_DRM,
		    M_ZERO | M_WAITOK);
		strlcpy(rdev->pm.int_sensordev->xname,
			device_get_nameunit(rdev->dev->bsddev),
			sizeof(rdev->pm.int_sensordev->xname));
		rdev->pm.int_sensor->type = SENSOR_TEMP;
		/* marked invalid until the first refresh supplies a value */
		rdev->pm.int_sensor->flags |= SENSOR_FINVALID;
		sensor_attach(rdev->pm.int_sensordev, rdev->pm.int_sensor);
		sensor_task_register(rdev, radeon_hwmon_refresh, 5);
		sensordev_install(rdev->pm.int_sensordev);
		break;
	default:
		break;
	}

	return err;
}

/*
 * Tear down what radeon_hwmon_init() created: deinstall the sensor
 * device, stop the refresh task, and free both allocations.
 */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	if (rdev->pm.int_sensor != NULL && rdev->pm.int_sensordev != NULL) {
		sensordev_deinstall(rdev->pm.int_sensordev);
		sensor_task_unregister(rdev);
		kfree(rdev->pm.int_sensor);
		kfree(rdev->pm.int_sensordev);
		rdev->pm.int_sensor = NULL;
		rdev->pm.int_sensordev = NULL;
	}
}

/*
 * Work handler for the DPM thermal interrupt/task: enter the internal
 * thermal power state while the GPU is hot, and drop back to the user
 * state once the temperature falls below the DPM minimum (or, without
 * a temperature callback, when the interrupt signalled high-to-low).
 */
static void radeon_dpm_thermal_work_handler(void *arg, int pending)
{
	struct radeon_device *rdev = arg;
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}

/*
 * Decide whether the upcoming configuration counts as "single display"
 * for power-state selection.  True only when fewer than two CRTCs will
 * be active AND the vblank period is long enough to reclock mclk AND
 * the refresh rate is below 120 Hz (high refresh rates are problematic
 * even within the vblank limit).
 */
static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* 120hz tends to be problematic even if they are under the
	 * vblank limit.
	 */
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
		single_display = false;

	return single_display;
}

/*
 * Select the best matching radeon_ps from rdev->pm.dpm.ps[] for the
 * requested dpm_state.  User states are matched via the ATOM UI
 * classification (honouring SINGLE_DISPLAY_ONLY caps); internal states
 * match their specific class/class2 flags.  When no state matches, the
 * request is progressively downgraded (see the fallback switch) and the
 * search restarts; returns NULL only if even the fallbacks fail.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

/*
 * Pick and apply the power state matching the current DPM state.
 * Skips the hardware transition entirely when the requested state
 * equals the current one and nothing else (VCE activity, display
 * configuration) forces a reprogram.  Caller must hold rdev->pm.mutex.
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
1087 */ 1088 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) { 1089 /* update display watermarks based on new power state */ 1090 radeon_bandwidth_update(rdev); 1091 /* update displays */ 1092 radeon_dpm_display_configuration_changed(rdev); 1093 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1094 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1095 } 1096 return; 1097 } else { 1098 /* for BTC+ if the num crtcs hasn't changed and state is the same, 1099 * nothing to do, if the num crtcs is > 1 and state is the same, 1100 * update display configuration. 1101 */ 1102 if (rdev->pm.dpm.new_active_crtcs == 1103 rdev->pm.dpm.current_active_crtcs) { 1104 return; 1105 } else { 1106 if ((rdev->pm.dpm.current_active_crtc_count > 1) && 1107 (rdev->pm.dpm.new_active_crtc_count > 1)) { 1108 /* update display watermarks based on new power state */ 1109 radeon_bandwidth_update(rdev); 1110 /* update displays */ 1111 radeon_dpm_display_configuration_changed(rdev); 1112 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1113 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1114 return; 1115 } 1116 } 1117 } 1118 } 1119 1120 force: 1121 if (radeon_dpm == 1) { 1122 printk("switching from power state:\n"); 1123 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 1124 printk("switching to power state:\n"); 1125 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 1126 } 1127 1128 lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE); // down_write 1129 mutex_lock(&rdev->ring_lock); 1130 1131 /* update whether vce is active */ 1132 ps->vce_active = rdev->pm.dpm.vce_active; 1133 1134 ret = radeon_dpm_pre_set_power_state(rdev); 1135 if (ret) 1136 goto done; 1137 1138 /* update display watermarks based on new power state */ 1139 radeon_bandwidth_update(rdev); 1140 /* update displays */ 1141 radeon_dpm_display_configuration_changed(rdev); 1142 1143 /* wait for the rings 
to drain */ 1144 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1145 struct radeon_ring *ring = &rdev->ring[i]; 1146 if (ring->ready) 1147 radeon_fence_wait_empty(rdev, i); 1148 } 1149 1150 /* program the new power state */ 1151 radeon_dpm_set_power_state(rdev); 1152 1153 /* update current power state */ 1154 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps; 1155 1156 radeon_dpm_post_set_power_state(rdev); 1157 1158 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1159 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1160 rdev->pm.dpm.single_display = single_display; 1161 1162 if (rdev->asic->dpm.force_performance_level) { 1163 if (rdev->pm.dpm.thermal_active) { 1164 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 1165 /* force low perf level for thermal */ 1166 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 1167 /* save the user's level */ 1168 rdev->pm.dpm.forced_level = level; 1169 } else { 1170 /* otherwise, user selected level */ 1171 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level); 1172 } 1173 } 1174 1175 done: 1176 mutex_unlock(&rdev->ring_lock); 1177 lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write 1178 } 1179 1180 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) 1181 { 1182 enum radeon_pm_state_type dpm_state; 1183 1184 if (rdev->asic->dpm.powergate_uvd) { 1185 mutex_lock(&rdev->pm.mutex); 1186 /* don't powergate anything if we 1187 have active but pause streams */ 1188 enable |= rdev->pm.dpm.sd > 0; 1189 enable |= rdev->pm.dpm.hd > 0; 1190 /* enable/disable UVD */ 1191 radeon_dpm_powergate_uvd(rdev, !enable); 1192 mutex_unlock(&rdev->pm.mutex); 1193 } else { 1194 if (enable) { 1195 mutex_lock(&rdev->pm.mutex); 1196 rdev->pm.dpm.uvd_active = true; 1197 /* disable this for now */ 1198 #if 0 1199 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 1200 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 1201 else if ((rdev->pm.dpm.sd == 
2) && (rdev->pm.dpm.hd == 0)) 1202 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1203 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) 1204 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1205 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 1206 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 1207 else 1208 #endif 1209 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 1210 rdev->pm.dpm.state = dpm_state; 1211 mutex_unlock(&rdev->pm.mutex); 1212 } else { 1213 mutex_lock(&rdev->pm.mutex); 1214 rdev->pm.dpm.uvd_active = false; 1215 mutex_unlock(&rdev->pm.mutex); 1216 } 1217 1218 radeon_pm_compute_clocks(rdev); 1219 } 1220 } 1221 1222 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable) 1223 { 1224 if (enable) { 1225 mutex_lock(&rdev->pm.mutex); 1226 rdev->pm.dpm.vce_active = true; 1227 /* XXX select vce level based on ring/task */ 1228 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL; 1229 mutex_unlock(&rdev->pm.mutex); 1230 } else { 1231 mutex_lock(&rdev->pm.mutex); 1232 rdev->pm.dpm.vce_active = false; 1233 mutex_unlock(&rdev->pm.mutex); 1234 } 1235 1236 radeon_pm_compute_clocks(rdev); 1237 } 1238 1239 static void radeon_pm_suspend_old(struct radeon_device *rdev) 1240 { 1241 mutex_lock(&rdev->pm.mutex); 1242 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1243 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 1244 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 1245 } 1246 mutex_unlock(&rdev->pm.mutex); 1247 1248 #ifdef DUMBBELL_PM 1249 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1250 #endif /* DUMBBELL_PM */ 1251 } 1252 1253 static void radeon_pm_suspend_dpm(struct radeon_device *rdev) 1254 { 1255 mutex_lock(&rdev->pm.mutex); 1256 /* disable dpm */ 1257 radeon_dpm_disable(rdev); 1258 /* reset the power state */ 1259 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1260 rdev->pm.dpm_enabled = false; 1261 mutex_unlock(&rdev->pm.mutex); 1262 } 1263 1264 void radeon_pm_suspend(struct radeon_device *rdev) 1265 { 
1266 if (rdev->pm.pm_method == PM_METHOD_DPM) 1267 radeon_pm_suspend_dpm(rdev); 1268 else 1269 radeon_pm_suspend_old(rdev); 1270 } 1271 1272 static void radeon_pm_resume_old(struct radeon_device *rdev) 1273 { 1274 /* set up the default clocks if the MC ucode is loaded */ 1275 if ((rdev->family >= CHIP_BARTS) && 1276 (rdev->family <= CHIP_CAYMAN) && 1277 rdev->mc_fw) { 1278 if (rdev->pm.default_vddc) 1279 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1280 SET_VOLTAGE_TYPE_ASIC_VDDC); 1281 if (rdev->pm.default_vddci) 1282 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1283 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1284 if (rdev->pm.default_sclk) 1285 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1286 if (rdev->pm.default_mclk) 1287 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1288 } 1289 /* asic init will reset the default power state */ 1290 mutex_lock(&rdev->pm.mutex); 1291 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 1292 rdev->pm.current_clock_mode_index = 0; 1293 rdev->pm.current_sclk = rdev->pm.default_sclk; 1294 rdev->pm.current_mclk = rdev->pm.default_mclk; 1295 if (rdev->pm.power_state) { 1296 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 1297 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; 1298 } 1299 if (rdev->pm.pm_method == PM_METHOD_DYNPM 1300 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 1301 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1302 #ifdef DUMBBELL_PM 1303 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1304 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1305 #endif /* DUMBBELL_PM */ 1306 } 1307 mutex_unlock(&rdev->pm.mutex); 1308 radeon_pm_compute_clocks(rdev); 1309 } 1310 1311 static void radeon_pm_resume_dpm(struct radeon_device *rdev) 1312 { 1313 int ret; 1314 1315 /* asic init will reset to the boot state */ 1316 mutex_lock(&rdev->pm.mutex); 1317 
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1318 radeon_dpm_setup_asic(rdev); 1319 ret = radeon_dpm_enable(rdev); 1320 mutex_unlock(&rdev->pm.mutex); 1321 if (ret) 1322 goto dpm_resume_fail; 1323 rdev->pm.dpm_enabled = true; 1324 return; 1325 1326 dpm_resume_fail: 1327 DRM_ERROR("radeon: dpm resume failed\n"); 1328 if ((rdev->family >= CHIP_BARTS) && 1329 (rdev->family <= CHIP_CAYMAN) && 1330 rdev->mc_fw) { 1331 if (rdev->pm.default_vddc) 1332 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1333 SET_VOLTAGE_TYPE_ASIC_VDDC); 1334 if (rdev->pm.default_vddci) 1335 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1336 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1337 if (rdev->pm.default_sclk) 1338 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1339 if (rdev->pm.default_mclk) 1340 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1341 } 1342 } 1343 1344 void radeon_pm_resume(struct radeon_device *rdev) 1345 { 1346 if (rdev->pm.pm_method == PM_METHOD_DPM) 1347 radeon_pm_resume_dpm(rdev); 1348 else 1349 radeon_pm_resume_old(rdev); 1350 } 1351 1352 static int radeon_pm_init_old(struct radeon_device *rdev) 1353 { 1354 int ret; 1355 1356 rdev->pm.profile = PM_PROFILE_DEFAULT; 1357 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1358 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1359 rdev->pm.dynpm_can_upclock = true; 1360 rdev->pm.dynpm_can_downclock = true; 1361 rdev->pm.default_sclk = rdev->clock.default_sclk; 1362 rdev->pm.default_mclk = rdev->clock.default_mclk; 1363 rdev->pm.current_sclk = rdev->clock.default_sclk; 1364 rdev->pm.current_mclk = rdev->clock.default_mclk; 1365 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1366 1367 if (rdev->bios) { 1368 if (rdev->is_atom_bios) 1369 radeon_atombios_get_power_modes(rdev); 1370 else 1371 radeon_combios_get_power_modes(rdev); 1372 radeon_pm_print_states(rdev); 1373 radeon_pm_init_profile(rdev); 1374 /* set up the default clocks if the MC ucode is loaded */ 1375 if ((rdev->family >= 
CHIP_BARTS) && 1376 (rdev->family <= CHIP_CAYMAN) && 1377 rdev->mc_fw) { 1378 if (rdev->pm.default_vddc) 1379 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1380 SET_VOLTAGE_TYPE_ASIC_VDDC); 1381 if (rdev->pm.default_vddci) 1382 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1383 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1384 if (rdev->pm.default_sclk) 1385 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1386 if (rdev->pm.default_mclk) 1387 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1388 } 1389 } 1390 1391 /* set up the internal thermal sensor if applicable */ 1392 ret = radeon_hwmon_init(rdev); 1393 if (ret) 1394 return ret; 1395 1396 #ifdef DUMBBELL_PM 1397 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); 1398 #endif /* DUMBBELL_PM */ 1399 1400 if (rdev->pm.num_power_states > 1) { 1401 if (radeon_debugfs_pm_init(rdev)) { 1402 DRM_ERROR("Failed to register debugfs file for PM!\n"); 1403 } 1404 1405 DRM_INFO("radeon: power management initialized\n"); 1406 } 1407 1408 return 0; 1409 } 1410 1411 static void radeon_dpm_print_power_states(struct radeon_device *rdev) 1412 { 1413 int i; 1414 1415 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 1416 printk("== power state %d ==\n", i); 1417 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]); 1418 } 1419 } 1420 1421 static int radeon_pm_init_dpm(struct radeon_device *rdev) 1422 { 1423 int ret; 1424 1425 /* default to balanced state */ 1426 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1427 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1428 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1429 rdev->pm.default_sclk = rdev->clock.default_sclk; 1430 rdev->pm.default_mclk = rdev->clock.default_mclk; 1431 rdev->pm.current_sclk = rdev->clock.default_sclk; 1432 rdev->pm.current_mclk = rdev->clock.default_mclk; 1433 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1434 1435 if (rdev->bios && rdev->is_atom_bios) 1436 radeon_atombios_get_power_modes(rdev); 1437 else 
1438 return -EINVAL; 1439 1440 /* set up the internal thermal sensor if applicable */ 1441 ret = radeon_hwmon_init(rdev); 1442 if (ret) 1443 return ret; 1444 1445 TASK_INIT(&rdev->pm.dpm.thermal.work, 0, radeon_dpm_thermal_work_handler, rdev); 1446 mutex_lock(&rdev->pm.mutex); 1447 radeon_dpm_init(rdev); 1448 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1449 if (radeon_dpm == 1) 1450 radeon_dpm_print_power_states(rdev); 1451 radeon_dpm_setup_asic(rdev); 1452 ret = radeon_dpm_enable(rdev); 1453 mutex_unlock(&rdev->pm.mutex); 1454 if (ret) 1455 goto dpm_failed; 1456 rdev->pm.dpm_enabled = true; 1457 1458 #ifdef TODO_DEVICE_FILE 1459 if (radeon_debugfs_pm_init(rdev)) { 1460 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1461 } 1462 #endif 1463 1464 DRM_INFO("radeon: dpm initialized\n"); 1465 1466 return 0; 1467 1468 dpm_failed: 1469 rdev->pm.dpm_enabled = false; 1470 if ((rdev->family >= CHIP_BARTS) && 1471 (rdev->family <= CHIP_CAYMAN) && 1472 rdev->mc_fw) { 1473 if (rdev->pm.default_vddc) 1474 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1475 SET_VOLTAGE_TYPE_ASIC_VDDC); 1476 if (rdev->pm.default_vddci) 1477 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1478 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1479 if (rdev->pm.default_sclk) 1480 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1481 if (rdev->pm.default_mclk) 1482 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1483 } 1484 DRM_ERROR("radeon: dpm initialization failed\n"); 1485 return ret; 1486 } 1487 1488 struct radeon_dpm_quirk { 1489 u32 chip_vendor; 1490 u32 chip_device; 1491 u32 subsys_vendor; 1492 u32 subsys_device; 1493 }; 1494 1495 /* cards with dpm stability problems */ 1496 static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { 1497 /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ 1498 { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, 1499 /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ 1500 { 
PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, 1501 { 0, 0, 0, 0 }, 1502 }; 1503 1504 int radeon_pm_init(struct radeon_device *rdev) 1505 { 1506 struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; 1507 bool disable_dpm = false; 1508 1509 /* Apply dpm quirks */ 1510 while (p && p->chip_device != 0) { 1511 if (rdev->pdev->vendor == p->chip_vendor && 1512 rdev->pdev->device == p->chip_device && 1513 rdev->pdev->subsystem_vendor == p->subsys_vendor && 1514 rdev->pdev->subsystem_device == p->subsys_device) { 1515 disable_dpm = true; 1516 break; 1517 } 1518 ++p; 1519 } 1520 1521 /* enable dpm on rv6xx+ */ 1522 switch (rdev->family) { 1523 case CHIP_RV610: 1524 case CHIP_RV630: 1525 case CHIP_RV620: 1526 case CHIP_RV635: 1527 case CHIP_RV670: 1528 case CHIP_RS780: 1529 case CHIP_RS880: 1530 case CHIP_RV770: 1531 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1532 if (!rdev->rlc_fw) 1533 rdev->pm.pm_method = PM_METHOD_PROFILE; 1534 else if ((rdev->family >= CHIP_RV770) && 1535 (!(rdev->flags & RADEON_IS_IGP)) && 1536 (!rdev->smc_fw)) 1537 rdev->pm.pm_method = PM_METHOD_PROFILE; 1538 else if (radeon_dpm == 1) 1539 rdev->pm.pm_method = PM_METHOD_DPM; 1540 else 1541 rdev->pm.pm_method = PM_METHOD_PROFILE; 1542 break; 1543 case CHIP_RV730: 1544 case CHIP_RV710: 1545 case CHIP_RV740: 1546 case CHIP_CEDAR: 1547 case CHIP_REDWOOD: 1548 case CHIP_JUNIPER: 1549 case CHIP_CYPRESS: 1550 case CHIP_HEMLOCK: 1551 case CHIP_PALM: 1552 case CHIP_SUMO: 1553 case CHIP_SUMO2: 1554 case CHIP_BARTS: 1555 case CHIP_TURKS: 1556 case CHIP_CAICOS: 1557 case CHIP_CAYMAN: 1558 case CHIP_ARUBA: 1559 case CHIP_TAHITI: 1560 case CHIP_PITCAIRN: 1561 case CHIP_VERDE: 1562 case CHIP_OLAND: 1563 case CHIP_HAINAN: 1564 case CHIP_BONAIRE: 1565 case CHIP_KABINI: 1566 case CHIP_KAVERI: 1567 case CHIP_HAWAII: 1568 case CHIP_MULLINS: 1569 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1570 if (!rdev->rlc_fw) 1571 rdev->pm.pm_method = PM_METHOD_PROFILE; 1572 else if ((rdev->family >= CHIP_RV770) && 1573 
(!(rdev->flags & RADEON_IS_IGP)) && 1574 (!rdev->smc_fw)) 1575 rdev->pm.pm_method = PM_METHOD_PROFILE; 1576 else if (disable_dpm && (radeon_dpm == -1)) 1577 rdev->pm.pm_method = PM_METHOD_PROFILE; 1578 else if (radeon_dpm == 0) 1579 rdev->pm.pm_method = PM_METHOD_PROFILE; 1580 else 1581 rdev->pm.pm_method = PM_METHOD_DPM; 1582 break; 1583 default: 1584 /* default to profile method */ 1585 rdev->pm.pm_method = PM_METHOD_PROFILE; 1586 break; 1587 } 1588 1589 if (rdev->pm.pm_method == PM_METHOD_DPM) 1590 return radeon_pm_init_dpm(rdev); 1591 else 1592 return radeon_pm_init_old(rdev); 1593 } 1594 1595 int radeon_pm_late_init(struct radeon_device *rdev) 1596 { 1597 int ret = 0; 1598 1599 if (rdev->pm.pm_method == PM_METHOD_DPM) { 1600 if (rdev->pm.dpm_enabled) { 1601 #ifdef TODO_DEVICE_FILE 1602 if (!rdev->pm.sysfs_initialized) { 1603 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1604 if (ret) 1605 DRM_ERROR("failed to create device file for dpm state\n"); 1606 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1607 if (ret) 1608 DRM_ERROR("failed to create device file for dpm state\n"); 1609 /* XXX: these are noops for dpm but are here for backwards compat */ 1610 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1611 if (ret) 1612 DRM_ERROR("failed to create device file for power profile\n"); 1613 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1614 if (ret) 1615 DRM_ERROR("failed to create device file for power method\n"); 1616 if (!ret) 1617 rdev->pm.sysfs_initialized = true; 1618 } 1619 #endif 1620 1621 mutex_lock(&rdev->pm.mutex); 1622 ret = radeon_dpm_late_enable(rdev); 1623 mutex_unlock(&rdev->pm.mutex); 1624 if (ret) { 1625 rdev->pm.dpm_enabled = false; 1626 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1627 } else { 1628 /* set the dpm state for PX since there won't be 1629 * a modeset to call this. 
1630 */ 1631 radeon_pm_compute_clocks(rdev); 1632 } 1633 } 1634 } else { 1635 #ifdef TODO_DEVICE_FILE 1636 if ((rdev->pm.num_power_states > 1) && 1637 (!rdev->pm.sysfs_initialized)) { 1638 /* where's the best place to put these? */ 1639 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1640 if (ret) 1641 DRM_ERROR("failed to create device file for power profile\n"); 1642 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1643 if (ret) 1644 DRM_ERROR("failed to create device file for power method\n"); 1645 if (!ret) 1646 rdev->pm.sysfs_initialized = true; 1647 } 1648 #endif 1649 } 1650 return ret; 1651 } 1652 1653 static void radeon_pm_fini_old(struct radeon_device *rdev) 1654 { 1655 if (rdev->pm.num_power_states > 1) { 1656 mutex_lock(&rdev->pm.mutex); 1657 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1658 rdev->pm.profile = PM_PROFILE_DEFAULT; 1659 radeon_pm_update_profile(rdev); 1660 radeon_pm_set_clocks(rdev); 1661 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1662 /* reset default clocks */ 1663 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1664 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1665 radeon_pm_set_clocks(rdev); 1666 } 1667 mutex_unlock(&rdev->pm.mutex); 1668 1669 #ifdef DUMBBELL_PM 1670 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1671 #endif /* DUMBBELL_PM */ 1672 1673 #ifdef DUMBBELL_WIP 1674 device_remove_file(rdev->dev, &dev_attr_power_profile); 1675 device_remove_file(rdev->dev, &dev_attr_power_method); 1676 #endif /* DUMBBELL_WIP */ 1677 } 1678 1679 if (rdev->pm.power_state) { 1680 int i; 1681 for (i = 0; i < rdev->pm.num_power_states; ++i) { 1682 kfree(rdev->pm.power_state[i].clock_info); 1683 } 1684 kfree(rdev->pm.power_state); 1685 rdev->pm.power_state = NULL; 1686 rdev->pm.num_power_states = 0; 1687 } 1688 1689 radeon_hwmon_fini(rdev); 1690 } 1691 1692 static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1693 { 1694 if (rdev->pm.num_power_states > 1) { 1695 mutex_lock(&rdev->pm.mutex); 
1696 radeon_dpm_disable(rdev); 1697 mutex_unlock(&rdev->pm.mutex); 1698 1699 #ifdef TODO_DEVICE_FILE 1700 device_remove_file(rdev->dev, &dev_attr_power_dpm_state); 1701 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1702 /* XXX backwards compat */ 1703 device_remove_file(rdev->dev, &dev_attr_power_profile); 1704 device_remove_file(rdev->dev, &dev_attr_power_method); 1705 #endif 1706 } 1707 radeon_dpm_fini(rdev); 1708 1709 /* prevents leaking 440 bytes on OLAND */ 1710 if (rdev->pm.power_state) { 1711 int i; 1712 for (i = 0; i < rdev->pm.num_power_states; ++i) { 1713 kfree(rdev->pm.power_state[i].clock_info); 1714 } 1715 kfree(rdev->pm.power_state); 1716 rdev->pm.power_state = NULL; 1717 rdev->pm.num_power_states = 0; 1718 } 1719 1720 radeon_hwmon_fini(rdev); 1721 } 1722 1723 void radeon_pm_fini(struct radeon_device *rdev) 1724 { 1725 if (rdev->pm.pm_method == PM_METHOD_DPM) 1726 radeon_pm_fini_dpm(rdev); 1727 else 1728 radeon_pm_fini_old(rdev); 1729 } 1730 1731 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) 1732 { 1733 struct drm_device *ddev = rdev->ddev; 1734 struct drm_crtc *crtc; 1735 struct radeon_crtc *radeon_crtc; 1736 1737 if (rdev->pm.num_power_states < 2) 1738 return; 1739 1740 mutex_lock(&rdev->pm.mutex); 1741 1742 rdev->pm.active_crtcs = 0; 1743 rdev->pm.active_crtc_count = 0; 1744 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 1745 list_for_each_entry(crtc, 1746 &ddev->mode_config.crtc_list, head) { 1747 radeon_crtc = to_radeon_crtc(crtc); 1748 if (radeon_crtc->enabled) { 1749 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 1750 rdev->pm.active_crtc_count++; 1751 } 1752 } 1753 } 1754 1755 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1756 radeon_pm_update_profile(rdev); 1757 radeon_pm_set_clocks(rdev); 1758 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1759 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { 1760 if (rdev->pm.active_crtc_count > 1) { 1761 if 
(rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1762 #ifdef DUMBBELL_PM 1763 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1764 #endif /* DUMBBELL_PM */ 1765 1766 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 1767 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1768 radeon_pm_get_dynpm_state(rdev); 1769 radeon_pm_set_clocks(rdev); 1770 1771 DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); 1772 } 1773 } else if (rdev->pm.active_crtc_count == 1) { 1774 /* TODO: Increase clocks if needed for current mode */ 1775 1776 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { 1777 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1778 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; 1779 radeon_pm_get_dynpm_state(rdev); 1780 radeon_pm_set_clocks(rdev); 1781 1782 #ifdef DUMBBELL_PM 1783 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1784 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1785 #endif /* DUMBBELL_PM */ 1786 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { 1787 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1788 #ifdef DUMBBELL_PM 1789 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1790 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1791 #endif /* DUMBBELL_PM*/ 1792 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); 1793 } 1794 } else { /* count == 0 */ 1795 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { 1796 #ifdef DUMBBELL_PM 1797 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1798 #endif /* DUMBBELL_PM */ 1799 1800 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; 1801 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; 1802 radeon_pm_get_dynpm_state(rdev); 1803 radeon_pm_set_clocks(rdev); 1804 } 1805 } 1806 } 1807 } 1808 1809 mutex_unlock(&rdev->pm.mutex); 1810 } 1811 1812 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) 1813 { 1814 struct drm_device *ddev = rdev->ddev; 1815 struct drm_crtc *crtc; 1816 struct radeon_crtc *radeon_crtc; 1817 1818 if (!rdev->pm.dpm_enabled) 1819 return; 1820 1821 
mutex_lock(&rdev->pm.mutex); 1822 1823 /* update active crtc counts */ 1824 rdev->pm.dpm.new_active_crtcs = 0; 1825 rdev->pm.dpm.new_active_crtc_count = 0; 1826 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 1827 list_for_each_entry(crtc, 1828 &ddev->mode_config.crtc_list, head) { 1829 radeon_crtc = to_radeon_crtc(crtc); 1830 if (crtc->enabled) { 1831 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); 1832 rdev->pm.dpm.new_active_crtc_count++; 1833 } 1834 } 1835 } 1836 1837 /* update battery/ac status */ 1838 if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE) 1839 rdev->pm.dpm.ac_power = true; 1840 else 1841 rdev->pm.dpm.ac_power = false; 1842 1843 radeon_dpm_change_power_state_locked(rdev); 1844 1845 mutex_unlock(&rdev->pm.mutex); 1846 1847 } 1848 1849 void radeon_pm_compute_clocks(struct radeon_device *rdev) 1850 { 1851 if (rdev->pm.pm_method == PM_METHOD_DPM) 1852 radeon_pm_compute_clocks_dpm(rdev); 1853 else 1854 radeon_pm_compute_clocks_old(rdev); 1855 } 1856 1857 static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1858 { 1859 int crtc, vpos, hpos, vbl_status; 1860 bool in_vbl = true; 1861 1862 /* Iterate over all active crtc's. All crtc's must be in vblank, 1863 * otherwise return in_vbl == false. 
1864 */ 1865 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1866 if (rdev->pm.active_crtcs & (1 << crtc)) { 1867 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, 1868 crtc, 1869 USE_REAL_VBLANKSTART, 1870 &vpos, &hpos, NULL, NULL, 1871 &rdev->mode_info.crtcs[crtc]->base.hwmode); 1872 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1873 !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK)) 1874 in_vbl = false; 1875 } 1876 } 1877 1878 return in_vbl; 1879 } 1880 1881 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 1882 { 1883 u32 stat_crtc = 0; 1884 bool in_vbl = radeon_pm_in_vbl(rdev); 1885 1886 if (in_vbl == false) 1887 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, 1888 finish ? "exit" : "entry"); 1889 return in_vbl; 1890 } 1891 1892 #ifdef DUMBBELL_PM 1893 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 1894 { 1895 struct radeon_device *rdev; 1896 int resched; 1897 rdev = container_of(work, struct radeon_device, 1898 pm.dynpm_idle_work.work); 1899 1900 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1901 mutex_lock(&rdev->pm.mutex); 1902 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1903 int not_processed = 0; 1904 int i; 1905 1906 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1907 struct radeon_ring *ring = &rdev->ring[i]; 1908 1909 if (ring->ready) { 1910 not_processed += radeon_fence_count_emitted(rdev, i); 1911 if (not_processed >= 3) 1912 break; 1913 } 1914 } 1915 1916 if (not_processed >= 3) { /* should upclock */ 1917 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 1918 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1919 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1920 rdev->pm.dynpm_can_upclock) { 1921 rdev->pm.dynpm_planned_action = 1922 DYNPM_ACTION_UPCLOCK; 1923 rdev->pm.dynpm_action_timeout = jiffies + 1924 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1925 } 1926 } else if (not_processed == 0) { /* should downclock */ 1927 if 
(rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 1928 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1929 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1930 rdev->pm.dynpm_can_downclock) { 1931 rdev->pm.dynpm_planned_action = 1932 DYNPM_ACTION_DOWNCLOCK; 1933 rdev->pm.dynpm_action_timeout = jiffies + 1934 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1935 } 1936 } 1937 1938 /* Note, radeon_pm_set_clocks is called with static_switch set 1939 * to false since we want to wait for vbl to avoid flicker. 1940 */ 1941 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 1942 jiffies > rdev->pm.dynpm_action_timeout) { 1943 radeon_pm_get_dynpm_state(rdev); 1944 radeon_pm_set_clocks(rdev); 1945 } 1946 1947 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1948 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1949 } 1950 mutex_unlock(&rdev->pm.mutex); 1951 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1952 } 1953 #endif /* DUMBBELL_PM */ 1954 1955 /* 1956 * Debugfs info 1957 */ 1958 #if defined(CONFIG_DEBUG_FS) 1959 1960 static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 1961 { 1962 struct drm_info_node *node = (struct drm_info_node *) m->private; 1963 struct drm_device *dev = node->minor->dev; 1964 struct radeon_device *rdev = dev->dev_private; 1965 struct drm_device *ddev = rdev->ddev; 1966 1967 if ((rdev->flags & RADEON_IS_PX) && 1968 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1969 seq_printf(m, "PX asic powered off\n"); 1970 } else if (rdev->pm.dpm_enabled) { 1971 mutex_lock(&rdev->pm.mutex); 1972 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1973 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1974 else 1975 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1976 mutex_unlock(&rdev->pm.mutex); 1977 } else { 1978 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1979 /* radeon_get_engine_clock is not reliable on APUs so just print the current 
clock */ 1980 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1981 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1982 else 1983 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 1984 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1985 if (rdev->asic->pm.get_memory_clock) 1986 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 1987 if (rdev->pm.current_vddc) 1988 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 1989 if (rdev->asic->pm.get_pcie_lanes) 1990 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 1991 } 1992 1993 return 0; 1994 } 1995 1996 static struct drm_info_list radeon_pm_info_list[] = { 1997 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 1998 }; 1999 #endif 2000 2001 static int radeon_debugfs_pm_init(struct radeon_device *rdev) 2002 { 2003 #if defined(CONFIG_DEBUG_FS) 2004 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 2005 #else 2006 return 0; 2007 #endif 2008 } 2009