/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_pm.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

#ifdef DUMBBELL_WIP
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
#endif /* DUMBBELL_WIP */
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

/* Look up the index of the "instance"-th power state of the given type;
 * fall back to the default power state index if there is no match.
 */
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			lockmgr(&rdev->pm.mutex, LK_RELEASE);
		}
	}
}

static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
#ifdef DUMBBELL_WIP
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
#endif /* DUMBBELL_WIP */
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
#ifdef DUMBBELL_WIP
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif /* DUMBBELL_WIP */
	}
}

static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	DRM_LOCK(rdev->ddev);
	lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE);
	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* needs a GPU reset; don't reset here */
			lockmgr(&rdev->ring_lock, LK_RELEASE);
			lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
			DRM_UNLOCK(rdev->ddev);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	lockmgr(&rdev->ring_lock, LK_RELEASE);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
	DRM_UNLOCK(rdev->ddev);
}

static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

#ifdef DUMBBELL_WIP
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return ksnprintf(buf, PAGE_SIZE, "%s\n",
			 (cp == PM_PROFILE_AUTO) ? "auto" :
			 (cp == PM_PROFILE_LOW) ? "low" :
			 (cp == PM_PROFILE_MID) ? "mid" :
			 (cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	lockmgr(&rdev->pm.mutex, LK_RELEASE);

	return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return ksnprintf(buf, PAGE_SIZE, "%s\n",
			 (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
"dynpm" : "profile"); 399 } 400 401 static ssize_t radeon_set_pm_method(struct device *dev, 402 struct device_attribute *attr, 403 const char *buf, 404 size_t count) 405 { 406 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 407 struct radeon_device *rdev = ddev->dev_private; 408 409 410 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 411 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE); 412 rdev->pm.pm_method = PM_METHOD_DYNPM; 413 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 414 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 415 lockmgr(&rdev->pm.mutex, LK_RELEASE); 416 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 417 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE); 418 /* disable dynpm */ 419 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 420 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 421 rdev->pm.pm_method = PM_METHOD_PROFILE; 422 lockmgr(&rdev->pm.mutex, LK_RELEASE); 423 #ifdef DUMBBELL_WIP 424 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 425 #endif /* DUMBBELL_WIP */ 426 } else { 427 count = -EINVAL; 428 goto fail; 429 } 430 radeon_pm_compute_clocks(rdev); 431 fail: 432 return count; 433 } 434 435 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 436 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 437 438 static ssize_t radeon_hwmon_show_temp(struct device *dev, 439 struct device_attribute *attr, 440 char *buf) 441 { 442 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 443 struct radeon_device *rdev = ddev->dev_private; 444 int temp; 445 446 switch (rdev->pm.int_thermal_type) { 447 case THERMAL_TYPE_RV6XX: 448 temp = rv6xx_get_temp(rdev); 449 break; 450 case THERMAL_TYPE_RV770: 451 temp = rv770_get_temp(rdev); 452 break; 453 case THERMAL_TYPE_EVERGREEN: 454 case THERMAL_TYPE_NI: 455 temp = evergreen_get_temp(rdev); 456 break; 457 case THERMAL_TYPE_SUMO: 458 temp = sumo_get_temp(rdev); 459 break; 460 case THERMAL_TYPE_SI: 461 temp = si_get_temp(rdev); 462 break; 463 default: 464 temp = 0; 465 break; 466 } 467 468 return ksnprintf(buf, PAGE_SIZE, "%d\n", temp); 469 } 470 471 static ssize_t radeon_hwmon_show_name(struct device *dev, 472 struct device_attribute *attr, 473 char *buf) 474 { 475 return sprintf(buf, "radeon\n"); 476 } 477 478 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 479 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 480 481 static struct attribute *hwmon_attributes[] = { 482 &sensor_dev_attr_temp1_input.dev_attr.attr, 483 &sensor_dev_attr_name.dev_attr.attr, 484 NULL 485 }; 486 487 static const struct attribute_group hwmon_attrgroup = { 488 .attrs = hwmon_attributes, 489 }; 490 #endif /* DUMBBELL_WIP */ 491 492 static int radeon_hwmon_init(struct radeon_device *rdev) 493 { 494 int err = 0; 495 496 #ifdef DUMBBELL_WIP 497 rdev->pm.int_hwmon_dev = NULL; 498 #endif /* DUMBBELL_WIP */ 499 500 switch (rdev->pm.int_thermal_type) { 501 case THERMAL_TYPE_RV6XX: 502 case THERMAL_TYPE_RV770: 503 case THERMAL_TYPE_EVERGREEN: 504 case THERMAL_TYPE_NI: 505 case THERMAL_TYPE_SUMO: 506 case THERMAL_TYPE_SI: 507 /* No support for TN yet */ 508 if (rdev->family == CHIP_ARUBA) 509 return err; 510 #ifdef DUMBBELL_WIP 511 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 512 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 513 err = PTR_ERR(rdev->pm.int_hwmon_dev); 514 dev_err(rdev->dev, 515 "Unable to register hwmon device: %d\n", err); 516 break; 517 } 518 
		dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
		err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
					 &hwmon_attrgroup);
		if (err) {
			dev_err(rdev->dev,
				"Unable to create hwmon sysfs file: %d\n", err);
			hwmon_device_unregister(rdev->dev);
		}
#endif /* DUMBBELL_WIP */
		break;
	default:
		break;
	}

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
#ifdef DUMBBELL_WIP
	if (rdev->pm.int_hwmon_dev) {
		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
	}
#endif /* DUMBBELL_WIP */
}

void radeon_pm_suspend(struct radeon_device *rdev)
{
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);

#ifdef DUMBBELL_WIP
	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	radeon_pm_compute_clocks(rdev);
}

int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

#ifdef DUMBBELL_WIP
	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
#endif /* DUMBBELL_WIP */

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
#ifdef DUMBBELL_WIP
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
#endif /* DUMBBELL_WIP */
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
#ifdef DUMBBELL_WIP
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
#endif /* DUMBBELL_WIP */
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		DRM_UNLOCK(rdev->ddev); /* Work around LOR. */
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
		DRM_LOCK(rdev->ddev);

#ifdef DUMBBELL_WIP
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif /* DUMBBELL_WIP */
	}

	if (rdev->pm.power_state) {
		int i;
		for (i = 0; i < rdev->pm.num_power_states; ++i) {
			drm_free(rdev->pm.power_state[i].clock_info,
				 DRM_MEM_DRIVER);
		}
		drm_free(rdev->pm.power_state, DRM_MEM_DRIVER);
		rdev->pm.power_state = NULL;
		rdev->pm.num_power_states = 0;
	}

	radeon_hwmon_fini(rdev);
}

/* Recount the active CRTCs and reprogram clocks according to the current
 * PM method (profile or dynpm).
 */
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
			    &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	lockmgr(&rdev->pm.mutex, LK_RELEASE);
}

static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
				in_vbl = false;
		}
	}

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
				 finish ? "exit" : "entry");
"exit" : "entry"); 816 return in_vbl; 817 } 818 819 #ifdef DUMBBELL_WIP 820 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 821 { 822 struct radeon_device *rdev; 823 int resched; 824 rdev = container_of(work, struct radeon_device, 825 pm.dynpm_idle_work.work); 826 827 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 828 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE); 829 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 830 int not_processed = 0; 831 int i; 832 833 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 834 struct radeon_ring *ring = &rdev->ring[i]; 835 836 if (ring->ready) { 837 not_processed += radeon_fence_count_emitted(rdev, i); 838 if (not_processed >= 3) 839 break; 840 } 841 } 842 843 if (not_processed >= 3) { /* should upclock */ 844 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 845 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 846 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 847 rdev->pm.dynpm_can_upclock) { 848 rdev->pm.dynpm_planned_action = 849 DYNPM_ACTION_UPCLOCK; 850 rdev->pm.dynpm_action_timeout = jiffies + 851 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 852 } 853 } else if (not_processed == 0) { /* should downclock */ 854 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 855 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 856 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 857 rdev->pm.dynpm_can_downclock) { 858 rdev->pm.dynpm_planned_action = 859 DYNPM_ACTION_DOWNCLOCK; 860 rdev->pm.dynpm_action_timeout = jiffies + 861 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 862 } 863 } 864 865 /* Note, radeon_pm_set_clocks is called with static_switch set 866 * to false since we want to wait for vbl to avoid flicker. 867 */ 868 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 869 jiffies > rdev->pm.dynpm_action_timeout) { 870 radeon_pm_get_dynpm_state(rdev); 871 radeon_pm_set_clocks(rdev); 872 } 873 874 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 875 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 876 } 877 lockmgr(&rdev->pm.mutex, LK_RELEASE); 878 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 879 } 880 #endif /* DUMBBELL_WIP */ 881 882 /* 883 * Debugfs info 884 */ 885 #if defined(CONFIG_DEBUG_FS) 886 887 static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 888 { 889 struct drm_info_node *node = (struct drm_info_node *) m->private; 890 struct drm_device *dev = node->minor->dev; 891 struct radeon_device *rdev = dev->dev_private; 892 893 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 894 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 895 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 896 if (rdev->asic->pm.get_memory_clock) 897 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 898 if (rdev->pm.current_vddc) 899 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 900 if (rdev->asic->pm.get_pcie_lanes) 901 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 902 903 return 0; 904 } 905 906 static struct drm_info_list radeon_pm_info_list[] = { 907 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 908 }; 909 #endif 910 911 static int radeon_debugfs_pm_init(struct radeon_device *rdev) 912 { 913 #if defined(CONFIG_DEBUG_FS) 914 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 915 #else 916 return 0; 917 #endif 918 } 919