/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "kv_dpm.h"
#include <linux/seq_file.h>

#define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
#define KV_MINIMUM_ENGINE_CLOCK         800
#define SMC_RAM_END                     0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
                            bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
                                           struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
                                        struct radeon_ps *new_rps,
                                        struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
                                            int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

#if 0 /* unused */
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
    { 0, 4, 1 },
    { 1, 4, 1 },
    { 2, 5, 1 },
    { 3, 4, 2 },
    { 4, 1, 1 },
    { 5, 5, 2 },
    { 6, 6, 1 },
    { 7, 9, 2 },
    { 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
    { 0, 4, 1 },
    { 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
    { 0, 4, 1 },
    { 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
    { 0, 4, 1 },
    { 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
    { 0, 4, 1 },
    { 0xffffffff }
};
static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
    { 0, 4, 1 },
    { 1, 4, 1 },
    { 2, 5, 1 },
    { 3, 4, 1 },
    { 4, 1, 1 },
    { 5, 5, 1 },
    { 6, 6, 1 },
    { 7, 9, 1 },
    { 8, 4, 1 },
    { 9, 2, 1 },
    { 10, 3, 1 },
    { 11, 6, 1 },
    { 12, 8, 2 },
    { 13, 1, 1 },
    { 14, 2, 1 },
    { 15, 3, 1 },
    { 16, 1, 1 },
    { 17, 4, 1 },
    { 18, 3, 1 },
    { 19, 1, 1 },
    { 20, 8, 1 },
    { 21, 5, 1 },
    { 22, 1, 1 },
    { 23, 1, 1 },
    { 24, 4, 1 },
    { 27, 6, 1 },
    { 28, 1, 1 },
    { 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
    { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
    { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
    { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
    { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
    { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
    { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif

static const struct kv_pt_config_reg didt_config_kv[] =
{
    { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
    { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
    { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
    { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
    { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
    { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
    { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
    { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
    { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
    { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
    { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
    { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
    { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
    { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
    { 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
    struct kv_ps *ps = rps->ps_priv;

    return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
    struct kv_power_info *pi = rdev->pm.dpm.priv;

    return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
                                       const struct kv_lcac_config_values *local_cac_table,
                                       const struct kv_lcac_config_reg *local_cac_reg)
{
    u32 i, count, data;
    const struct kv_lcac_config_values *values = local_cac_table;

    while (values->block_id != 0xffffffff) {
        count = values->signal_id;
        for (i = 0; i < count; i++) {
            data = ((values->block_id << local_cac_reg->block_shift) &
                    local_cac_reg->block_mask);
            data |= ((i << local_cac_reg->signal_shift) &
                     local_cac_reg->signal_mask);
            data |= ((values->t << local_cac_reg->t_shift) &
                     local_cac_reg->t_mask);
            data |= ((1 << local_cac_reg->enable_shift) &
                     local_cac_reg->enable_mask);
            WREG32_SMC(local_cac_reg->cntl, data);
        }
        values++;
    }
}
#endif
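
/*
 * Note on the table walker below: KV_CONFIGREG_CACHE entries never touch
 * the hardware. Their shifted values are OR-accumulated into 'cache' and
 * flushed into the next real (SMC / DIDT / MMIO) read-modify-write, which
 * then clears the cache. Plain MMIO offsets in these tables are dword
 * indices, hence the << 2 in the default case.
 */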
static int kv_program_pt_config_registers(struct radeon_device *rdev,
                                          const struct kv_pt_config_reg *cac_config_regs)
{
    const struct kv_pt_config_reg *config_regs = cac_config_regs;
    u32 data;
    u32 cache = 0;

    if (config_regs == NULL)
        return -EINVAL;

    while (config_regs->offset != 0xFFFFFFFF) {
        if (config_regs->type == KV_CONFIGREG_CACHE) {
            cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
        } else {
            switch (config_regs->type) {
            case KV_CONFIGREG_SMC_IND:
                data = RREG32_SMC(config_regs->offset);
                break;
            case KV_CONFIGREG_DIDT_IND:
                data = RREG32_DIDT(config_regs->offset);
                break;
            default:
                data = RREG32(config_regs->offset << 2);
                break;
            }

            data &= ~config_regs->mask;
            data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
            data |= cache;
            cache = 0;

            switch (config_regs->type) {
            case KV_CONFIGREG_SMC_IND:
                WREG32_SMC(config_regs->offset, data);
                break;
            case KV_CONFIGREG_DIDT_IND:
                WREG32_DIDT(config_regs->offset, data);
                break;
            default:
                WREG32(config_regs->offset << 2, data);
                break;
            }
        }
        config_regs++;
    }

    return 0;
}

static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 data;

    if (pi->caps_sq_ramping) {
        data = RREG32_DIDT(DIDT_SQ_CTRL0);
        if (enable)
            data |= DIDT_CTRL_EN;
        else
            data &= ~DIDT_CTRL_EN;
        WREG32_DIDT(DIDT_SQ_CTRL0, data);
    }

    if (pi->caps_db_ramping) {
        data = RREG32_DIDT(DIDT_DB_CTRL0);
        if (enable)
            data |= DIDT_CTRL_EN;
        else
            data &= ~DIDT_CTRL_EN;
        WREG32_DIDT(DIDT_DB_CTRL0, data);
    }

    if (pi->caps_td_ramping) {
        data = RREG32_DIDT(DIDT_TD_CTRL0);
        if (enable)
            data |= DIDT_CTRL_EN;
        else
            data &= ~DIDT_CTRL_EN;
        WREG32_DIDT(DIDT_TD_CTRL0, data);
    }

    if (pi->caps_tcp_ramping) {
        data = RREG32_DIDT(DIDT_TCP_CTRL0);
        if (enable)
            data |= DIDT_CTRL_EN;
        else
            data &= ~DIDT_CTRL_EN;
        WREG32_DIDT(DIDT_TCP_CTRL0, data);
    }
}

static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    if (pi->caps_sq_ramping ||
        pi->caps_db_ramping ||
        pi->caps_td_ramping ||
        pi->caps_tcp_ramping) {
        cik_enter_rlc_safe_mode(rdev);

        if (enable) {
            ret = kv_program_pt_config_registers(rdev, didt_config_kv);
            if (ret) {
                cik_exit_rlc_safe_mode(rdev);
                return ret;
            }
        }

        kv_do_enable_didt(rdev, enable);

        cik_exit_rlc_safe_mode(rdev);
    }

    return 0;
}
#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (pi->caps_cac) {
        WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
        WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
        kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

        WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
        WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
        kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

        WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
        WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
        kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

        WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
        WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
        kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

        WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
        WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
        kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

        WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
        WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
        kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
    }
}
#endif

static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret = 0;

    if (pi->caps_cac) {
        if (enable) {
            ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
            if (ret)
                pi->cac_enabled = false;
            else
                pi->cac_enabled = true;
        } else if (pi->cac_enabled) {
            kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
            pi->cac_enabled = false;
        }
    }

    return ret;
}

static int kv_process_firmware_header(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 tmp;
    int ret;

    ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
                                 offsetof(SMU7_Firmware_Header, DpmTable),
                                 &tmp, pi->sram_end);
    if (ret == 0)
        pi->dpm_table_start = tmp;

    ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
                                 offsetof(SMU7_Firmware_Header, SoftRegisters),
                                 &tmp, pi->sram_end);
    if (ret == 0)
        pi->soft_regs_start = tmp;

    return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    pi->graphics_voltage_change_enable = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
                               &pi->graphics_voltage_change_enable,
                               sizeof(u8), pi->sram_end);

    return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    pi->graphics_interval = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
                               &pi->graphics_interval,
                               sizeof(u8), pi->sram_end);

    return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
                               &pi->graphics_boot_level,
                               sizeof(u8), pi->sram_end);

    return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
    WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
    WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
                                u32 index, u32 sclk)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct atom_clock_dividers dividers;
    int ret;

    ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                         sclk, false, &dividers);
    if (ret)
        return ret;

    pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
    pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

    return 0;
}
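
/*
 * VID helpers: kv_convert_vid2_to_vid7()/kv_convert_vid7_to_vid2() map
 * between 2-bit DPM voltage indices and 7-bit VIDs, preferring the
 * vddc-vs-sclk dependency table when the BIOS provides one and falling
 * back to the sys_info VID mapping table otherwise.
 * kv_convert_8bit_index_to_voltage() is linear, 6200 - 25 * index, so
 * e.g. index 0x40 (64) yields 6200 - 1600 = 4600. The unit is whatever
 * the BIOS voltage tables use; this file stores and compares the values
 * without interpreting the unit.
 */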
static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
                                   struct sumo_vid_mapping_table *vid_mapping_table,
                                   u32 vid_2bit)
{
    struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
    u32 i;

    if (vddc_sclk_table && vddc_sclk_table->count) {
        if (vid_2bit < vddc_sclk_table->count)
            return vddc_sclk_table->entries[vid_2bit].v;
        else
            return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
    } else {
        for (i = 0; i < vid_mapping_table->num_entries; i++) {
            if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
                return vid_mapping_table->entries[i].vid_7bit;
        }
        return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
    }
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
                                   struct sumo_vid_mapping_table *vid_mapping_table,
                                   u32 vid_7bit)
{
    struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
    u32 i;

    if (vddc_sclk_table && vddc_sclk_table->count) {
        for (i = 0; i < vddc_sclk_table->count; i++) {
            if (vddc_sclk_table->entries[i].v == vid_7bit)
                return i;
        }
        return vddc_sclk_table->count - 1;
    } else {
        for (i = 0; i < vid_mapping_table->num_entries; i++) {
            if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
                return vid_mapping_table->entries[i].vid_2bit;
        }

        return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
    }
}

static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
                                            u16 voltage)
{
    return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
                                            u32 vid_2bit)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
                                           &pi->sys_info.vid_mapping_table,
                                           vid_2bit);

    return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
    pi->graphics_level[index].MinVddNb =
        cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

    return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->graphics_level[index].AT = cpu_to_be16((u16)at);

    return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
                                      u32 index, bool enable)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
    u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

    tmp |= GLOBAL_PWRMGT_EN;
    WREG32_SMC(GENERAL_PWRMGT, tmp);

    kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
    kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
    u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

    sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
    sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

    WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
    u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

    sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

    WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
    return kv_notify_message_to_smu(rdev, freeze ?
                                    PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
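
/*
 * Forced-level control is family specific: Kabini and Mullins take
 * explicit PPSMC_MSG_DPM_ForceState/PPSMC_MSG_NoForcedLevel messages,
 * while the other Kaveri-class parts are steered by shrinking or
 * restoring the enabled level mask via kv_set_enabled_level()/
 * kv_set_enabled_levels().
 */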
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
    return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
    if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
        return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
    else
        return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 low_sclk_interrupt_t = 0;
    int ret = 0;

    if (pi->caps_sclk_throttle_low_notification) {
        low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
                                   (u8 *)&low_sclk_interrupt_t,
                                   sizeof(u32), pi->sram_end);
    }
    return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 i;
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

    if (table && table->count) {
        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
            if (table->entries[i].clk == pi->boot_pl.sclk)
                break;
        }

        pi->graphics_boot_level = (u8)i;
        kv_dpm_power_level_enable(rdev, i, true);
    } else {
        struct sumo_sclk_voltage_mapping_table *table =
            &pi->sys_info.sclk_voltage_mapping_table;

        if (table->num_max_dpm_entries == 0)
            return -EINVAL;

        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
            if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
                break;
        }

        pi->graphics_boot_level = (u8)i;
        kv_dpm_power_level_enable(rdev, i, true);
    }
    return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    pi->graphics_therm_throttle_enable = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
                               &pi->graphics_therm_throttle_enable,
                               sizeof(u8), pi->sram_end);

    return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
                               (u8 *)&pi->graphics_level,
                               sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
                               pi->sram_end);
    if (ret)
        return ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
                               &pi->graphics_dpm_level_count,
                               sizeof(u8), pi->sram_end);

    return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
    return (a >= b) ? a - b : b - a;
}
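
/*
 * DFS bypass selection. Clock values in these tables are in 10 kHz
 * units (as elsewhere in radeon dpm), so 40000 below is 400 MHz and
 * the < 200 comparison is a +/- 2 MHz match window around each
 * bypass-capable frequency.
 */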
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 value;

    if (pi->caps_enable_dfs_bypass) {
        if (kv_get_clock_difference(clk, 40000) < 200)
            value = 3;
        else if (kv_get_clock_difference(clk, 30000) < 200)
            value = 2;
        else if (kv_get_clock_difference(clk, 20000) < 200)
            value = 7;
        else if (kv_get_clock_difference(clk, 15000) < 200)
            value = 6;
        else if (kv_get_clock_difference(clk, 10000) < 200)
            value = 8;
        else
            value = 0;
    } else {
        value = 0;
    }

    return value;
}

static int kv_populate_uvd_table(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_uvd_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
    struct atom_clock_dividers dividers;
    int ret;
    u32 i;

    if (table == NULL || table->count == 0)
        return 0;

    pi->uvd_level_count = 0;
    for (i = 0; i < table->count; i++) {
        if (pi->high_voltage_t &&
            (pi->high_voltage_t < table->entries[i].v))
            break;

        pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
        pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
        pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

        pi->uvd_level[i].VClkBypassCntl =
            (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
        pi->uvd_level[i].DClkBypassCntl =
            (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             table->entries[i].vclk, false, &dividers);
        if (ret)
            return ret;
        pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             table->entries[i].dclk, false, &dividers);
        if (ret)
            return ret;
        pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

        pi->uvd_level_count++;
    }

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
                               (u8 *)&pi->uvd_level_count,
                               sizeof(u8), pi->sram_end);
    if (ret)
        return ret;

    pi->uvd_interval = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, UVDInterval),
                               &pi->uvd_interval,
                               sizeof(u8), pi->sram_end);
    if (ret)
        return ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, UvdLevel),
                               (u8 *)&pi->uvd_level,
                               sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
                               pi->sram_end);

    return ret;
}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;
    u32 i;
    struct radeon_vce_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
    struct atom_clock_dividers dividers;

    if (table == NULL || table->count == 0)
        return 0;

    pi->vce_level_count = 0;
    for (i = 0; i < table->count; i++) {
        if (pi->high_voltage_t &&
            pi->high_voltage_t < table->entries[i].v)
            break;

        pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
        pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

        pi->vce_level[i].ClkBypassCntl =
            (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             table->entries[i].evclk, false, &dividers);
        if (ret)
            return ret;
        pi->vce_level[i].Divider = (u8)dividers.post_div;

        pi->vce_level_count++;
    }

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
                               (u8 *)&pi->vce_level_count,
                               sizeof(u8),
                               pi->sram_end);
    if (ret)
        return ret;

    pi->vce_interval = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, VCEInterval),
                               (u8 *)&pi->vce_interval,
                               sizeof(u8),
                               pi->sram_end);
    if (ret)
        return ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, VceLevel),
                               (u8 *)&pi->vce_level,
                               sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
                               pi->sram_end);

    return ret;
}
static int kv_populate_samu_table(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
    struct atom_clock_dividers dividers;
    int ret;
    u32 i;

    if (table == NULL || table->count == 0)
        return 0;

    pi->samu_level_count = 0;
    for (i = 0; i < table->count; i++) {
        if (pi->high_voltage_t &&
            pi->high_voltage_t < table->entries[i].v)
            break;

        pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
        pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

        pi->samu_level[i].ClkBypassCntl =
            (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             table->entries[i].clk, false, &dividers);
        if (ret)
            return ret;
        pi->samu_level[i].Divider = (u8)dividers.post_div;

        pi->samu_level_count++;
    }

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
                               (u8 *)&pi->samu_level_count,
                               sizeof(u8),
                               pi->sram_end);
    if (ret)
        return ret;

    pi->samu_interval = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
                               (u8 *)&pi->samu_interval,
                               sizeof(u8),
                               pi->sram_end);
    if (ret)
        return ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, SamuLevel),
                               (u8 *)&pi->samu_level,
                               sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
                               pi->sram_end);

    return ret;
}

static int kv_populate_acp_table(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
    struct atom_clock_dividers dividers;
    int ret;
    u32 i;

    if (table == NULL || table->count == 0)
        return 0;

    pi->acp_level_count = 0;
    for (i = 0; i < table->count; i++) {
        pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
        pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             table->entries[i].clk, false, &dividers);
        if (ret)
            return ret;
        pi->acp_level[i].Divider = (u8)dividers.post_div;

        pi->acp_level_count++;
    }

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
                               (u8 *)&pi->acp_level_count,
                               sizeof(u8),
                               pi->sram_end);
    if (ret)
        return ret;

    pi->acp_interval = 1;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, ACPInterval),
                               (u8 *)&pi->acp_interval,
                               sizeof(u8),
                               pi->sram_end);
    if (ret)
        return ret;

    ret = kv_copy_bytes_to_smc(rdev,
                               pi->dpm_table_start +
                               offsetof(SMU7_Fusion_DpmTable, AcpLevel),
                               (u8 *)&pi->acp_level,
                               sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
                               pi->sram_end);

    return ret;
}
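
/*
 * The four kv_populate_*_table() helpers above share one pattern: drop
 * levels above high_voltage_t (UVD/VCE/SAMU only), compute a PLL post
 * divider per level, then push three pieces of state into SMU RAM at
 * dpm_table_start: the level count, a sampling interval of 1, and the
 * level array itself.
 */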
static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 i;
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

    if (table && table->count) {
        for (i = 0; i < pi->graphics_dpm_level_count; i++) {
            if (pi->caps_enable_dfs_bypass) {
                if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 3;
                else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 2;
                else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 7;
                else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 6;
                else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 8;
                else
                    pi->graphics_level[i].ClkBypassCntl = 0;
            } else {
                pi->graphics_level[i].ClkBypassCntl = 0;
            }
        }
    } else {
        struct sumo_sclk_voltage_mapping_table *table =
            &pi->sys_info.sclk_voltage_mapping_table;
        for (i = 0; i < pi->graphics_dpm_level_count; i++) {
            if (pi->caps_enable_dfs_bypass) {
                if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 3;
                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 2;
                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 7;
                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 6;
                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
                    pi->graphics_level[i].ClkBypassCntl = 8;
                else
                    pi->graphics_level[i].ClkBypassCntl = 0;
            } else {
                pi->graphics_level[i].ClkBypassCntl = 0;
            }
        }
    }
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
    return kv_notify_message_to_smu(rdev, enable ?
                                    PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->acp_boot_level = 0xff;
}
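
/*
 * kv_power_info keeps shadow copies of the current and requested power
 * states. The two helpers below copy both the generic radeon_ps and the
 * KV-private state, then repoint ps_priv at the local copy so each pair
 * stays self-consistent.
 */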
static void kv_update_current_ps(struct radeon_device *rdev,
                                 struct radeon_ps *rps)
{
    struct kv_ps *new_ps = kv_get_ps(rps);
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->current_rps = *rps;
    pi->current_ps = *new_ps;
    pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
                                   struct radeon_ps *rps)
{
    struct kv_ps *new_ps = kv_get_ps(rps);
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->requested_rps = *rps;
    pi->requested_ps = *new_ps;
    pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    if (pi->bapm_enable) {
        ret = kv_smc_bapm_enable(rdev, enable);
        if (ret)
            DRM_ERROR("kv_smc_bapm_enable failed\n");
    }
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
    u32 thermal_int;

    thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
    if (enable)
        thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
    else
        thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
    WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
}

int kv_dpm_enable(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret;

    ret = kv_process_firmware_header(rdev);
    if (ret) {
        DRM_ERROR("kv_process_firmware_header failed\n");
        return ret;
    }
    kv_init_fps_limits(rdev);
    kv_init_graphics_levels(rdev);
    ret = kv_program_bootup_state(rdev);
    if (ret) {
        DRM_ERROR("kv_program_bootup_state failed\n");
        return ret;
    }
    kv_calculate_dfs_bypass_settings(rdev);
    ret = kv_upload_dpm_settings(rdev);
    if (ret) {
        DRM_ERROR("kv_upload_dpm_settings failed\n");
        return ret;
    }
    ret = kv_populate_uvd_table(rdev);
    if (ret) {
        DRM_ERROR("kv_populate_uvd_table failed\n");
        return ret;
    }
    ret = kv_populate_vce_table(rdev);
    if (ret) {
        DRM_ERROR("kv_populate_vce_table failed\n");
        return ret;
    }
    ret = kv_populate_samu_table(rdev);
    if (ret) {
        DRM_ERROR("kv_populate_samu_table failed\n");
        return ret;
    }
    ret = kv_populate_acp_table(rdev);
    if (ret) {
        DRM_ERROR("kv_populate_acp_table failed\n");
        return ret;
    }
    kv_program_vc(rdev);
#if 0
    kv_initialize_hardware_cac_manager(rdev);
#endif
    kv_start_am(rdev);
    if (pi->enable_auto_thermal_throttling) {
        ret = kv_enable_auto_thermal_throttling(rdev);
        if (ret) {
            DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
            return ret;
        }
    }
    ret = kv_enable_dpm_voltage_scaling(rdev);
    if (ret) {
        DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
        return ret;
    }
    ret = kv_set_dpm_interval(rdev);
    if (ret) {
        DRM_ERROR("kv_set_dpm_interval failed\n");
        return ret;
    }
    ret = kv_set_dpm_boot_state(rdev);
    if (ret) {
        DRM_ERROR("kv_set_dpm_boot_state failed\n");
        return ret;
    }
    ret = kv_enable_ulv(rdev, true);
    if (ret) {
        DRM_ERROR("kv_enable_ulv failed\n");
        return ret;
    }
    kv_start_dpm(rdev);
    ret = kv_enable_didt(rdev, true);
    if (ret) {
        DRM_ERROR("kv_enable_didt failed\n");
        return ret;
    }
    ret = kv_enable_smc_cac(rdev, true);
    if (ret) {
        DRM_ERROR("kv_enable_smc_cac failed\n");
        return ret;
    }

    kv_reset_acp_boot_level(rdev);

    ret = kv_smc_bapm_enable(rdev, false);
    if (ret) {
        DRM_ERROR("kv_smc_bapm_enable failed\n");
        return ret;
    }

    kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

    return ret;
}
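
/*
 * Thermal setup is intentionally absent from kv_dpm_enable() above:
 * kv_dpm_late_enable() programs the temperature range and interrupt
 * only once an IRQ handler is known to be installed, and also gates
 * the unused multimedia blocks (UVD/VCE/SAMU/ACP) at that point.
 */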
int kv_dpm_late_enable(struct radeon_device *rdev)
{
    int ret = 0;

    if (rdev->irq.installed &&
        r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
        ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
        if (ret) {
            DRM_ERROR("kv_set_thermal_temperature_range failed\n");
            return ret;
        }
        kv_enable_thermal_int(rdev, true);
    }

    /* powerdown unused blocks for now */
    kv_dpm_powergate_acp(rdev, true);
    kv_dpm_powergate_samu(rdev, true);
    kv_dpm_powergate_vce(rdev, true);
    kv_dpm_powergate_uvd(rdev, true);

    return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
    kv_smc_bapm_enable(rdev, false);

    if (rdev->family == CHIP_MULLINS)
        kv_enable_nb_dpm(rdev, false);

    /* powerup blocks */
    kv_dpm_powergate_acp(rdev, false);
    kv_dpm_powergate_samu(rdev, false);
    kv_dpm_powergate_vce(rdev, false);
    kv_dpm_powergate_uvd(rdev, false);

    kv_enable_smc_cac(rdev, false);
    kv_enable_didt(rdev, false);
    kv_clear_vc(rdev);
    kv_stop_dpm(rdev);
    kv_enable_ulv(rdev, false);
    kv_reset_am(rdev);
    kv_enable_thermal_int(rdev, false);

    kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
                                      u16 reg_offset, u32 value)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
                                (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
                                     u16 reg_offset, u32 *value)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
                                  value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret = 0;

    if (pi->caps_fps) {
        u16 tmp;

        tmp = 45;
        pi->fps_high_t = cpu_to_be16(tmp);
        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
                                   (u8 *)&pi->fps_high_t,
                                   sizeof(u16), pi->sram_end);

        tmp = 30;
        pi->fps_low_t = cpu_to_be16(tmp);

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
                                   (u8 *)&pi->fps_low_t,
                                   sizeof(u16), pi->sram_end);
    }
    return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    pi->uvd_power_gated = false;
    pi->vce_power_gated = false;
    pi->samu_power_gated = false;
    pi->acp_power_gated = false;
}
static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
    return kv_notify_message_to_smu(rdev, enable ?
                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
    return kv_notify_message_to_smu(rdev, enable ?
                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
    return kv_notify_message_to_smu(rdev, enable ?
                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
    return kv_notify_message_to_smu(rdev, enable ?
                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_uvd_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
    int ret;
    u32 mask;

    if (!gate) {
        if (table->count)
            pi->uvd_boot_level = table->count - 1;
        else
            pi->uvd_boot_level = 0;

        if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
            mask = 1 << pi->uvd_boot_level;
        } else {
            mask = 0x1f;
        }

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
                                   (uint8_t *)&pi->uvd_boot_level,
                                   sizeof(u8), pi->sram_end);
        if (ret)
            return ret;

        kv_send_msg_to_smc_with_parameter(rdev,
                                          PPSMC_MSG_UVDDPM_SetEnabledMask,
                                          mask);
    }

    return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
    u8 i;
    struct radeon_vce_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

    for (i = 0; i < table->count; i++) {
        if (table->entries[i].evclk >= evclk)
            break;
    }

    return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
                             struct radeon_ps *radeon_new_state,
                             struct radeon_ps *radeon_current_state)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_vce_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
    int ret;

    if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
        kv_dpm_powergate_vce(rdev, false);
        /* turn the clocks on when encoding */
        cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
        if (pi->caps_stable_p_state)
            pi->vce_boot_level = table->count - 1;
        else
            pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
                                   (u8 *)&pi->vce_boot_level,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
            return ret;

        if (pi->caps_stable_p_state)
            kv_send_msg_to_smc_with_parameter(rdev,
                                              PPSMC_MSG_VCEDPM_SetEnabledMask,
                                              (1 << pi->vce_boot_level));

        kv_enable_vce_dpm(rdev, true);
    } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
        kv_enable_vce_dpm(rdev, false);
        /* turn the clocks off when not encoding */
        cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
        kv_dpm_powergate_vce(rdev, true);
    }

    return 0;
}
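
/*
 * Encode transitions in kv_update_vce_dpm() above are ordered: ungate
 * VCE power and disable its clockgating before bringing VCE DPM up,
 * and take VCE DPM down before re-enabling clockgating and gating the
 * block again.
 */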
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
    int ret;

    if (!gate) {
        if (pi->caps_stable_p_state)
            pi->samu_boot_level = table->count - 1;
        else
            pi->samu_boot_level = 0;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
                                   (u8 *)&pi->samu_boot_level,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
            return ret;

        if (pi->caps_stable_p_state)
            kv_send_msg_to_smc_with_parameter(rdev,
                                              PPSMC_MSG_SAMUDPM_SetEnabledMask,
                                              (1 << pi->samu_boot_level));
    }

    return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
    u8 i;
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

    for (i = 0; i < table->count; i++) {
        if (table->entries[i].clk >= 0) /* XXX */
            break;
    }

    if (i >= table->count)
        i = table->count - 1;

    return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    u8 acp_boot_level;

    if (!pi->caps_stable_p_state) {
        acp_boot_level = kv_get_acp_boot_level(rdev);
        if (acp_boot_level != pi->acp_boot_level) {
            pi->acp_boot_level = acp_boot_level;
            kv_send_msg_to_smc_with_parameter(rdev,
                                              PPSMC_MSG_ACPDPM_SetEnabledMask,
                                              (1 << pi->acp_boot_level));
        }
    }
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
    int ret;

    if (!gate) {
        if (pi->caps_stable_p_state)
            pi->acp_boot_level = table->count - 1;
        else
            pi->acp_boot_level = kv_get_acp_boot_level(rdev);

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
                                   (u8 *)&pi->acp_boot_level,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
            return ret;

        if (pi->caps_stable_p_state)
            kv_send_msg_to_smc_with_parameter(rdev,
                                              PPSMC_MSG_ACPDPM_SetEnabledMask,
                                              (1 << pi->acp_boot_level));
    }

    return kv_enable_acp_dpm(rdev, !gate);
}

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (pi->uvd_power_gated == gate)
        return;

    pi->uvd_power_gated = gate;

    if (gate) {
        if (pi->caps_uvd_pg) {
            uvd_v1_0_stop(rdev);
            cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
        }
        kv_update_uvd_dpm(rdev, gate);
        if (pi->caps_uvd_pg)
            kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
    } else {
        if (pi->caps_uvd_pg) {
            kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
            uvd_v4_2_resume(rdev);
            uvd_v1_0_start(rdev);
            cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
        }
        kv_update_uvd_dpm(rdev, gate);
    }
}

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (pi->vce_power_gated == gate)
        return;

    pi->vce_power_gated = gate;

    if (gate) {
        if (pi->caps_vce_pg) {
            /* XXX do we need a vce_v1_0_stop() ? */
            kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
        }
    } else {
        if (pi->caps_vce_pg) {
            kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
            vce_v2_0_resume(rdev);
            vce_v1_0_start(rdev);
        }
    }
}
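
/*
 * Powergating order in these helpers: on the way down a block's DPM is
 * gated before the SMU PowerOFF message; on the way up PowerON is sent
 * first and DPM is re-enabled afterwards.
 */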
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (pi->samu_power_gated == gate)
        return;

    pi->samu_power_gated = gate;

    if (gate) {
        kv_update_samu_dpm(rdev, true);
        if (pi->caps_samu_pg)
            kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
    } else {
        if (pi->caps_samu_pg)
            kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
        kv_update_samu_dpm(rdev, false);
    }
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (pi->acp_power_gated == gate)
        return;

    if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
        return;

    pi->acp_power_gated = gate;

    if (gate) {
        kv_update_acp_dpm(rdev, true);
        if (pi->caps_acp_pg)
            kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
    } else {
        if (pi->caps_acp_pg)
            kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
        kv_update_acp_dpm(rdev, false);
    }
}

static void kv_set_valid_clock_range(struct radeon_device *rdev,
                                     struct radeon_ps *new_rps)
{
    struct kv_ps *new_ps = kv_get_ps(new_rps);
    struct kv_power_info *pi = kv_get_pi(rdev);
    u32 i;
    struct radeon_clock_voltage_dependency_table *table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

    if (table && table->count) {
        for (i = 0; i < pi->graphics_dpm_level_count; i++) {
            if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
                (i == (pi->graphics_dpm_level_count - 1))) {
                pi->lowest_valid = i;
                break;
            }
        }

        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
            if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
                break;
        }
        pi->highest_valid = i;

        if (pi->lowest_valid > pi->highest_valid) {
            if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
                (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
                pi->highest_valid = pi->lowest_valid;
            else
                pi->lowest_valid = pi->highest_valid;
        }
    } else {
        struct sumo_sclk_voltage_mapping_table *table =
            &pi->sys_info.sclk_voltage_mapping_table;

        for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
            if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
                i == (int)(pi->graphics_dpm_level_count - 1)) {
                pi->lowest_valid = i;
                break;
            }
        }

        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
            if (table->entries[i].sclk_frequency <=
                new_ps->levels[new_ps->num_levels - 1].sclk)
                break;
        }
        pi->highest_valid = i;

        if (pi->lowest_valid > pi->highest_valid) {
            if ((new_ps->levels[0].sclk -
                 table->entries[pi->highest_valid].sclk_frequency) >
                (table->entries[pi->lowest_valid].sclk_frequency -
                 new_ps->levels[new_ps->num_levels - 1].sclk))
                pi->highest_valid = pi->lowest_valid;
            else
                pi->lowest_valid = pi->highest_valid;
        }
    }
}
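
/*
 * kv_update_dfs_bypass_settings() below patches a single byte in the
 * SMU copy of the DPM table: the ClkBypassCntl field of the boot
 * graphics level, chosen from need_dfs_bypass for the new state.
 */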
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
                                         struct radeon_ps *new_rps)
{
    struct kv_ps *new_ps = kv_get_ps(new_rps);
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret = 0;
    u8 clk_bypass_cntl;

    if (pi->caps_enable_dfs_bypass) {
        clk_bypass_cntl = new_ps->need_dfs_bypass ?
            pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
        ret = kv_copy_bytes_to_smc(rdev,
                                   (pi->dpm_table_start +
                                    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
                                    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
                                    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
                                   &clk_bypass_cntl,
                                   sizeof(u8), pi->sram_end);
    }

    return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev,
                            bool enable)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    int ret = 0;

    if (enable) {
        if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
            ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
            if (ret == 0)
                pi->nb_dpm_enabled = true;
        }
    } else {
        if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
            ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
            if (ret == 0)
                pi->nb_dpm_enabled = false;
        }
    }

    return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
                                   enum radeon_dpm_forced_level level)
{
    int ret;

    if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
        ret = kv_force_dpm_highest(rdev);
        if (ret)
            return ret;
    } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
        ret = kv_force_dpm_lowest(rdev);
        if (ret)
            return ret;
    } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
        ret = kv_unforce_levels(rdev);
        if (ret)
            return ret;
    }

    rdev->pm.dpm.forced_level = level;

    return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
    struct radeon_ps *new_ps = &requested_ps;

    kv_update_requested_ps(rdev, new_ps);

    kv_apply_state_adjust_rules(rdev,
                                &pi->requested_rps,
                                &pi->current_rps);

    return 0;
}

int kv_dpm_set_power_state(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_ps *new_ps = &pi->requested_rps;
    struct radeon_ps *old_ps = &pi->current_rps;
    int ret;

    if (pi->bapm_enable) {
        ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
        if (ret) {
            DRM_ERROR("kv_smc_bapm_enable failed\n");
            return ret;
        }
    }

    if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
        if (pi->enable_dpm) {
            kv_set_valid_clock_range(rdev, new_ps);
            kv_update_dfs_bypass_settings(rdev, new_ps);
            ret = kv_calculate_ds_divider(rdev);
            if (ret) {
                DRM_ERROR("kv_calculate_ds_divider failed\n");
                return ret;
            }
            kv_calculate_nbps_level_settings(rdev);
            kv_calculate_dpm_settings(rdev);
            kv_force_lowest_valid(rdev);
            kv_enable_new_levels(rdev);
            kv_upload_dpm_settings(rdev);
            kv_program_nbps_index_settings(rdev, new_ps);
            kv_unforce_levels(rdev);
            kv_set_enabled_levels(rdev);
            kv_force_lowest_valid(rdev);
            kv_unforce_levels(rdev);

            ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
            if (ret) {
                DRM_ERROR("kv_update_vce_dpm failed\n");
                return ret;
            }
            kv_update_sclk_t(rdev);
            if (rdev->family == CHIP_MULLINS)
                kv_enable_nb_dpm(rdev, true);
        }
    } else {
        if (pi->enable_dpm) {
            kv_set_valid_clock_range(rdev, new_ps);
            kv_update_dfs_bypass_settings(rdev, new_ps);
            ret = kv_calculate_ds_divider(rdev);
            if (ret) {
                DRM_ERROR("kv_calculate_ds_divider failed\n");
                return ret;
            }
            kv_calculate_nbps_level_settings(rdev);
            kv_calculate_dpm_settings(rdev);
            kv_freeze_sclk_dpm(rdev, true);
            kv_upload_dpm_settings(rdev);
            kv_program_nbps_index_settings(rdev, new_ps);
            kv_freeze_sclk_dpm(rdev, false);
            kv_set_enabled_levels(rdev);
            ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
            if (ret) {
                DRM_ERROR("kv_update_vce_dpm failed\n");
                return ret;
            }
            kv_update_acp_boot_level(rdev);
            kv_update_sclk_t(rdev);
            kv_enable_nb_dpm(rdev, true);
        }
    }

    return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);
    struct radeon_ps *new_ps = &pi->requested_rps;

    kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
    sumo_take_smu_control(rdev, true);
    kv_init_powergate_state(rdev);
    kv_init_sclk_t(rdev);
}

#if 0
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
        kv_force_lowest_valid(rdev);
        kv_init_graphics_levels(rdev);
        kv_program_bootup_state(rdev);
        kv_upload_dpm_settings(rdev);
        kv_force_lowest_valid(rdev);
        kv_unforce_levels(rdev);
    } else {
        kv_init_graphics_levels(rdev);
        kv_program_bootup_state(rdev);
        kv_freeze_sclk_dpm(rdev, true);
        kv_upload_dpm_settings(rdev);
        kv_freeze_sclk_dpm(rdev, false);
        kv_set_enabled_level(rdev, pi->graphics_boot_level);
    }
}
#endif

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
                                                struct radeon_clock_and_voltage_limits *table)
{
    struct kv_power_info *pi = kv_get_pi(rdev);

    if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
        int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
        table->sclk =
            pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
        table->vddc =
            kv_convert_2bit_index_to_voltage(rdev,
                                             pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
    }

    table->mclk = pi->sys_info.nbp_memory_clock[0];
}
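
/*
 * kv_patch_voltage_values() rewrites the BIOS multimedia dependency
 * tables in place, converting their stored 8-bit voltage indices with
 * kv_convert_8bit_index_to_voltage() so later comparisons against
 * high_voltage_t operate on converted values.
 */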
static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table *vce_table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *samu_table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *acp_table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

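/* Walk the sleep divider IDs from deepest to shallowest and pick the
 * first one that keeps the divided sclk at or above the deep-sleep
 * floor; 0 (no deep sleep) is returned when sclk is already at the
 * floor or sclk deep sleep is disabled. */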
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		/* stable p-state targets 75% of the max sclk, rounded down
		 * to the nearest dependency table entry */
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i < 0) /* no entry was low enough, use the lowest one */
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
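				/* same high-voltage clamp as the branch
				 * above, but sourced from the sumo sclk
				 * mapping table */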
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

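		/* On battery with NB DPM available, raise the lowest level's
		 * up-hysteresis and let it force NB PS1, presumably to bias
		 * the system toward the power-saving NB state. */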
		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(rdev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
{
	u32 new_mask = (1 << level);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

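/* Propagate the NB p-state indices chosen in kv_apply_state_adjust_rules()
 * to the NB_DPM_CONFIG_1 register.  Kabini/Mullins return early since they
 * do not use this mechanism. */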
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
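		/* "ENABEL" is the spelling used by the atombios header for
		 * this flag, so it is kept verbatim below. */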
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

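/* Walk the BIOS PowerPlay state array.  Each state entry is a two-byte
 * header (DPM level count plus non-clock-info index) followed by one
 * clock-info index byte per level, which is why the loop below advances
 * by 2 + ucNumDPMLevels. */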
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps), GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			/* don't leak the states allocated so far */
			while (--i >= 0)
				kfree(rdev->pm.dpm.ps[i].ps_priv);
			kfree(rdev->pm.dpm.ps);
			return -EINVAL;
		}
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			while (--i >= 0)
				kfree(rdev->pm.dpm.ps[i].ps_priv);
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
		pi->enable_nb_dpm = false;
	else
		pi->enable_nb_dpm = true;

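	/* The capability flags below are conservative defaults: DIDT and
	 * the VCE/SAMU/ACP powergating caps stay off, while UVD
	 * powergating/DPM and sclk deep sleep are enabled. */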
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (radeon_bapm == -1) {
		/* only enable bapm on KB, ML by default */
		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
			pi->bapm_enable = true;
		else
			pi->bapm_enable = false;
	} else if (radeon_bapm == 0) {
		pi->bapm_enable = false;
	} else {
		pi->bapm_enable = true;
	}
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

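/* Read the active sclk level back from the SMC rather than assuming the
 * last request took effect.  mclk is not DPM-managed by this driver, so
 * the mclk query below just reports the boot-up UMA clock. */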
"dis" : "en"); 2815 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2816 current_index, sclk, vddc); 2817 } 2818 } 2819 2820 u32 kv_dpm_get_current_sclk(struct radeon_device *rdev) 2821 { 2822 struct kv_power_info *pi = kv_get_pi(rdev); 2823 u32 current_index = 2824 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> 2825 CURR_SCLK_INDEX_SHIFT; 2826 u32 sclk; 2827 2828 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2829 return 0; 2830 } else { 2831 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2832 return sclk; 2833 } 2834 } 2835 2836 u32 kv_dpm_get_current_mclk(struct radeon_device *rdev) 2837 { 2838 struct kv_power_info *pi = kv_get_pi(rdev); 2839 2840 return pi->sys_info.bootup_uma_clk; 2841 } 2842 2843 void kv_dpm_print_power_state(struct radeon_device *rdev, 2844 struct radeon_ps *rps) 2845 { 2846 int i; 2847 struct kv_ps *ps = kv_get_ps(rps); 2848 2849 r600_dpm_print_class_info(rps->class, rps->class2); 2850 r600_dpm_print_cap_info(rps->caps); 2851 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2852 for (i = 0; i < ps->num_levels; i++) { 2853 struct kv_pl *pl = &ps->levels[i]; 2854 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2855 i, pl->sclk, 2856 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index)); 2857 } 2858 r600_dpm_print_ps_status(rdev, rps); 2859 } 2860 2861 void kv_dpm_fini(struct radeon_device *rdev) 2862 { 2863 int i; 2864 2865 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 2866 kfree(rdev->pm.dpm.ps[i].ps_priv); 2867 } 2868 kfree(rdev->pm.dpm.ps); 2869 kfree(rdev->pm.dpm.priv); 2870 r600_free_extended_power_table(rdev); 2871 } 2872 2873 void kv_dpm_display_configuration_changed(struct radeon_device *rdev) 2874 { 2875 2876 } 2877 2878 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low) 2879 { 2880 struct kv_power_info *pi = kv_get_pi(rdev); 2881 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2882 2883 if (low) 2884 return requested_state->levels[0].sclk; 2885 else 2886 return requested_state->levels[requested_state->num_levels - 1].sclk; 2887 } 2888 2889 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low) 2890 { 2891 struct kv_power_info *pi = kv_get_pi(rdev); 2892 2893 return pi->sys_info.bootup_uma_clk; 2894 } 2895