1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/seq_file.h> 29 #include <linux/firmware.h> 30 #include <linux/module.h> 31 #include <drm/drmP.h> 32 #include <uapi_drm/radeon_drm.h> 33 #include "radeon.h" 34 #include "radeon_asic.h" 35 #include "radeon_audio.h" 36 #include "radeon_mode.h" 37 #include "r600d.h" 38 #include "atom.h" 39 #include "avivod.h" 40 #include "radeon_ucode.h" 41 42 /* Firmware Names */ 43 MODULE_FIRMWARE("radeon/R600_pfp.bin"); 44 MODULE_FIRMWARE("radeon/R600_me.bin"); 45 MODULE_FIRMWARE("radeon/RV610_pfp.bin"); 46 MODULE_FIRMWARE("radeon/RV610_me.bin"); 47 MODULE_FIRMWARE("radeon/RV630_pfp.bin"); 48 MODULE_FIRMWARE("radeon/RV630_me.bin"); 49 MODULE_FIRMWARE("radeon/RV620_pfp.bin"); 50 MODULE_FIRMWARE("radeon/RV620_me.bin"); 51 MODULE_FIRMWARE("radeon/RV635_pfp.bin"); 52 MODULE_FIRMWARE("radeon/RV635_me.bin"); 53 MODULE_FIRMWARE("radeon/RV670_pfp.bin"); 54 MODULE_FIRMWARE("radeon/RV670_me.bin"); 55 MODULE_FIRMWARE("radeon/RS780_pfp.bin"); 56 MODULE_FIRMWARE("radeon/RS780_me.bin"); 57 MODULE_FIRMWARE("radeon/RV770_pfp.bin"); 58 MODULE_FIRMWARE("radeon/RV770_me.bin"); 59 MODULE_FIRMWARE("radeon/RV770_smc.bin"); 60 MODULE_FIRMWARE("radeon/RV730_pfp.bin"); 61 MODULE_FIRMWARE("radeon/RV730_me.bin"); 62 MODULE_FIRMWARE("radeon/RV730_smc.bin"); 63 MODULE_FIRMWARE("radeon/RV740_smc.bin"); 64 MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65 MODULE_FIRMWARE("radeon/RV710_me.bin"); 66 MODULE_FIRMWARE("radeon/RV710_smc.bin"); 67 MODULE_FIRMWARE("radeon/R600_rlc.bin"); 68 MODULE_FIRMWARE("radeon/R700_rlc.bin"); 69 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); 70 MODULE_FIRMWARE("radeon/CEDAR_me.bin"); 71 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); 72 MODULE_FIRMWARE("radeon/CEDAR_smc.bin"); 73 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); 74 MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); 75 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); 76 MODULE_FIRMWARE("radeon/REDWOOD_smc.bin"); 77 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); 78 MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); 79 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); 80 MODULE_FIRMWARE("radeon/JUNIPER_smc.bin"); 81 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); 82 MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); 83 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); 84 MODULE_FIRMWARE("radeon/CYPRESS_smc.bin"); 85 MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 86 MODULE_FIRMWARE("radeon/PALM_me.bin"); 87 MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 88 MODULE_FIRMWARE("radeon/SUMO_pfp.bin"); 89 MODULE_FIRMWARE("radeon/SUMO_me.bin"); 90 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); 91 MODULE_FIRMWARE("radeon/SUMO2_me.bin"); 92 MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 93 MODULE_FIRMWARE("radeon/OLAND_me.bin"); 94 MODULE_FIRMWARE("radeon/OLAND_ce.bin"); 95 MODULE_FIRMWARE("radeon/OLAND_mc.bin"); 96 MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); 97 98 static const u32 crtc_offsets[2] = 99 { 100 0, 101 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL 102 }; 103 104 int r600_debugfs_mc_info_init(struct radeon_device *rdev); 105 106 /* r600,rv610,rv630,rv620,rv635,rv670 */ 107 static void r600_gpu_init(struct radeon_device *rdev); 108 void r600_irq_disable(struct radeon_device *rdev); 109 static void r600_pcie_gen2_enable(struct radeon_device *rdev); 110 111 /* 112 * Indirect registers accessor 113 */ 114 u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 115 { 116 unsigned long flags; 117 u32 r; 118 119 spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 120 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 121 r = RREG32(R600_RCU_DATA); 122 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 123 return r; 124 } 125 126 void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 127 { 128 unsigned long flags; 129 130 spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 131 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 132 WREG32(R600_RCU_DATA, (v)); 133 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 134 } 135 136 u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 137 { 138 unsigned long flags; 139 u32 r; 140 141 spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 142 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 143 r = RREG32(R600_UVD_CTX_DATA); 144 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 145 return r; 146 } 147 148 void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 149 { 150 unsigned long flags; 151 152 spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 153 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 154 WREG32(R600_UVD_CTX_DATA, (v)); 155 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 156 } 157 158 /** 159 * r600_get_allowed_info_register - fetch the register for the info ioctl 160 * 161 * @rdev: radeon_device pointer 162 * @reg: register offset in bytes 163 * @val: register value 164 * 165 * Returns 0 for success or -EINVAL for an invalid register 166 * 167 */ 168 int r600_get_allowed_info_register(struct radeon_device *rdev, 169 u32 reg, u32 *val) 170 { 171 switch (reg) { 172 case GRBM_STATUS: 173 case GRBM_STATUS2: 174 case R_000E50_SRBM_STATUS: 175 case DMA_STATUS_REG: 176 case UVD_STATUS: 177 *val = RREG32(reg); 178 return 0; 179 default: 180 return -EINVAL; 181 } 182 } 183 184 /** 185 * r600_get_xclk - get the xclk 186 * 187 * @rdev: radeon_device pointer 188 * 189 * Returns the reference clock used by the gfx engine 190 * (r6xx, IGPs, APUs). 191 */ 192 u32 r600_get_xclk(struct radeon_device *rdev) 193 { 194 return rdev->clock.spll.reference_freq; 195 } 196 197 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) 198 { 199 unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0; 200 int r; 201 202 /* bypass vclk and dclk with bclk */ 203 WREG32_P(CG_UPLL_FUNC_CNTL_2, 204 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1), 205 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK)); 206 207 /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */ 208 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~( 209 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK)); 210 211 if (rdev->family >= CHIP_RS780) 212 WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL, 213 ~UPLL_BYPASS_CNTL); 214 215 if (!vclk || !dclk) { 216 /* keep the Bypass mode, put PLL to sleep */ 217 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); 218 return 0; 219 } 220 221 if (rdev->clock.spll.reference_freq == 10000) 222 ref_div = 34; 223 else 224 ref_div = 4; 225 226 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000, 227 ref_div + 1, 0xFFF, 2, 30, ~0, 228 &fb_div, &vclk_div, &dclk_div); 229 if (r) 230 return r; 231 232 if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780) 233 fb_div >>= 1; 234 else 235 fb_div |= 1; 236 237 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); 238 if (r) 239 return r; 240 241 /* assert PLL_RESET */ 242 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK); 243 244 /* For RS780 we have to choose ref clk */ 245 if (rdev->family >= CHIP_RS780) 246 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK, 247 ~UPLL_REFCLK_SRC_SEL_MASK); 248 249 /* set the required fb, ref and post divder values */ 250 WREG32_P(CG_UPLL_FUNC_CNTL, 251 UPLL_FB_DIV(fb_div) | 252 UPLL_REF_DIV(ref_div), 253 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK)); 254 WREG32_P(CG_UPLL_FUNC_CNTL_2, 255 UPLL_SW_HILEN(vclk_div >> 1) | 256 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) | 257 UPLL_SW_HILEN2(dclk_div >> 1) | 258 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) | 259 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK, 260 ~UPLL_SW_MASK); 261 262 /* give the PLL some time to settle */ 263 mdelay(15); 264 265 /* deassert PLL_RESET */ 266 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK); 267 268 mdelay(15); 269 270 /* deassert BYPASS EN */ 271 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); 272 273 if (rdev->family >= CHIP_RS780) 274 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL); 275 276 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); 277 if (r) 278 return r; 279 280 /* switch VCLK and DCLK selection */ 281 WREG32_P(CG_UPLL_FUNC_CNTL_2, 282 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2), 283 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK)); 284 285 mdelay(100); 286 287 return 0; 288 } 289 290 void dce3_program_fmt(struct drm_encoder *encoder) 291 { 292 struct drm_device *dev = encoder->dev; 293 struct radeon_device *rdev = dev->dev_private; 294 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 296 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 297 int bpc = 0; 298 u32 tmp = 0; 299 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE; 300 301 if (connector) { 302 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 303 bpc = radeon_get_monitor_bpc(connector); 304 dither = radeon_connector->dither; 305 } 306 307 /* LVDS FMT is set up by atom */ 308 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) 309 return; 310 311 /* not needed for analog */ 312 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || 313 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) 314 return; 315 316 if (bpc == 0) 317 return; 318 319 switch (bpc) { 320 case 6: 321 if (dither == RADEON_FMT_DITHER_ENABLE) 322 /* XXX sort out optimal dither settings */ 323 tmp |= FMT_SPATIAL_DITHER_EN; 324 else 325 tmp |= FMT_TRUNCATE_EN; 326 break; 327 case 8: 328 if (dither == RADEON_FMT_DITHER_ENABLE) 329 /* XXX sort out optimal dither settings */ 330 tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH); 331 else 332 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH); 333 break; 334 case 10: 335 default: 336 /* not needed */ 337 break; 338 } 339 340 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp); 341 } 342 343 /* get temperature in millidegrees */ 344 int rv6xx_get_temp(struct radeon_device *rdev) 345 { 346 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 347 ASIC_T_SHIFT; 348 int actual_temp = temp & 0xff; 349 350 if (temp & 0x100) 351 actual_temp -= 256; 352 353 return actual_temp * 1000; 354 } 355 356 void r600_pm_get_dynpm_state(struct radeon_device *rdev) 357 { 358 int i; 359 360 rdev->pm.dynpm_can_upclock = true; 361 rdev->pm.dynpm_can_downclock = true; 362 363 /* power state array is low to high, default is first */ 364 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { 365 int min_power_state_index = 0; 366 367 if (rdev->pm.num_power_states > 2) 368 min_power_state_index = 1; 369 370 switch (rdev->pm.dynpm_planned_action) { 371 case DYNPM_ACTION_MINIMUM: 372 rdev->pm.requested_power_state_index = min_power_state_index; 373 rdev->pm.requested_clock_mode_index = 0; 374 rdev->pm.dynpm_can_downclock = false; 375 break; 376 case DYNPM_ACTION_DOWNCLOCK: 377 if (rdev->pm.current_power_state_index == min_power_state_index) { 378 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 379 rdev->pm.dynpm_can_downclock = false; 380 } else { 381 if (rdev->pm.active_crtc_count > 1) { 382 for (i = 0; i < rdev->pm.num_power_states; i++) { 383 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 384 continue; 385 else if (i >= rdev->pm.current_power_state_index) { 386 rdev->pm.requested_power_state_index = 387 rdev->pm.current_power_state_index; 388 break; 389 } else { 390 rdev->pm.requested_power_state_index = i; 391 break; 392 } 393 } 394 } else { 395 if (rdev->pm.current_power_state_index == 0) 396 rdev->pm.requested_power_state_index = 397 rdev->pm.num_power_states - 1; 398 else 399 rdev->pm.requested_power_state_index = 400 rdev->pm.current_power_state_index - 1; 401 } 402 } 403 rdev->pm.requested_clock_mode_index = 0; 404 /* don't use the power state if crtcs are active and no display flag is set */ 405 if ((rdev->pm.active_crtc_count > 0) && 406 (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 407 clock_info[rdev->pm.requested_clock_mode_index].flags & 408 RADEON_PM_MODE_NO_DISPLAY)) { 409 rdev->pm.requested_power_state_index++; 410 } 411 break; 412 case DYNPM_ACTION_UPCLOCK: 413 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { 414 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 415 rdev->pm.dynpm_can_upclock = false; 416 } else { 417 if (rdev->pm.active_crtc_count > 1) { 418 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { 419 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 420 continue; 421 else if (i <= rdev->pm.current_power_state_index) { 422 rdev->pm.requested_power_state_index = 423 rdev->pm.current_power_state_index; 424 break; 425 } else { 426 rdev->pm.requested_power_state_index = i; 427 break; 428 } 429 } 430 } else 431 rdev->pm.requested_power_state_index = 432 rdev->pm.current_power_state_index + 1; 433 } 434 rdev->pm.requested_clock_mode_index = 0; 435 break; 436 case DYNPM_ACTION_DEFAULT: 437 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 438 rdev->pm.requested_clock_mode_index = 0; 439 rdev->pm.dynpm_can_upclock = false; 440 break; 441 case DYNPM_ACTION_NONE: 442 default: 443 DRM_ERROR("Requested mode for not defined action\n"); 444 return; 445 } 446 } else { 447 /* XXX select a power state based on AC/DC, single/dualhead, etc. */ 448 /* for now just select the first power state and switch between clock modes */ 449 /* power state array is low to high, default is first (0) */ 450 if (rdev->pm.active_crtc_count > 1) { 451 rdev->pm.requested_power_state_index = -1; 452 /* start at 1 as we don't want the default mode */ 453 for (i = 1; i < rdev->pm.num_power_states; i++) { 454 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 455 continue; 456 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || 457 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { 458 rdev->pm.requested_power_state_index = i; 459 break; 460 } 461 } 462 /* if nothing selected, grab the default state. */ 463 if (rdev->pm.requested_power_state_index == -1) 464 rdev->pm.requested_power_state_index = 0; 465 } else 466 rdev->pm.requested_power_state_index = 1; 467 468 switch (rdev->pm.dynpm_planned_action) { 469 case DYNPM_ACTION_MINIMUM: 470 rdev->pm.requested_clock_mode_index = 0; 471 rdev->pm.dynpm_can_downclock = false; 472 break; 473 case DYNPM_ACTION_DOWNCLOCK: 474 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { 475 if (rdev->pm.current_clock_mode_index == 0) { 476 rdev->pm.requested_clock_mode_index = 0; 477 rdev->pm.dynpm_can_downclock = false; 478 } else 479 rdev->pm.requested_clock_mode_index = 480 rdev->pm.current_clock_mode_index - 1; 481 } else { 482 rdev->pm.requested_clock_mode_index = 0; 483 rdev->pm.dynpm_can_downclock = false; 484 } 485 /* don't use the power state if crtcs are active and no display flag is set */ 486 if ((rdev->pm.active_crtc_count > 0) && 487 (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 488 clock_info[rdev->pm.requested_clock_mode_index].flags & 489 RADEON_PM_MODE_NO_DISPLAY)) { 490 rdev->pm.requested_clock_mode_index++; 491 } 492 break; 493 case DYNPM_ACTION_UPCLOCK: 494 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { 495 if (rdev->pm.current_clock_mode_index == 496 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { 497 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; 498 rdev->pm.dynpm_can_upclock = false; 499 } else 500 rdev->pm.requested_clock_mode_index = 501 rdev->pm.current_clock_mode_index + 1; 502 } else { 503 rdev->pm.requested_clock_mode_index = 504 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; 505 rdev->pm.dynpm_can_upclock = false; 506 } 507 break; 508 case DYNPM_ACTION_DEFAULT: 509 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 510 rdev->pm.requested_clock_mode_index = 0; 511 rdev->pm.dynpm_can_upclock = false; 512 break; 513 case DYNPM_ACTION_NONE: 514 default: 515 DRM_ERROR("Requested mode for not defined action\n"); 516 return; 517 } 518 } 519 520 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", 521 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 522 clock_info[rdev->pm.requested_clock_mode_index].sclk, 523 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 524 clock_info[rdev->pm.requested_clock_mode_index].mclk, 525 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 526 pcie_lanes); 527 } 528 529 void rs780_pm_init_profile(struct radeon_device *rdev) 530 { 531 if (rdev->pm.num_power_states == 2) { 532 /* default */ 533 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 534 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 535 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 536 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 537 /* low sh */ 538 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; 539 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; 540 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 541 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 542 /* mid sh */ 543 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; 544 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; 545 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 546 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 547 /* high sh */ 548 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; 549 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; 550 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 551 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 552 /* low mh */ 553 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; 554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; 555 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 556 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 557 /* mid mh */ 558 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; 559 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; 560 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 562 /* high mh */ 563 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; 564 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; 565 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 566 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 567 } else if (rdev->pm.num_power_states == 3) { 568 /* default */ 569 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 570 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 571 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 572 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 573 /* low sh */ 574 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; 575 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; 576 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 577 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 578 /* mid sh */ 579 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; 580 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; 581 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 582 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 583 /* high sh */ 584 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 585 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; 586 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 587 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 588 /* low mh */ 589 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; 590 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; 591 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 592 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 593 /* mid mh */ 594 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; 595 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; 596 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 597 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 598 /* high mh */ 599 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; 600 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; 601 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 602 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 603 } else { 604 /* default */ 605 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 606 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 607 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 608 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 609 /* low sh */ 610 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; 611 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; 612 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 613 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 614 /* mid sh */ 615 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; 616 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; 617 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 618 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 619 /* high sh */ 620 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; 621 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; 622 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 623 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 624 /* low mh */ 625 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; 626 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; 627 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 628 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 629 /* mid mh */ 630 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; 631 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; 632 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 633 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 634 /* high mh */ 635 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; 636 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; 637 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 638 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 639 } 640 } 641 642 void r600_pm_init_profile(struct radeon_device *rdev) 643 { 644 int idx; 645 646 if (rdev->family == CHIP_R600) { 647 /* XXX */ 648 /* default */ 649 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 650 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 651 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 652 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 653 /* low sh */ 654 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 655 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 656 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 657 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 658 /* mid sh */ 659 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 660 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 661 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 662 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 663 /* high sh */ 664 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 665 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 666 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 667 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 668 /* low mh */ 669 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 670 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 671 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 672 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 673 /* mid mh */ 674 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 675 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 676 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 677 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 678 /* high mh */ 679 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 680 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 681 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 682 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 683 } else { 684 if (rdev->pm.num_power_states < 4) { 685 /* default */ 686 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 687 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 688 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 689 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 690 /* low sh */ 691 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; 692 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; 693 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 694 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 695 /* mid sh */ 696 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; 697 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; 698 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 699 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 700 /* high sh */ 701 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 702 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; 703 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 704 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 705 /* low mh */ 706 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; 707 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; 708 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 709 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 710 /* low mh */ 711 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; 712 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; 713 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 714 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 715 /* high mh */ 716 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; 717 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; 718 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 719 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 720 } else { 721 /* default */ 722 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 723 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 724 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 725 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 726 /* low sh */ 727 if (rdev->flags & RADEON_IS_MOBILITY) 728 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 729 else 730 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 731 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; 732 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; 733 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 734 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 735 /* mid sh */ 736 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; 737 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; 738 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 739 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 740 /* high sh */ 741 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 742 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; 743 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; 744 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 745 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 746 /* low mh */ 747 if (rdev->flags & RADEON_IS_MOBILITY) 748 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 749 else 750 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 751 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; 752 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; 753 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 754 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 755 /* mid mh */ 756 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; 757 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; 758 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 759 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 760 /* high mh */ 761 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 762 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; 763 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; 764 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 765 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 766 } 767 } 768 } 769 770 void r600_pm_misc(struct radeon_device *rdev) 771 { 772 int req_ps_idx = rdev->pm.requested_power_state_index; 773 int req_cm_idx = rdev->pm.requested_clock_mode_index; 774 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; 775 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 776 777 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 778 /* 0xff01 is a flag rather then an actual voltage */ 779 if (voltage->voltage == 0xff01) 780 return; 781 if (voltage->voltage != rdev->pm.current_vddc) { 782 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 783 rdev->pm.current_vddc = voltage->voltage; 784 DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); 785 } 786 } 787 } 788 789 bool r600_gui_idle(struct radeon_device *rdev) 790 { 791 if (RREG32(GRBM_STATUS) & GUI_ACTIVE) 792 return false; 793 else 794 return true; 795 } 796 797 /* hpd for digital panel detect/disconnect */ 798 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 799 { 800 bool connected = false; 801 802 if (ASIC_IS_DCE3(rdev)) { 803 switch (hpd) { 804 case RADEON_HPD_1: 805 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) 806 connected = true; 807 break; 808 case RADEON_HPD_2: 809 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) 810 connected = true; 811 break; 812 case RADEON_HPD_3: 813 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) 814 connected = true; 815 break; 816 case RADEON_HPD_4: 817 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) 818 connected = true; 819 break; 820 /* DCE 3.2 */ 821 case RADEON_HPD_5: 822 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) 823 connected = true; 824 break; 825 case RADEON_HPD_6: 826 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 827 connected = true; 828 break; 829 default: 830 break; 831 } 832 } else { 833 switch (hpd) { 834 case RADEON_HPD_1: 835 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 836 connected = true; 837 break; 838 case RADEON_HPD_2: 839 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 840 connected = true; 841 break; 842 case RADEON_HPD_3: 843 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 844 connected = true; 845 break; 846 default: 847 break; 848 } 849 } 850 return connected; 851 } 852 853 void r600_hpd_set_polarity(struct radeon_device *rdev, 854 enum radeon_hpd_id hpd) 855 { 856 u32 tmp; 857 bool connected = r600_hpd_sense(rdev, hpd); 858 859 if (ASIC_IS_DCE3(rdev)) { 860 switch (hpd) { 861 case RADEON_HPD_1: 862 tmp = RREG32(DC_HPD1_INT_CONTROL); 863 if (connected) 864 tmp &= ~DC_HPDx_INT_POLARITY; 865 else 866 tmp |= DC_HPDx_INT_POLARITY; 867 WREG32(DC_HPD1_INT_CONTROL, tmp); 868 break; 869 case RADEON_HPD_2: 870 tmp = RREG32(DC_HPD2_INT_CONTROL); 871 if (connected) 872 tmp &= ~DC_HPDx_INT_POLARITY; 873 else 874 tmp |= DC_HPDx_INT_POLARITY; 875 WREG32(DC_HPD2_INT_CONTROL, tmp); 876 break; 877 case RADEON_HPD_3: 878 tmp = RREG32(DC_HPD3_INT_CONTROL); 879 if (connected) 880 tmp &= ~DC_HPDx_INT_POLARITY; 881 else 882 tmp |= DC_HPDx_INT_POLARITY; 883 WREG32(DC_HPD3_INT_CONTROL, tmp); 884 break; 885 case RADEON_HPD_4: 886 tmp = RREG32(DC_HPD4_INT_CONTROL); 887 if (connected) 888 tmp &= ~DC_HPDx_INT_POLARITY; 889 else 890 tmp |= DC_HPDx_INT_POLARITY; 891 WREG32(DC_HPD4_INT_CONTROL, tmp); 892 break; 893 case RADEON_HPD_5: 894 tmp = RREG32(DC_HPD5_INT_CONTROL); 895 if (connected) 896 tmp &= ~DC_HPDx_INT_POLARITY; 897 else 898 tmp |= DC_HPDx_INT_POLARITY; 899 WREG32(DC_HPD5_INT_CONTROL, tmp); 900 break; 901 /* DCE 3.2 */ 902 case RADEON_HPD_6: 903 tmp = RREG32(DC_HPD6_INT_CONTROL); 904 if (connected) 905 tmp &= ~DC_HPDx_INT_POLARITY; 906 else 907 tmp |= DC_HPDx_INT_POLARITY; 908 WREG32(DC_HPD6_INT_CONTROL, tmp); 909 break; 910 default: 911 break; 912 } 913 } else { 914 switch (hpd) { 915 case RADEON_HPD_1: 916 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); 917 if (connected) 918 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 919 else 920 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 921 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 922 break; 923 case RADEON_HPD_2: 924 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); 925 if (connected) 926 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 927 else 928 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 929 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 930 break; 931 case RADEON_HPD_3: 932 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); 933 if (connected) 934 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 935 else 936 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 937 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 938 break; 939 default: 940 break; 941 } 942 } 943 } 944 945 void r600_hpd_init(struct radeon_device *rdev) 946 { 947 struct drm_device *dev = rdev->ddev; 948 struct drm_connector *connector; 949 unsigned enable = 0; 950 951 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 952 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 953 954 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || 955 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 956 /* don't try to enable hpd on eDP or LVDS avoid breaking the 957 * aux dp channel on imac and help (but not completely fix) 958 * https://bugzilla.redhat.com/show_bug.cgi?id=726143 959 */ 960 continue; 961 } 962 if (ASIC_IS_DCE3(rdev)) { 963 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); 964 if (ASIC_IS_DCE32(rdev)) 965 tmp |= DC_HPDx_EN; 966 967 switch (radeon_connector->hpd.hpd) { 968 case RADEON_HPD_1: 969 WREG32(DC_HPD1_CONTROL, tmp); 970 break; 971 case RADEON_HPD_2: 972 WREG32(DC_HPD2_CONTROL, tmp); 973 break; 974 case RADEON_HPD_3: 975 WREG32(DC_HPD3_CONTROL, tmp); 976 break; 977 case RADEON_HPD_4: 978 WREG32(DC_HPD4_CONTROL, tmp); 979 break; 980 /* DCE 3.2 */ 981 case RADEON_HPD_5: 982 WREG32(DC_HPD5_CONTROL, tmp); 983 break; 984 case RADEON_HPD_6: 985 WREG32(DC_HPD6_CONTROL, tmp); 986 break; 987 default: 988 break; 989 } 990 } else { 991 switch (radeon_connector->hpd.hpd) { 992 case RADEON_HPD_1: 993 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); 994 break; 995 case RADEON_HPD_2: 996 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); 997 break; 998 case RADEON_HPD_3: 999 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); 1000 break; 1001 default: 1002 break; 1003 } 1004 } 1005 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 1006 enable |= 1 << radeon_connector->hpd.hpd; 1007 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 1008 } 1009 radeon_irq_kms_enable_hpd(rdev, enable); 1010 } 1011 1012 void r600_hpd_fini(struct radeon_device *rdev) 1013 { 1014 struct drm_device *dev = rdev->ddev; 1015 struct drm_connector *connector; 1016 unsigned disable = 0; 1017 1018 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1019 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1020 if (ASIC_IS_DCE3(rdev)) { 1021 switch (radeon_connector->hpd.hpd) { 1022 case RADEON_HPD_1: 1023 WREG32(DC_HPD1_CONTROL, 0); 1024 break; 1025 case RADEON_HPD_2: 1026 WREG32(DC_HPD2_CONTROL, 0); 1027 break; 1028 case RADEON_HPD_3: 1029 WREG32(DC_HPD3_CONTROL, 0); 1030 break; 1031 case RADEON_HPD_4: 1032 WREG32(DC_HPD4_CONTROL, 0); 1033 break; 1034 /* DCE 3.2 */ 1035 case RADEON_HPD_5: 1036 WREG32(DC_HPD5_CONTROL, 0); 1037 break; 1038 case RADEON_HPD_6: 1039 WREG32(DC_HPD6_CONTROL, 0); 1040 break; 1041 default: 1042 break; 1043 } 1044 } else { 1045 switch (radeon_connector->hpd.hpd) { 1046 case RADEON_HPD_1: 1047 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); 1048 break; 1049 case RADEON_HPD_2: 1050 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); 1051 break; 1052 case RADEON_HPD_3: 1053 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); 1054 break; 1055 default: 1056 break; 1057 } 1058 } 1059 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 1060 disable |= 1 << radeon_connector->hpd.hpd; 1061 } 1062 radeon_irq_kms_disable_hpd(rdev, disable); 1063 } 1064 1065 /* 1066 * R600 PCIE GART 1067 */ 1068 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) 1069 { 1070 unsigned i; 1071 u32 tmp; 1072 1073 /* flush hdp cache so updates hit vram */ 1074 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 1075 !(rdev->flags & RADEON_IS_AGP)) { 1076 volatile uint32_t *ptr = rdev->gart.ptr; 1077 u32 tmp; 1078 1079 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 1080 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 1081 * This seems to cause problems on some AGP cards. Just use the old 1082 * method for them. 1083 */ 1084 WREG32(HDP_DEBUG1, 0); 1085 tmp = *ptr; 1086 } else 1087 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 1088 1089 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); 1090 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); 1091 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 1092 for (i = 0; i < rdev->usec_timeout; i++) { 1093 /* read MC_STATUS */ 1094 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); 1095 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; 1096 if (tmp == 2) { 1097 printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); 1098 return; 1099 } 1100 if (tmp) { 1101 return; 1102 } 1103 udelay(1); 1104 } 1105 } 1106 1107 int r600_pcie_gart_init(struct radeon_device *rdev) 1108 { 1109 int r; 1110 1111 if (rdev->gart.robj) { 1112 WARN(1, "R600 PCIE GART already initialized\n"); 1113 return 0; 1114 } 1115 /* Initialize common gart structure */ 1116 r = radeon_gart_init(rdev); 1117 if (r) 1118 return r; 1119 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; 1120 return radeon_gart_table_vram_alloc(rdev); 1121 } 1122 1123 static int r600_pcie_gart_enable(struct radeon_device *rdev) 1124 { 1125 u32 tmp; 1126 int r, i; 1127 1128 if (rdev->gart.robj == NULL) { 1129 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 1130 return -EINVAL; 1131 } 1132 r = radeon_gart_table_vram_pin(rdev); 1133 if (r) 1134 return r; 1135 1136 /* Setup L2 cache */ 1137 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 1138 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 1139 EFFECTIVE_L2_QUEUE_SIZE(7)); 1140 WREG32(VM_L2_CNTL2, 0); 1141 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); 1142 /* Setup TLB control */ 1143 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | 1144 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 1145 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | 1146 ENABLE_WAIT_L2_QUERY; 1147 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); 1148 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 1149 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); 1150 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 1151 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); 1152 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); 1153 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); 1154 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); 1155 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); 1156 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 1157 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 1158 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 1159 WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); 1160 WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); 1161 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1162 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1163 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1164 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 1165 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 1166 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 1167 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 1168 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 1169 (u32)(rdev->dummy_page.addr >> 12)); 1170 for (i = 1; i < 7; i++) 1171 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 1172 1173 r600_pcie_gart_tlb_flush(rdev); 1174 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 1175 (unsigned)(rdev->mc.gtt_size >> 20), 1176 (unsigned long long)rdev->gart.table_addr); 1177 rdev->gart.ready = true; 1178 return 0; 1179 } 1180 1181 static void r600_pcie_gart_disable(struct radeon_device *rdev) 1182 { 1183 u32 tmp; 1184 int i; 1185 1186 /* Disable all tables */ 1187 for (i = 0; i < 7; i++) 1188 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 1189 1190 /* Disable L2 cache */ 1191 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | 1192 EFFECTIVE_L2_QUEUE_SIZE(7)); 1193 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); 1194 /* Setup L1 TLB control */ 1195 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | 1196 ENABLE_WAIT_L2_QUERY; 1197 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); 1198 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); 1199 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); 1200 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); 1201 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); 1202 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 1203 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 1204 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 1205 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp); 1206 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp); 1207 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); 1208 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 1209 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 1210 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 1211 WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); 1212 WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); 1213 radeon_gart_table_vram_unpin(rdev); 1214 } 1215 1216 static void r600_pcie_gart_fini(struct radeon_device *rdev) 1217 { 1218 radeon_gart_fini(rdev); 1219 r600_pcie_gart_disable(rdev); 1220 radeon_gart_table_vram_free(rdev); 1221 } 1222 1223 static void r600_agp_enable(struct radeon_device *rdev) 1224 { 1225 u32 tmp; 1226 int i; 1227 1228 /* Setup L2 cache */ 1229 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 1230 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 1231 EFFECTIVE_L2_QUEUE_SIZE(7)); 1232 WREG32(VM_L2_CNTL2, 0); 1233 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); 1234 /* Setup TLB control */ 1235 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | 1236 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 1237 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | 1238 ENABLE_WAIT_L2_QUERY; 1239 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); 1240 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 1241 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); 1242 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 1243 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); 1244 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); 1245 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); 1246 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); 1247 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); 1248 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 1249 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 1250 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 1251 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1252 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1253 for (i = 0; i < 7; i++) 1254 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 1255 } 1256 1257 int r600_mc_wait_for_idle(struct radeon_device *rdev) 1258 { 1259 unsigned i; 1260 u32 tmp; 1261 1262 for (i = 0; i < rdev->usec_timeout; i++) { 1263 /* read MC_STATUS */ 1264 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00; 1265 if (!tmp) 1266 return 0; 1267 udelay(1); 1268 } 1269 return -1; 1270 } 1271 1272 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) 1273 { 1274 unsigned long flags; 1275 uint32_t r; 1276 1277 spin_lock_irqsave(&rdev->mc_idx_lock, flags); 1278 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); 1279 r = RREG32(R_0028FC_MC_DATA); 1280 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); 1281 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 1282 return r; 1283 } 1284 1285 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1286 { 1287 unsigned long flags; 1288 1289 spin_lock_irqsave(&rdev->mc_idx_lock, flags); 1290 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | 1291 S_0028F8_MC_IND_WR_EN(1)); 1292 WREG32(R_0028FC_MC_DATA, v); 1293 WREG32(R_0028F8_MC_INDEX, 0x7F); 1294 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 1295 } 1296 1297 static void r600_mc_program(struct radeon_device *rdev) 1298 { 1299 struct rv515_mc_save save; 1300 u32 tmp; 1301 int i, j; 1302 1303 /* Initialize HDP */ 1304 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1305 WREG32((0x2c14 + j), 0x00000000); 1306 WREG32((0x2c18 + j), 0x00000000); 1307 WREG32((0x2c1c + j), 0x00000000); 1308 WREG32((0x2c20 + j), 0x00000000); 1309 WREG32((0x2c24 + j), 0x00000000); 1310 } 1311 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 1312 1313 rv515_mc_stop(rdev, &save); 1314 if (r600_mc_wait_for_idle(rdev)) { 1315 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1316 } 1317 /* Lockout access through VGA aperture (doesn't exist before R600) */ 1318 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 1319 /* Update configuration */ 1320 if (rdev->flags & RADEON_IS_AGP) { 1321 if (rdev->mc.vram_start < rdev->mc.gtt_start) { 1322 /* VRAM before AGP */ 1323 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1324 rdev->mc.vram_start >> 12); 1325 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1326 rdev->mc.gtt_end >> 12); 1327 } else { 1328 /* VRAM after AGP */ 1329 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1330 rdev->mc.gtt_start >> 12); 1331 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1332 rdev->mc.vram_end >> 12); 1333 } 1334 } else { 1335 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 1336 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); 1337 } 1338 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); 1339 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 1340 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 1341 WREG32(MC_VM_FB_LOCATION, tmp); 1342 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 1343 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 1344 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 1345 if (rdev->flags & RADEON_IS_AGP) { 1346 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); 1347 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); 1348 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 1349 } else { 1350 WREG32(MC_VM_AGP_BASE, 0); 1351 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 1352 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 1353 } 1354 if (r600_mc_wait_for_idle(rdev)) { 1355 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1356 } 1357 rv515_mc_resume(rdev, &save); 1358 /* we need to own VRAM, so turn off the VGA renderer here 1359 * to stop it overwriting our objects */ 1360 rv515_vga_render_disable(rdev); 1361 } 1362 1363 /** 1364 * r600_vram_gtt_location - try to find VRAM & GTT location 1365 * @rdev: radeon device structure holding all necessary informations 1366 * @mc: memory controller structure holding memory informations 1367 * 1368 * Function will place try to place VRAM at same place as in CPU (PCI) 1369 * address space as some GPU seems to have issue when we reprogram at 1370 * different address space. 1371 * 1372 * If there is not enough space to fit the unvisible VRAM after the 1373 * aperture then we limit the VRAM size to the aperture. 1374 * 1375 * If we are using AGP then place VRAM adjacent to AGP aperture are we need 1376 * them to be in one from GPU point of view so that we can program GPU to 1377 * catch access outside them (weird GPU policy see ??). 1378 * 1379 * This function will never fails, worst case are limiting VRAM or GTT. 1380 * 1381 * Note: GTT start, end, size should be initialized before calling this 1382 * function on AGP platform. 1383 */ 1384 static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 1385 { 1386 u64 size_bf, size_af; 1387 1388 if (mc->mc_vram_size > 0xE0000000) { 1389 /* leave room for at least 512M GTT */ 1390 dev_warn(rdev->dev, "limiting VRAM\n"); 1391 mc->real_vram_size = 0xE0000000; 1392 mc->mc_vram_size = 0xE0000000; 1393 } 1394 if (rdev->flags & RADEON_IS_AGP) { 1395 size_bf = mc->gtt_start; 1396 size_af = mc->mc_mask - mc->gtt_end; 1397 if (size_bf > size_af) { 1398 if (mc->mc_vram_size > size_bf) { 1399 dev_warn(rdev->dev, "limiting VRAM\n"); 1400 mc->real_vram_size = size_bf; 1401 mc->mc_vram_size = size_bf; 1402 } 1403 mc->vram_start = mc->gtt_start - mc->mc_vram_size; 1404 } else { 1405 if (mc->mc_vram_size > size_af) { 1406 dev_warn(rdev->dev, "limiting VRAM\n"); 1407 mc->real_vram_size = size_af; 1408 mc->mc_vram_size = size_af; 1409 } 1410 mc->vram_start = mc->gtt_end + 1; 1411 } 1412 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 1413 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", 1414 mc->mc_vram_size >> 20, mc->vram_start, 1415 mc->vram_end, mc->real_vram_size >> 20); 1416 } else { 1417 u64 base = 0; 1418 if (rdev->flags & RADEON_IS_IGP) { 1419 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; 1420 base <<= 24; 1421 } 1422 radeon_vram_location(rdev, &rdev->mc, base); 1423 rdev->mc.gtt_base_align = 0; 1424 radeon_gtt_location(rdev, mc); 1425 } 1426 } 1427 1428 static int r600_mc_init(struct radeon_device *rdev) 1429 { 1430 u32 tmp; 1431 int chansize, numchan; 1432 uint32_t h_addr, l_addr; 1433 unsigned long long k8_addr; 1434 1435 /* Get VRAM informations */ 1436 rdev->mc.vram_is_ddr = true; 1437 tmp = RREG32(RAMCFG); 1438 if (tmp & CHANSIZE_OVERRIDE) { 1439 chansize = 16; 1440 } else if (tmp & CHANSIZE_MASK) { 1441 chansize = 64; 1442 } else { 1443 chansize = 32; 1444 } 1445 tmp = RREG32(CHMAP); 1446 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 1447 case 0: 1448 default: 1449 numchan = 1; 1450 break; 1451 case 1: 1452 numchan = 2; 1453 break; 1454 case 2: 1455 numchan = 4; 1456 break; 1457 case 3: 1458 numchan = 8; 1459 break; 1460 } 1461 rdev->mc.vram_width = numchan * chansize; 1462 /* Could aper size report 0 ? */ 1463 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 1464 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 1465 /* Setup GPU memory space */ 1466 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1467 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1468 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1469 r600_vram_gtt_location(rdev, &rdev->mc); 1470 1471 if (rdev->flags & RADEON_IS_IGP) { 1472 rs690_pm_info(rdev); 1473 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 1474 1475 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { 1476 /* Use K8 direct mapping for fast fb access. */ 1477 rdev->fastfb_working = false; 1478 h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL)); 1479 l_addr = RREG32_MC(R_000011_K8_FB_LOCATION); 1480 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr; 1481 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) 1482 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL) 1483 #endif 1484 { 1485 /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport 1486 * memory is present. 1487 */ 1488 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) { 1489 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", 1490 (unsigned long long)rdev->mc.aper_base, k8_addr); 1491 rdev->mc.aper_base = (resource_size_t)k8_addr; 1492 rdev->fastfb_working = true; 1493 } 1494 } 1495 } 1496 } 1497 1498 radeon_update_bandwidth_info(rdev); 1499 return 0; 1500 } 1501 1502 int r600_vram_scratch_init(struct radeon_device *rdev) 1503 { 1504 int r; 1505 void *vram_scratch_ptr_ptr; 1506 1507 if (rdev->vram_scratch.robj == NULL) { 1508 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, 1509 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 1510 0, NULL, &rdev->vram_scratch.robj); 1511 if (r) { 1512 return r; 1513 } 1514 } 1515 1516 r = radeon_bo_reserve(rdev->vram_scratch.robj, false); 1517 if (unlikely(r != 0)) 1518 return r; 1519 r = radeon_bo_pin(rdev->vram_scratch.robj, 1520 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr); 1521 if (r) { 1522 radeon_bo_unreserve(rdev->vram_scratch.robj); 1523 return r; 1524 } 1525 vram_scratch_ptr_ptr = &rdev->vram_scratch.ptr; 1526 r = radeon_bo_kmap(rdev->vram_scratch.robj, 1527 vram_scratch_ptr_ptr); 1528 if (r) 1529 radeon_bo_unpin(rdev->vram_scratch.robj); 1530 radeon_bo_unreserve(rdev->vram_scratch.robj); 1531 1532 return r; 1533 } 1534 1535 void r600_vram_scratch_fini(struct radeon_device *rdev) 1536 { 1537 int r; 1538 1539 if (rdev->vram_scratch.robj == NULL) { 1540 return; 1541 } 1542 r = radeon_bo_reserve(rdev->vram_scratch.robj, false); 1543 if (likely(r == 0)) { 1544 radeon_bo_kunmap(rdev->vram_scratch.robj); 1545 radeon_bo_unpin(rdev->vram_scratch.robj); 1546 radeon_bo_unreserve(rdev->vram_scratch.robj); 1547 } 1548 radeon_bo_unref(&rdev->vram_scratch.robj); 1549 } 1550 1551 void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung) 1552 { 1553 u32 tmp = RREG32(R600_BIOS_3_SCRATCH); 1554 1555 if (hung) 1556 tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; 1557 else 1558 tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; 1559 1560 WREG32(R600_BIOS_3_SCRATCH, tmp); 1561 } 1562 1563 static void r600_print_gpu_status_regs(struct radeon_device *rdev) 1564 { 1565 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", 1566 RREG32(R_008010_GRBM_STATUS)); 1567 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", 1568 RREG32(R_008014_GRBM_STATUS2)); 1569 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", 1570 RREG32(R_000E50_SRBM_STATUS)); 1571 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1572 RREG32(CP_STALLED_STAT1)); 1573 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1574 RREG32(CP_STALLED_STAT2)); 1575 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1576 RREG32(CP_BUSY_STAT)); 1577 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1578 RREG32(CP_STAT)); 1579 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1580 RREG32(DMA_STATUS_REG)); 1581 } 1582 1583 static bool r600_is_display_hung(struct radeon_device *rdev) 1584 { 1585 u32 crtc_hung = 0; 1586 u32 crtc_status[2]; 1587 u32 i, j, tmp; 1588 1589 for (i = 0; i < rdev->num_crtc; i++) { 1590 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) { 1591 crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]); 1592 crtc_hung |= (1 << i); 1593 } 1594 } 1595 1596 for (j = 0; j < 10; j++) { 1597 for (i = 0; i < rdev->num_crtc; i++) { 1598 if (crtc_hung & (1 << i)) { 1599 tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]); 1600 if (tmp != crtc_status[i]) 1601 crtc_hung &= ~(1 << i); 1602 } 1603 } 1604 if (crtc_hung == 0) 1605 return false; 1606 udelay(100); 1607 } 1608 1609 return true; 1610 } 1611 1612 u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) 1613 { 1614 u32 reset_mask = 0; 1615 u32 tmp; 1616 1617 /* GRBM_STATUS */ 1618 tmp = RREG32(R_008010_GRBM_STATUS); 1619 if (rdev->family >= CHIP_RV770) { 1620 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) | 1621 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) | 1622 G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) | 1623 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) | 1624 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp)) 1625 reset_mask |= RADEON_RESET_GFX; 1626 } else { 1627 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) | 1628 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) | 1629 G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) | 1630 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) | 1631 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp)) 1632 reset_mask |= RADEON_RESET_GFX; 1633 } 1634 1635 if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) | 1636 G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp)) 1637 reset_mask |= RADEON_RESET_CP; 1638 1639 if (G_008010_GRBM_EE_BUSY(tmp)) 1640 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP; 1641 1642 /* DMA_STATUS_REG */ 1643 tmp = RREG32(DMA_STATUS_REG); 1644 if (!(tmp & DMA_IDLE)) 1645 reset_mask |= RADEON_RESET_DMA; 1646 1647 /* SRBM_STATUS */ 1648 tmp = RREG32(R_000E50_SRBM_STATUS); 1649 if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp)) 1650 reset_mask |= RADEON_RESET_RLC; 1651 1652 if (G_000E50_IH_BUSY(tmp)) 1653 reset_mask |= RADEON_RESET_IH; 1654 1655 if (G_000E50_SEM_BUSY(tmp)) 1656 reset_mask |= RADEON_RESET_SEM; 1657 1658 if (G_000E50_GRBM_RQ_PENDING(tmp)) 1659 reset_mask |= RADEON_RESET_GRBM; 1660 1661 if (G_000E50_VMC_BUSY(tmp)) 1662 reset_mask |= RADEON_RESET_VMC; 1663 1664 if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) | 1665 G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) | 1666 G_000E50_MCDW_BUSY(tmp)) 1667 reset_mask |= RADEON_RESET_MC; 1668 1669 if (r600_is_display_hung(rdev)) 1670 reset_mask |= RADEON_RESET_DISPLAY; 1671 1672 /* Skip MC reset as it's mostly likely not hung, just busy */ 1673 if (reset_mask & RADEON_RESET_MC) { 1674 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); 1675 reset_mask &= ~RADEON_RESET_MC; 1676 } 1677 1678 return reset_mask; 1679 } 1680 1681 static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 1682 { 1683 struct rv515_mc_save save; 1684 u32 grbm_soft_reset = 0, srbm_soft_reset = 0; 1685 u32 tmp; 1686 1687 if (reset_mask == 0) 1688 return; 1689 1690 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1691 1692 r600_print_gpu_status_regs(rdev); 1693 1694 /* Disable CP parsing/prefetching */ 1695 if (rdev->family >= CHIP_RV770) 1696 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1)); 1697 else 1698 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1699 1700 /* disable the RLC */ 1701 WREG32(RLC_CNTL, 0); 1702 1703 if (reset_mask & RADEON_RESET_DMA) { 1704 /* Disable DMA */ 1705 tmp = RREG32(DMA_RB_CNTL); 1706 tmp &= ~DMA_RB_ENABLE; 1707 WREG32(DMA_RB_CNTL, tmp); 1708 } 1709 1710 mdelay(50); 1711 1712 rv515_mc_stop(rdev, &save); 1713 if (r600_mc_wait_for_idle(rdev)) { 1714 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1715 } 1716 1717 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) { 1718 if (rdev->family >= CHIP_RV770) 1719 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) | 1720 S_008020_SOFT_RESET_CB(1) | 1721 S_008020_SOFT_RESET_PA(1) | 1722 S_008020_SOFT_RESET_SC(1) | 1723 S_008020_SOFT_RESET_SPI(1) | 1724 S_008020_SOFT_RESET_SX(1) | 1725 S_008020_SOFT_RESET_SH(1) | 1726 S_008020_SOFT_RESET_TC(1) | 1727 S_008020_SOFT_RESET_TA(1) | 1728 S_008020_SOFT_RESET_VC(1) | 1729 S_008020_SOFT_RESET_VGT(1); 1730 else 1731 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) | 1732 S_008020_SOFT_RESET_DB(1) | 1733 S_008020_SOFT_RESET_CB(1) | 1734 S_008020_SOFT_RESET_PA(1) | 1735 S_008020_SOFT_RESET_SC(1) | 1736 S_008020_SOFT_RESET_SMX(1) | 1737 S_008020_SOFT_RESET_SPI(1) | 1738 S_008020_SOFT_RESET_SX(1) | 1739 S_008020_SOFT_RESET_SH(1) | 1740 S_008020_SOFT_RESET_TC(1) | 1741 S_008020_SOFT_RESET_TA(1) | 1742 S_008020_SOFT_RESET_VC(1) | 1743 S_008020_SOFT_RESET_VGT(1); 1744 } 1745 1746 if (reset_mask & RADEON_RESET_CP) { 1747 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) | 1748 S_008020_SOFT_RESET_VGT(1); 1749 1750 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1); 1751 } 1752 1753 if (reset_mask & RADEON_RESET_DMA) { 1754 if (rdev->family >= CHIP_RV770) 1755 srbm_soft_reset |= RV770_SOFT_RESET_DMA; 1756 else 1757 srbm_soft_reset |= SOFT_RESET_DMA; 1758 } 1759 1760 if (reset_mask & RADEON_RESET_RLC) 1761 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1); 1762 1763 if (reset_mask & RADEON_RESET_SEM) 1764 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1); 1765 1766 if (reset_mask & RADEON_RESET_IH) 1767 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1); 1768 1769 if (reset_mask & RADEON_RESET_GRBM) 1770 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1); 1771 1772 if (!(rdev->flags & RADEON_IS_IGP)) { 1773 if (reset_mask & RADEON_RESET_MC) 1774 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1); 1775 } 1776 1777 if (reset_mask & RADEON_RESET_VMC) 1778 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1); 1779 1780 if (grbm_soft_reset) { 1781 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1782 tmp |= grbm_soft_reset; 1783 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1784 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1785 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1786 1787 udelay(50); 1788 1789 tmp &= ~grbm_soft_reset; 1790 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1791 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1792 } 1793 1794 if (srbm_soft_reset) { 1795 tmp = RREG32(SRBM_SOFT_RESET); 1796 tmp |= srbm_soft_reset; 1797 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 1798 WREG32(SRBM_SOFT_RESET, tmp); 1799 tmp = RREG32(SRBM_SOFT_RESET); 1800 1801 udelay(50); 1802 1803 tmp &= ~srbm_soft_reset; 1804 WREG32(SRBM_SOFT_RESET, tmp); 1805 tmp = RREG32(SRBM_SOFT_RESET); 1806 } 1807 1808 /* Wait a little for things to settle down */ 1809 mdelay(1); 1810 1811 rv515_mc_resume(rdev, &save); 1812 udelay(50); 1813 1814 r600_print_gpu_status_regs(rdev); 1815 } 1816 1817 static void r600_gpu_pci_config_reset(struct radeon_device *rdev) 1818 { 1819 struct rv515_mc_save save; 1820 u32 tmp, i; 1821 1822 dev_info(rdev->dev, "GPU pci config reset\n"); 1823 1824 /* disable dpm? */ 1825 1826 /* Disable CP parsing/prefetching */ 1827 if (rdev->family >= CHIP_RV770) 1828 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1)); 1829 else 1830 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1831 1832 /* disable the RLC */ 1833 WREG32(RLC_CNTL, 0); 1834 1835 /* Disable DMA */ 1836 tmp = RREG32(DMA_RB_CNTL); 1837 tmp &= ~DMA_RB_ENABLE; 1838 WREG32(DMA_RB_CNTL, tmp); 1839 1840 mdelay(50); 1841 1842 /* set mclk/sclk to bypass */ 1843 if (rdev->family >= CHIP_RV770) 1844 rv770_set_clk_bypass_mode(rdev); 1845 /* disable BM */ 1846 pci_disable_busmaster(rdev->pdev->dev.bsddev); 1847 /* disable mem access */ 1848 rv515_mc_stop(rdev, &save); 1849 if (r600_mc_wait_for_idle(rdev)) { 1850 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1851 } 1852 1853 /* BIF reset workaround. Not sure if this is needed on 6xx */ 1854 tmp = RREG32(BUS_CNTL); 1855 tmp |= VGA_COHE_SPEC_TIMER_DIS; 1856 WREG32(BUS_CNTL, tmp); 1857 1858 tmp = RREG32(BIF_SCRATCH0); 1859 1860 /* reset */ 1861 radeon_pci_config_reset(rdev); 1862 mdelay(1); 1863 1864 /* BIF reset workaround. Not sure if this is needed on 6xx */ 1865 tmp = SOFT_RESET_BIF; 1866 WREG32(SRBM_SOFT_RESET, tmp); 1867 mdelay(1); 1868 WREG32(SRBM_SOFT_RESET, 0); 1869 1870 /* wait for asic to come out of reset */ 1871 for (i = 0; i < rdev->usec_timeout; i++) { 1872 if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) 1873 break; 1874 udelay(1); 1875 } 1876 } 1877 1878 int r600_asic_reset(struct radeon_device *rdev, bool hard) 1879 { 1880 u32 reset_mask; 1881 1882 if (hard) { 1883 r600_gpu_pci_config_reset(rdev); 1884 return 0; 1885 } 1886 1887 reset_mask = r600_gpu_check_soft_reset(rdev); 1888 1889 if (reset_mask) 1890 r600_set_bios_scratch_engine_hung(rdev, true); 1891 1892 /* try soft reset */ 1893 r600_gpu_soft_reset(rdev, reset_mask); 1894 1895 reset_mask = r600_gpu_check_soft_reset(rdev); 1896 1897 /* try pci config reset */ 1898 if (reset_mask && radeon_hard_reset) 1899 r600_gpu_pci_config_reset(rdev); 1900 1901 reset_mask = r600_gpu_check_soft_reset(rdev); 1902 1903 if (!reset_mask) 1904 r600_set_bios_scratch_engine_hung(rdev, false); 1905 1906 return 0; 1907 } 1908 1909 /** 1910 * r600_gfx_is_lockup - Check if the GFX engine is locked up 1911 * 1912 * @rdev: radeon_device pointer 1913 * @ring: radeon_ring structure holding ring information 1914 * 1915 * Check if the GFX engine is locked up. 1916 * Returns true if the engine appears to be locked up, false if not. 1917 */ 1918 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1919 { 1920 u32 reset_mask = r600_gpu_check_soft_reset(rdev); 1921 1922 if (!(reset_mask & (RADEON_RESET_GFX | 1923 RADEON_RESET_COMPUTE | 1924 RADEON_RESET_CP))) { 1925 radeon_ring_lockup_update(rdev, ring); 1926 return false; 1927 } 1928 return radeon_ring_test_lockup(rdev, ring); 1929 } 1930 1931 u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1932 u32 tiling_pipe_num, 1933 u32 max_rb_num, 1934 u32 total_max_rb_num, 1935 u32 disabled_rb_mask) 1936 { 1937 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1938 u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1939 u32 data = 0, mask = 1 << (max_rb_num - 1); 1940 unsigned i, j; 1941 1942 /* mask out the RBs that don't exist on that asic */ 1943 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1944 /* make sure at least one RB is available */ 1945 if ((tmp & 0xff) != 0xff) 1946 disabled_rb_mask = tmp; 1947 1948 rendering_pipe_num = 1 << tiling_pipe_num; 1949 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); 1950 BUG_ON(rendering_pipe_num < req_rb_num); 1951 1952 pipe_rb_ratio = rendering_pipe_num / req_rb_num; 1953 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; 1954 1955 if (rdev->family <= CHIP_RV740) { 1956 /* r6xx/r7xx */ 1957 rb_num_width = 2; 1958 } else { 1959 /* eg+ */ 1960 rb_num_width = 4; 1961 } 1962 1963 for (i = 0; i < max_rb_num; i++) { 1964 if (!(mask & disabled_rb_mask)) { 1965 for (j = 0; j < pipe_rb_ratio; j++) { 1966 data <<= rb_num_width; 1967 data |= max_rb_num - i - 1; 1968 } 1969 if (pipe_rb_remain) { 1970 data <<= rb_num_width; 1971 data |= max_rb_num - i - 1; 1972 pipe_rb_remain--; 1973 } 1974 } 1975 mask >>= 1; 1976 } 1977 1978 return data; 1979 } 1980 1981 int r600_count_pipe_bits(uint32_t val) 1982 { 1983 return hweight32(val); 1984 } 1985 1986 static void r600_gpu_init(struct radeon_device *rdev) 1987 { 1988 u32 tiling_config; 1989 u32 ramcfg; 1990 u32 cc_gc_shader_pipe_config; 1991 u32 tmp; 1992 int i, j; 1993 u32 sq_config; 1994 u32 sq_gpr_resource_mgmt_1 = 0; 1995 u32 sq_gpr_resource_mgmt_2 = 0; 1996 u32 sq_thread_resource_mgmt = 0; 1997 u32 sq_stack_resource_mgmt_1 = 0; 1998 u32 sq_stack_resource_mgmt_2 = 0; 1999 u32 disabled_rb_mask; 2000 2001 rdev->config.r600.tiling_group_size = 256; 2002 switch (rdev->family) { 2003 case CHIP_R600: 2004 rdev->config.r600.max_pipes = 4; 2005 rdev->config.r600.max_tile_pipes = 8; 2006 rdev->config.r600.max_simds = 4; 2007 rdev->config.r600.max_backends = 4; 2008 rdev->config.r600.max_gprs = 256; 2009 rdev->config.r600.max_threads = 192; 2010 rdev->config.r600.max_stack_entries = 256; 2011 rdev->config.r600.max_hw_contexts = 8; 2012 rdev->config.r600.max_gs_threads = 16; 2013 rdev->config.r600.sx_max_export_size = 128; 2014 rdev->config.r600.sx_max_export_pos_size = 16; 2015 rdev->config.r600.sx_max_export_smx_size = 128; 2016 rdev->config.r600.sq_num_cf_insts = 2; 2017 break; 2018 case CHIP_RV630: 2019 case CHIP_RV635: 2020 rdev->config.r600.max_pipes = 2; 2021 rdev->config.r600.max_tile_pipes = 2; 2022 rdev->config.r600.max_simds = 3; 2023 rdev->config.r600.max_backends = 1; 2024 rdev->config.r600.max_gprs = 128; 2025 rdev->config.r600.max_threads = 192; 2026 rdev->config.r600.max_stack_entries = 128; 2027 rdev->config.r600.max_hw_contexts = 8; 2028 rdev->config.r600.max_gs_threads = 4; 2029 rdev->config.r600.sx_max_export_size = 128; 2030 rdev->config.r600.sx_max_export_pos_size = 16; 2031 rdev->config.r600.sx_max_export_smx_size = 128; 2032 rdev->config.r600.sq_num_cf_insts = 2; 2033 break; 2034 case CHIP_RV610: 2035 case CHIP_RV620: 2036 case CHIP_RS780: 2037 case CHIP_RS880: 2038 rdev->config.r600.max_pipes = 1; 2039 rdev->config.r600.max_tile_pipes = 1; 2040 rdev->config.r600.max_simds = 2; 2041 rdev->config.r600.max_backends = 1; 2042 rdev->config.r600.max_gprs = 128; 2043 rdev->config.r600.max_threads = 192; 2044 rdev->config.r600.max_stack_entries = 128; 2045 rdev->config.r600.max_hw_contexts = 4; 2046 rdev->config.r600.max_gs_threads = 4; 2047 rdev->config.r600.sx_max_export_size = 128; 2048 rdev->config.r600.sx_max_export_pos_size = 16; 2049 rdev->config.r600.sx_max_export_smx_size = 128; 2050 rdev->config.r600.sq_num_cf_insts = 1; 2051 break; 2052 case CHIP_RV670: 2053 rdev->config.r600.max_pipes = 4; 2054 rdev->config.r600.max_tile_pipes = 4; 2055 rdev->config.r600.max_simds = 4; 2056 rdev->config.r600.max_backends = 4; 2057 rdev->config.r600.max_gprs = 192; 2058 rdev->config.r600.max_threads = 192; 2059 rdev->config.r600.max_stack_entries = 256; 2060 rdev->config.r600.max_hw_contexts = 8; 2061 rdev->config.r600.max_gs_threads = 16; 2062 rdev->config.r600.sx_max_export_size = 128; 2063 rdev->config.r600.sx_max_export_pos_size = 16; 2064 rdev->config.r600.sx_max_export_smx_size = 128; 2065 rdev->config.r600.sq_num_cf_insts = 2; 2066 break; 2067 default: 2068 break; 2069 } 2070 2071 /* Initialize HDP */ 2072 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 2073 WREG32((0x2c14 + j), 0x00000000); 2074 WREG32((0x2c18 + j), 0x00000000); 2075 WREG32((0x2c1c + j), 0x00000000); 2076 WREG32((0x2c20 + j), 0x00000000); 2077 WREG32((0x2c24 + j), 0x00000000); 2078 } 2079 2080 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 2081 2082 /* Setup tiling */ 2083 tiling_config = 0; 2084 ramcfg = RREG32(RAMCFG); 2085 switch (rdev->config.r600.max_tile_pipes) { 2086 case 1: 2087 tiling_config |= PIPE_TILING(0); 2088 break; 2089 case 2: 2090 tiling_config |= PIPE_TILING(1); 2091 break; 2092 case 4: 2093 tiling_config |= PIPE_TILING(2); 2094 break; 2095 case 8: 2096 tiling_config |= PIPE_TILING(3); 2097 break; 2098 default: 2099 break; 2100 } 2101 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; 2102 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 2103 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 2104 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 2105 2106 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 2107 if (tmp > 3) { 2108 tiling_config |= ROW_TILING(3); 2109 tiling_config |= SAMPLE_SPLIT(3); 2110 } else { 2111 tiling_config |= ROW_TILING(tmp); 2112 tiling_config |= SAMPLE_SPLIT(tmp); 2113 } 2114 tiling_config |= BANK_SWAPS(1); 2115 2116 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 2117 tmp = rdev->config.r600.max_simds - 2118 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 2119 rdev->config.r600.active_simds = tmp; 2120 2121 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 2122 tmp = 0; 2123 for (i = 0; i < rdev->config.r600.max_backends; i++) 2124 tmp |= (1 << i); 2125 /* if all the backends are disabled, fix it up here */ 2126 if ((disabled_rb_mask & tmp) == tmp) { 2127 for (i = 0; i < rdev->config.r600.max_backends; i++) 2128 disabled_rb_mask &= ~(1 << i); 2129 } 2130 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 2131 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 2132 R6XX_MAX_BACKENDS, disabled_rb_mask); 2133 tiling_config |= tmp << 16; 2134 rdev->config.r600.backend_map = tmp; 2135 2136 rdev->config.r600.tile_config = tiling_config; 2137 WREG32(GB_TILING_CONFIG, tiling_config); 2138 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 2139 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 2140 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); 2141 2142 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 2143 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 2144 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 2145 2146 /* Setup some CP states */ 2147 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); 2148 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40))); 2149 2150 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT | 2151 SYNC_WALKER | SYNC_ALIGNER)); 2152 /* Setup various GPU states */ 2153 if (rdev->family == CHIP_RV670) 2154 WREG32(ARB_GDEC_RD_CNTL, 0x00000021); 2155 2156 tmp = RREG32(SX_DEBUG_1); 2157 tmp |= SMX_EVENT_RELEASE; 2158 if ((rdev->family > CHIP_R600)) 2159 tmp |= ENABLE_NEW_SMX_ADDRESS; 2160 WREG32(SX_DEBUG_1, tmp); 2161 2162 if (((rdev->family) == CHIP_R600) || 2163 ((rdev->family) == CHIP_RV630) || 2164 ((rdev->family) == CHIP_RV610) || 2165 ((rdev->family) == CHIP_RV620) || 2166 ((rdev->family) == CHIP_RS780) || 2167 ((rdev->family) == CHIP_RS880)) { 2168 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 2169 } else { 2170 WREG32(DB_DEBUG, 0); 2171 } 2172 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) | 2173 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4))); 2174 2175 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2176 WREG32(VGT_NUM_INSTANCES, 0); 2177 2178 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); 2179 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0)); 2180 2181 tmp = RREG32(SQ_MS_FIFO_SIZES); 2182 if (((rdev->family) == CHIP_RV610) || 2183 ((rdev->family) == CHIP_RV620) || 2184 ((rdev->family) == CHIP_RS780) || 2185 ((rdev->family) == CHIP_RS880)) { 2186 tmp = (CACHE_FIFO_SIZE(0xa) | 2187 FETCH_FIFO_HIWATER(0xa) | 2188 DONE_FIFO_HIWATER(0xe0) | 2189 ALU_UPDATE_FIFO_HIWATER(0x8)); 2190 } else if (((rdev->family) == CHIP_R600) || 2191 ((rdev->family) == CHIP_RV630)) { 2192 tmp &= ~DONE_FIFO_HIWATER(0xff); 2193 tmp |= DONE_FIFO_HIWATER(0x4); 2194 } 2195 WREG32(SQ_MS_FIFO_SIZES, tmp); 2196 2197 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 2198 * should be adjusted as needed by the 2D/3D drivers. This just sets default values 2199 */ 2200 sq_config = RREG32(SQ_CONFIG); 2201 sq_config &= ~(PS_PRIO(3) | 2202 VS_PRIO(3) | 2203 GS_PRIO(3) | 2204 ES_PRIO(3)); 2205 sq_config |= (DX9_CONSTS | 2206 VC_ENABLE | 2207 PS_PRIO(0) | 2208 VS_PRIO(1) | 2209 GS_PRIO(2) | 2210 ES_PRIO(3)); 2211 2212 if ((rdev->family) == CHIP_R600) { 2213 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) | 2214 NUM_VS_GPRS(124) | 2215 NUM_CLAUSE_TEMP_GPRS(4)); 2216 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) | 2217 NUM_ES_GPRS(0)); 2218 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) | 2219 NUM_VS_THREADS(48) | 2220 NUM_GS_THREADS(4) | 2221 NUM_ES_THREADS(4)); 2222 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) | 2223 NUM_VS_STACK_ENTRIES(128)); 2224 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) | 2225 NUM_ES_STACK_ENTRIES(0)); 2226 } else if (((rdev->family) == CHIP_RV610) || 2227 ((rdev->family) == CHIP_RV620) || 2228 ((rdev->family) == CHIP_RS780) || 2229 ((rdev->family) == CHIP_RS880)) { 2230 /* no vertex cache */ 2231 sq_config &= ~VC_ENABLE; 2232 2233 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2234 NUM_VS_GPRS(44) | 2235 NUM_CLAUSE_TEMP_GPRS(2)); 2236 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 2237 NUM_ES_GPRS(17)); 2238 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2239 NUM_VS_THREADS(78) | 2240 NUM_GS_THREADS(4) | 2241 NUM_ES_THREADS(31)); 2242 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 2243 NUM_VS_STACK_ENTRIES(40)); 2244 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 2245 NUM_ES_STACK_ENTRIES(16)); 2246 } else if (((rdev->family) == CHIP_RV630) || 2247 ((rdev->family) == CHIP_RV635)) { 2248 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2249 NUM_VS_GPRS(44) | 2250 NUM_CLAUSE_TEMP_GPRS(2)); 2251 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) | 2252 NUM_ES_GPRS(18)); 2253 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2254 NUM_VS_THREADS(78) | 2255 NUM_GS_THREADS(4) | 2256 NUM_ES_THREADS(31)); 2257 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 2258 NUM_VS_STACK_ENTRIES(40)); 2259 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 2260 NUM_ES_STACK_ENTRIES(16)); 2261 } else if ((rdev->family) == CHIP_RV670) { 2262 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2263 NUM_VS_GPRS(44) | 2264 NUM_CLAUSE_TEMP_GPRS(2)); 2265 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 2266 NUM_ES_GPRS(17)); 2267 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2268 NUM_VS_THREADS(78) | 2269 NUM_GS_THREADS(4) | 2270 NUM_ES_THREADS(31)); 2271 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) | 2272 NUM_VS_STACK_ENTRIES(64)); 2273 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) | 2274 NUM_ES_STACK_ENTRIES(64)); 2275 } 2276 2277 WREG32(SQ_CONFIG, sq_config); 2278 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 2279 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 2280 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 2281 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 2282 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 2283 2284 if (((rdev->family) == CHIP_RV610) || 2285 ((rdev->family) == CHIP_RV620) || 2286 ((rdev->family) == CHIP_RS780) || 2287 ((rdev->family) == CHIP_RS880)) { 2288 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 2289 } else { 2290 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 2291 } 2292 2293 /* More default values. 2D/3D driver should adjust as needed */ 2294 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) | 2295 S1_X(0x4) | S1_Y(0xc))); 2296 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) | 2297 S1_X(0x2) | S1_Y(0x2) | 2298 S2_X(0xa) | S2_Y(0x6) | 2299 S3_X(0x6) | S3_Y(0xa))); 2300 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) | 2301 S1_X(0x4) | S1_Y(0xc) | 2302 S2_X(0x1) | S2_Y(0x6) | 2303 S3_X(0xa) | S3_Y(0xe))); 2304 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) | 2305 S5_X(0x0) | S5_Y(0x0) | 2306 S6_X(0xb) | S6_Y(0x4) | 2307 S7_X(0x7) | S7_Y(0x8))); 2308 2309 WREG32(VGT_STRMOUT_EN, 0); 2310 tmp = rdev->config.r600.max_pipes * 16; 2311 switch (rdev->family) { 2312 case CHIP_RV610: 2313 case CHIP_RV620: 2314 case CHIP_RS780: 2315 case CHIP_RS880: 2316 tmp += 32; 2317 break; 2318 case CHIP_RV670: 2319 tmp += 128; 2320 break; 2321 default: 2322 break; 2323 } 2324 if (tmp > 256) { 2325 tmp = 256; 2326 } 2327 WREG32(VGT_ES_PER_GS, 128); 2328 WREG32(VGT_GS_PER_ES, tmp); 2329 WREG32(VGT_GS_PER_VS, 2); 2330 WREG32(VGT_GS_VERTEX_REUSE, 16); 2331 2332 /* more default values. 2D/3D driver should adjust as needed */ 2333 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2334 WREG32(VGT_STRMOUT_EN, 0); 2335 WREG32(SX_MISC, 0); 2336 WREG32(PA_SC_MODE_CNTL, 0); 2337 WREG32(PA_SC_AA_CONFIG, 0); 2338 WREG32(PA_SC_LINE_STIPPLE, 0); 2339 WREG32(SPI_INPUT_Z, 0); 2340 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); 2341 WREG32(CB_COLOR7_FRAG, 0); 2342 2343 /* Clear render buffer base addresses */ 2344 WREG32(CB_COLOR0_BASE, 0); 2345 WREG32(CB_COLOR1_BASE, 0); 2346 WREG32(CB_COLOR2_BASE, 0); 2347 WREG32(CB_COLOR3_BASE, 0); 2348 WREG32(CB_COLOR4_BASE, 0); 2349 WREG32(CB_COLOR5_BASE, 0); 2350 WREG32(CB_COLOR6_BASE, 0); 2351 WREG32(CB_COLOR7_BASE, 0); 2352 WREG32(CB_COLOR7_FRAG, 0); 2353 2354 switch (rdev->family) { 2355 case CHIP_RV610: 2356 case CHIP_RV620: 2357 case CHIP_RS780: 2358 case CHIP_RS880: 2359 tmp = TC_L2_SIZE(8); 2360 break; 2361 case CHIP_RV630: 2362 case CHIP_RV635: 2363 tmp = TC_L2_SIZE(4); 2364 break; 2365 case CHIP_R600: 2366 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT; 2367 break; 2368 default: 2369 tmp = TC_L2_SIZE(0); 2370 break; 2371 } 2372 WREG32(TC_CNTL, tmp); 2373 2374 tmp = RREG32(HDP_HOST_PATH_CNTL); 2375 WREG32(HDP_HOST_PATH_CNTL, tmp); 2376 2377 tmp = RREG32(ARB_POP); 2378 tmp |= ENABLE_TC128; 2379 WREG32(ARB_POP, tmp); 2380 2381 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2382 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 2383 NUM_CLIP_SEQ(3))); 2384 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 2385 WREG32(VC_ENHANCE, 0); 2386 } 2387 2388 2389 /* 2390 * Indirect registers accessor 2391 */ 2392 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2393 { 2394 unsigned long flags; 2395 u32 r; 2396 2397 spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 2398 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2399 (void)RREG32(PCIE_PORT_INDEX); 2400 r = RREG32(PCIE_PORT_DATA); 2401 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); 2402 return r; 2403 } 2404 2405 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2406 { 2407 unsigned long flags; 2408 2409 spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 2410 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2411 (void)RREG32(PCIE_PORT_INDEX); 2412 WREG32(PCIE_PORT_DATA, (v)); 2413 (void)RREG32(PCIE_PORT_DATA); 2414 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); 2415 } 2416 2417 /* 2418 * CP & Ring 2419 */ 2420 void r600_cp_stop(struct radeon_device *rdev) 2421 { 2422 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) 2423 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2424 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 2425 WREG32(SCRATCH_UMSK, 0); 2426 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2427 } 2428 2429 int r600_init_microcode(struct radeon_device *rdev) 2430 { 2431 const char *chip_name; 2432 const char *rlc_chip_name; 2433 const char *smc_chip_name = "RV770"; 2434 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0; 2435 char fw_name[30]; 2436 int err; 2437 2438 DRM_DEBUG("\n"); 2439 2440 switch (rdev->family) { 2441 case CHIP_R600: 2442 chip_name = "R600"; 2443 rlc_chip_name = "R600"; 2444 break; 2445 case CHIP_RV610: 2446 chip_name = "RV610"; 2447 rlc_chip_name = "R600"; 2448 break; 2449 case CHIP_RV630: 2450 chip_name = "RV630"; 2451 rlc_chip_name = "R600"; 2452 break; 2453 case CHIP_RV620: 2454 chip_name = "RV620"; 2455 rlc_chip_name = "R600"; 2456 break; 2457 case CHIP_RV635: 2458 chip_name = "RV635"; 2459 rlc_chip_name = "R600"; 2460 break; 2461 case CHIP_RV670: 2462 chip_name = "RV670"; 2463 rlc_chip_name = "R600"; 2464 break; 2465 case CHIP_RS780: 2466 case CHIP_RS880: 2467 chip_name = "RS780"; 2468 rlc_chip_name = "R600"; 2469 break; 2470 case CHIP_RV770: 2471 chip_name = "RV770"; 2472 rlc_chip_name = "R700"; 2473 smc_chip_name = "RV770"; 2474 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4); 2475 break; 2476 case CHIP_RV730: 2477 chip_name = "RV730"; 2478 rlc_chip_name = "R700"; 2479 smc_chip_name = "RV730"; 2480 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4); 2481 break; 2482 case CHIP_RV710: 2483 chip_name = "RV710"; 2484 rlc_chip_name = "R700"; 2485 smc_chip_name = "RV710"; 2486 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4); 2487 break; 2488 case CHIP_RV740: 2489 chip_name = "RV730"; 2490 rlc_chip_name = "R700"; 2491 smc_chip_name = "RV740"; 2492 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4); 2493 break; 2494 case CHIP_CEDAR: 2495 chip_name = "CEDAR"; 2496 rlc_chip_name = "CEDAR"; 2497 smc_chip_name = "CEDAR"; 2498 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4); 2499 break; 2500 case CHIP_REDWOOD: 2501 chip_name = "REDWOOD"; 2502 rlc_chip_name = "REDWOOD"; 2503 smc_chip_name = "REDWOOD"; 2504 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4); 2505 break; 2506 case CHIP_JUNIPER: 2507 chip_name = "JUNIPER"; 2508 rlc_chip_name = "JUNIPER"; 2509 smc_chip_name = "JUNIPER"; 2510 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4); 2511 break; 2512 case CHIP_CYPRESS: 2513 case CHIP_HEMLOCK: 2514 chip_name = "CYPRESS"; 2515 rlc_chip_name = "CYPRESS"; 2516 smc_chip_name = "CYPRESS"; 2517 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4); 2518 break; 2519 case CHIP_PALM: 2520 chip_name = "PALM"; 2521 rlc_chip_name = "SUMO"; 2522 break; 2523 case CHIP_SUMO: 2524 chip_name = "SUMO"; 2525 rlc_chip_name = "SUMO"; 2526 break; 2527 case CHIP_SUMO2: 2528 chip_name = "SUMO2"; 2529 rlc_chip_name = "SUMO"; 2530 break; 2531 default: BUG(); 2532 } 2533 2534 if (rdev->family >= CHIP_CEDAR) { 2535 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 2536 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 2537 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 2538 } else if (rdev->family >= CHIP_RV770) { 2539 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 2540 me_req_size = R700_PM4_UCODE_SIZE * 4; 2541 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2542 } else { 2543 pfp_req_size = R600_PFP_UCODE_SIZE * 4; 2544 me_req_size = R600_PM4_UCODE_SIZE * 12; 2545 rlc_req_size = R600_RLC_UCODE_SIZE * 4; 2546 } 2547 2548 DRM_INFO("Loading %s Microcode\n", chip_name); 2549 2550 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name); 2551 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); 2552 if (err) 2553 goto out; 2554 if (rdev->pfp_fw->datasize != pfp_req_size) { 2555 printk(KERN_ERR 2556 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2557 rdev->pfp_fw->datasize, fw_name); 2558 err = -EINVAL; 2559 goto out; 2560 } 2561 2562 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name); 2563 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 2564 if (err) 2565 goto out; 2566 if (rdev->me_fw->datasize != me_req_size) { 2567 printk(KERN_ERR 2568 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2569 rdev->me_fw->datasize, fw_name); 2570 err = -EINVAL; 2571 } 2572 2573 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name); 2574 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); 2575 if (err) 2576 goto out; 2577 if (rdev->rlc_fw->datasize != rlc_req_size) { 2578 printk(KERN_ERR 2579 "r600_rlc: Bogus length %zu in firmware \"%s\"\n", 2580 rdev->rlc_fw->datasize, fw_name); 2581 err = -EINVAL; 2582 } 2583 2584 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { 2585 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", smc_chip_name); 2586 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2587 if (err) { 2588 printk(KERN_ERR 2589 "smc: error loading firmware \"%s\"\n", 2590 fw_name); 2591 release_firmware(rdev->smc_fw); 2592 rdev->smc_fw = NULL; 2593 err = 0; 2594 } else if (rdev->smc_fw->datasize != smc_req_size) { 2595 printk(KERN_ERR 2596 "smc: Bogus length %zu in firmware \"%s\"\n", 2597 rdev->smc_fw->datasize, fw_name); 2598 err = -EINVAL; 2599 } 2600 } 2601 2602 out: 2603 if (err) { 2604 if (err != -EINVAL) 2605 printk(KERN_ERR 2606 "r600_cp: Failed to load firmware \"%s\"\n", 2607 fw_name); 2608 release_firmware(rdev->pfp_fw); 2609 rdev->pfp_fw = NULL; 2610 release_firmware(rdev->me_fw); 2611 rdev->me_fw = NULL; 2612 release_firmware(rdev->rlc_fw); 2613 rdev->rlc_fw = NULL; 2614 release_firmware(rdev->smc_fw); 2615 rdev->smc_fw = NULL; 2616 } 2617 return err; 2618 } 2619 2620 u32 r600_gfx_get_rptr(struct radeon_device *rdev, 2621 struct radeon_ring *ring) 2622 { 2623 u32 rptr; 2624 2625 if (rdev->wb.enabled) 2626 rptr = rdev->wb.wb[ring->rptr_offs/4]; 2627 else 2628 rptr = RREG32(R600_CP_RB_RPTR); 2629 2630 return rptr; 2631 } 2632 2633 u32 r600_gfx_get_wptr(struct radeon_device *rdev, 2634 struct radeon_ring *ring) 2635 { 2636 u32 wptr; 2637 2638 wptr = RREG32(R600_CP_RB_WPTR); 2639 2640 return wptr; 2641 } 2642 2643 void r600_gfx_set_wptr(struct radeon_device *rdev, 2644 struct radeon_ring *ring) 2645 { 2646 WREG32(R600_CP_RB_WPTR, ring->wptr); 2647 (void)RREG32(R600_CP_RB_WPTR); 2648 } 2649 2650 /** 2651 * r600_fini_microcode - drop the firmwares image references 2652 * 2653 * @rdev: radeon_device pointer 2654 * 2655 * Drop the pfp, me and rlc firmwares image references. 2656 * Called at driver shutdown. 2657 */ 2658 void r600_fini_microcode(struct radeon_device *rdev) 2659 { 2660 release_firmware(rdev->pfp_fw); 2661 rdev->pfp_fw = NULL; 2662 release_firmware(rdev->me_fw); 2663 rdev->me_fw = NULL; 2664 release_firmware(rdev->rlc_fw); 2665 rdev->rlc_fw = NULL; 2666 release_firmware(rdev->smc_fw); 2667 rdev->smc_fw = NULL; 2668 } 2669 2670 static int r600_cp_load_microcode(struct radeon_device *rdev) 2671 { 2672 const __be32 *fw_data; 2673 int i; 2674 2675 if (!rdev->me_fw || !rdev->pfp_fw) 2676 return -EINVAL; 2677 2678 r600_cp_stop(rdev); 2679 2680 WREG32(CP_RB_CNTL, 2681 #ifdef __BIG_ENDIAN 2682 BUF_SWAP_32BIT | 2683 #endif 2684 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2685 2686 /* Reset cp */ 2687 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2688 RREG32(GRBM_SOFT_RESET); 2689 mdelay(15); 2690 WREG32(GRBM_SOFT_RESET, 0); 2691 2692 WREG32(CP_ME_RAM_WADDR, 0); 2693 2694 fw_data = (const __be32 *)rdev->me_fw->data; 2695 WREG32(CP_ME_RAM_WADDR, 0); 2696 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++) 2697 WREG32(CP_ME_RAM_DATA, 2698 be32_to_cpup(fw_data++)); 2699 2700 fw_data = (const __be32 *)rdev->pfp_fw->data; 2701 WREG32(CP_PFP_UCODE_ADDR, 0); 2702 for (i = 0; i < R600_PFP_UCODE_SIZE; i++) 2703 WREG32(CP_PFP_UCODE_DATA, 2704 be32_to_cpup(fw_data++)); 2705 2706 WREG32(CP_PFP_UCODE_ADDR, 0); 2707 WREG32(CP_ME_RAM_WADDR, 0); 2708 WREG32(CP_ME_RAM_RADDR, 0); 2709 return 0; 2710 } 2711 2712 int r600_cp_start(struct radeon_device *rdev) 2713 { 2714 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2715 int r; 2716 uint32_t cp_me; 2717 2718 r = radeon_ring_lock(rdev, ring, 7); 2719 if (r) { 2720 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2721 return r; 2722 } 2723 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2724 radeon_ring_write(ring, 0x1); 2725 if (rdev->family >= CHIP_RV770) { 2726 radeon_ring_write(ring, 0x0); 2727 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); 2728 } else { 2729 radeon_ring_write(ring, 0x3); 2730 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); 2731 } 2732 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2733 radeon_ring_write(ring, 0); 2734 radeon_ring_write(ring, 0); 2735 radeon_ring_unlock_commit(rdev, ring, false); 2736 2737 cp_me = 0xff; 2738 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2739 return 0; 2740 } 2741 2742 int r600_cp_resume(struct radeon_device *rdev) 2743 { 2744 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2745 u32 tmp; 2746 u32 rb_bufsz; 2747 int r; 2748 2749 /* Reset cp */ 2750 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2751 RREG32(GRBM_SOFT_RESET); 2752 mdelay(15); 2753 WREG32(GRBM_SOFT_RESET, 0); 2754 2755 /* Set ring buffer size */ 2756 rb_bufsz = order_base_2(ring->ring_size / 8); 2757 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2758 #ifdef __BIG_ENDIAN 2759 tmp |= BUF_SWAP_32BIT; 2760 #endif 2761 WREG32(CP_RB_CNTL, tmp); 2762 WREG32(CP_SEM_WAIT_TIMER, 0x0); 2763 2764 /* Set the write pointer delay */ 2765 WREG32(CP_RB_WPTR_DELAY, 0); 2766 2767 /* Initialize the ring buffer's read and write pointers */ 2768 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2769 WREG32(CP_RB_RPTR_WR, 0); 2770 ring->wptr = 0; 2771 WREG32(CP_RB_WPTR, ring->wptr); 2772 2773 /* set the wb address whether it's enabled or not */ 2774 WREG32(CP_RB_RPTR_ADDR, 2775 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 2776 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2777 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2778 2779 if (rdev->wb.enabled) 2780 WREG32(SCRATCH_UMSK, 0xff); 2781 else { 2782 tmp |= RB_NO_UPDATE; 2783 WREG32(SCRATCH_UMSK, 0); 2784 } 2785 2786 mdelay(1); 2787 WREG32(CP_RB_CNTL, tmp); 2788 2789 WREG32(CP_RB_BASE, ring->gpu_addr >> 8); 2790 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2791 2792 r600_cp_start(rdev); 2793 ring->ready = true; 2794 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 2795 if (r) { 2796 ring->ready = false; 2797 return r; 2798 } 2799 2800 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) 2801 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 2802 2803 return 0; 2804 } 2805 2806 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) 2807 { 2808 u32 rb_bufsz; 2809 int r; 2810 2811 /* Align ring size */ 2812 rb_bufsz = order_base_2(ring_size / 8); 2813 ring_size = (1 << (rb_bufsz + 1)) * 4; 2814 ring->ring_size = ring_size; 2815 ring->align_mask = 16 - 1; 2816 2817 if (radeon_ring_supports_scratch_reg(rdev, ring)) { 2818 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 2819 if (r) { 2820 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 2821 ring->rptr_save_reg = 0; 2822 } 2823 } 2824 } 2825 2826 void r600_cp_fini(struct radeon_device *rdev) 2827 { 2828 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2829 r600_cp_stop(rdev); 2830 radeon_ring_fini(rdev, ring); 2831 radeon_scratch_free(rdev, ring->rptr_save_reg); 2832 } 2833 2834 /* 2835 * GPU scratch registers helpers function. 2836 */ 2837 void r600_scratch_init(struct radeon_device *rdev) 2838 { 2839 int i; 2840 2841 rdev->scratch.num_reg = 7; 2842 rdev->scratch.reg_base = SCRATCH_REG0; 2843 for (i = 0; i < rdev->scratch.num_reg; i++) { 2844 rdev->scratch.free[i] = true; 2845 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 2846 } 2847 } 2848 2849 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 2850 { 2851 uint32_t scratch; 2852 uint32_t tmp = 0; 2853 unsigned i; 2854 int r; 2855 2856 r = radeon_scratch_get(rdev, &scratch); 2857 if (r) { 2858 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 2859 return r; 2860 } 2861 WREG32(scratch, 0xCAFEDEAD); 2862 r = radeon_ring_lock(rdev, ring, 3); 2863 if (r) { 2864 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); 2865 radeon_scratch_free(rdev, scratch); 2866 return r; 2867 } 2868 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2869 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2870 radeon_ring_write(ring, 0xDEADBEEF); 2871 radeon_ring_unlock_commit(rdev, ring, false); 2872 for (i = 0; i < rdev->usec_timeout; i++) { 2873 tmp = RREG32(scratch); 2874 if (tmp == 0xDEADBEEF) 2875 break; 2876 DRM_UDELAY(1); 2877 } 2878 if (i < rdev->usec_timeout) { 2879 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 2880 } else { 2881 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n", 2882 ring->idx, scratch, tmp); 2883 r = -EINVAL; 2884 } 2885 radeon_scratch_free(rdev, scratch); 2886 return r; 2887 } 2888 2889 /* 2890 * CP fences/semaphores 2891 */ 2892 2893 void r600_fence_ring_emit(struct radeon_device *rdev, 2894 struct radeon_fence *fence) 2895 { 2896 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2897 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | 2898 PACKET3_SH_ACTION_ENA; 2899 2900 if (rdev->family >= CHIP_RV770) 2901 cp_coher_cntl |= PACKET3_FULL_CACHE_ENA; 2902 2903 if (rdev->wb.use_event) { 2904 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 2905 /* flush read cache over gart */ 2906 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2907 radeon_ring_write(ring, cp_coher_cntl); 2908 radeon_ring_write(ring, 0xFFFFFFFF); 2909 radeon_ring_write(ring, 0); 2910 radeon_ring_write(ring, 10); /* poll interval */ 2911 /* EVENT_WRITE_EOP - flush caches, send int */ 2912 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2913 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2914 radeon_ring_write(ring, lower_32_bits(addr)); 2915 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 2916 radeon_ring_write(ring, fence->seq); 2917 radeon_ring_write(ring, 0); 2918 } else { 2919 /* flush read cache over gart */ 2920 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2921 radeon_ring_write(ring, cp_coher_cntl); 2922 radeon_ring_write(ring, 0xFFFFFFFF); 2923 radeon_ring_write(ring, 0); 2924 radeon_ring_write(ring, 10); /* poll interval */ 2925 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2926 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); 2927 /* wait for 3D idle clean */ 2928 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2929 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2930 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); 2931 /* Emit fence sequence & fire IRQ */ 2932 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2933 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2934 radeon_ring_write(ring, fence->seq); 2935 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 2936 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0)); 2937 radeon_ring_write(ring, RB_INT_STAT); 2938 } 2939 } 2940 2941 /** 2942 * r600_semaphore_ring_emit - emit a semaphore on the CP ring 2943 * 2944 * @rdev: radeon_device pointer 2945 * @ring: radeon ring buffer object 2946 * @semaphore: radeon semaphore object 2947 * @emit_wait: Is this a sempahore wait? 2948 * 2949 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP 2950 * from running ahead of semaphore waits. 2951 */ 2952 bool r600_semaphore_ring_emit(struct radeon_device *rdev, 2953 struct radeon_ring *ring, 2954 struct radeon_semaphore *semaphore, 2955 bool emit_wait) 2956 { 2957 uint64_t addr = semaphore->gpu_addr; 2958 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 2959 2960 if (rdev->family < CHIP_CAYMAN) 2961 sel |= PACKET3_SEM_WAIT_ON_SIGNAL; 2962 2963 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2964 radeon_ring_write(ring, lower_32_bits(addr)); 2965 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2966 2967 /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */ 2968 if (emit_wait && (rdev->family >= CHIP_CEDAR)) { 2969 /* Prevent the PFP from running ahead of the semaphore wait */ 2970 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 2971 radeon_ring_write(ring, 0x0); 2972 } 2973 2974 return true; 2975 } 2976 2977 /** 2978 * r600_copy_cpdma - copy pages using the CP DMA engine 2979 * 2980 * @rdev: radeon_device pointer 2981 * @src_offset: src GPU address 2982 * @dst_offset: dst GPU address 2983 * @num_gpu_pages: number of GPU pages to xfer 2984 * @fence: radeon fence object 2985 * 2986 * Copy GPU paging using the CP DMA engine (r6xx+). 2987 * Used by the radeon ttm implementation to move pages if 2988 * registered as the asic copy callback. 2989 */ 2990 int r600_copy_cpdma(struct radeon_device *rdev, 2991 uint64_t src_offset, uint64_t dst_offset, 2992 unsigned num_gpu_pages, 2993 struct radeon_fence **fence) 2994 { 2995 struct radeon_semaphore *sem = NULL; 2996 int ring_index = rdev->asic->copy.blit_ring_index; 2997 struct radeon_ring *ring = &rdev->ring[ring_index]; 2998 u32 size_in_bytes, cur_size_in_bytes, tmp; 2999 int i, num_loops; 3000 int r = 0; 3001 3002 r = radeon_semaphore_create(rdev, &sem); 3003 if (r) { 3004 DRM_ERROR("radeon: moving bo (%d).\n", r); 3005 return r; 3006 } 3007 3008 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 3009 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 3010 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); 3011 if (r) { 3012 DRM_ERROR("radeon: moving bo (%d).\n", r); 3013 radeon_semaphore_free(rdev, &sem, NULL); 3014 return r; 3015 } 3016 3017 radeon_semaphore_sync_to(sem, *fence); 3018 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 3019 3020 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3021 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3022 radeon_ring_write(ring, WAIT_3D_IDLE_bit); 3023 for (i = 0; i < num_loops; i++) { 3024 cur_size_in_bytes = size_in_bytes; 3025 if (cur_size_in_bytes > 0x1fffff) 3026 cur_size_in_bytes = 0x1fffff; 3027 size_in_bytes -= cur_size_in_bytes; 3028 tmp = upper_32_bits(src_offset) & 0xff; 3029 if (size_in_bytes == 0) 3030 tmp |= PACKET3_CP_DMA_CP_SYNC; 3031 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); 3032 radeon_ring_write(ring, lower_32_bits(src_offset)); 3033 radeon_ring_write(ring, tmp); 3034 radeon_ring_write(ring, lower_32_bits(dst_offset)); 3035 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); 3036 radeon_ring_write(ring, cur_size_in_bytes); 3037 src_offset += cur_size_in_bytes; 3038 dst_offset += cur_size_in_bytes; 3039 } 3040 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3041 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3042 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); 3043 3044 r = radeon_fence_emit(rdev, fence, ring->idx); 3045 if (r) { 3046 radeon_ring_unlock_undo(rdev, ring); 3047 radeon_semaphore_free(rdev, &sem, NULL); 3048 return r; 3049 } 3050 3051 radeon_ring_unlock_commit(rdev, ring, false); 3052 radeon_semaphore_free(rdev, &sem, *fence); 3053 3054 return r; 3055 } 3056 3057 int r600_set_surface_reg(struct radeon_device *rdev, int reg, 3058 uint32_t tiling_flags, uint32_t pitch, 3059 uint32_t offset, uint32_t obj_size) 3060 { 3061 /* FIXME: implement */ 3062 return 0; 3063 } 3064 3065 void r600_clear_surface_reg(struct radeon_device *rdev, int reg) 3066 { 3067 /* FIXME: implement */ 3068 } 3069 3070 static void r600_uvd_init(struct radeon_device *rdev) 3071 { 3072 int r; 3073 3074 if (!rdev->has_uvd) 3075 return; 3076 3077 r = radeon_uvd_init(rdev); 3078 if (r) { 3079 dev_err(rdev->dev, "failed UVD (%d) init.\n", r); 3080 /* 3081 * At this point rdev->uvd.vcpu_bo is NULL which trickles down 3082 * to early fails uvd_v1_0_resume() and thus nothing happens 3083 * there. So it is pointless to try to go through that code 3084 * hence why we disable uvd here. 3085 */ 3086 rdev->has_uvd = 0; 3087 return; 3088 } 3089 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; 3090 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096); 3091 } 3092 3093 static void r600_uvd_start(struct radeon_device *rdev) 3094 { 3095 int r; 3096 3097 if (!rdev->has_uvd) 3098 return; 3099 3100 r = uvd_v1_0_resume(rdev); 3101 if (r) { 3102 dev_err(rdev->dev, "failed UVD resume (%d).\n", r); 3103 goto error; 3104 } 3105 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); 3106 if (r) { 3107 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r); 3108 goto error; 3109 } 3110 return; 3111 3112 error: 3113 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; 3114 } 3115 3116 static void r600_uvd_resume(struct radeon_device *rdev) 3117 { 3118 struct radeon_ring *ring; 3119 int r; 3120 3121 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size) 3122 return; 3123 3124 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 3125 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); 3126 if (r) { 3127 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); 3128 return; 3129 } 3130 r = uvd_v1_0_init(rdev); 3131 if (r) { 3132 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r); 3133 return; 3134 } 3135 } 3136 3137 static int r600_startup(struct radeon_device *rdev) 3138 { 3139 struct radeon_ring *ring; 3140 int r; 3141 3142 /* enable pcie gen2 link */ 3143 r600_pcie_gen2_enable(rdev); 3144 3145 /* scratch needs to be initialized before MC */ 3146 r = r600_vram_scratch_init(rdev); 3147 if (r) 3148 return r; 3149 3150 r600_mc_program(rdev); 3151 3152 if (rdev->flags & RADEON_IS_AGP) { 3153 r600_agp_enable(rdev); 3154 } else { 3155 r = r600_pcie_gart_enable(rdev); 3156 if (r) 3157 return r; 3158 } 3159 r600_gpu_init(rdev); 3160 3161 /* allocate wb buffer */ 3162 r = radeon_wb_init(rdev); 3163 if (r) 3164 return r; 3165 3166 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 3167 if (r) { 3168 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 3169 return r; 3170 } 3171 3172 r600_uvd_start(rdev); 3173 3174 /* Enable IRQ */ 3175 if (!rdev->irq.installed) { 3176 r = radeon_irq_kms_init(rdev); 3177 if (r) 3178 return r; 3179 } 3180 3181 r = r600_irq_init(rdev); 3182 if (r) { 3183 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3184 radeon_irq_kms_fini(rdev); 3185 return r; 3186 } 3187 r600_irq_set(rdev); 3188 3189 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3190 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 3191 RADEON_CP_PACKET2); 3192 if (r) 3193 return r; 3194 3195 r = r600_cp_load_microcode(rdev); 3196 if (r) 3197 return r; 3198 r = r600_cp_resume(rdev); 3199 if (r) 3200 return r; 3201 3202 r600_uvd_resume(rdev); 3203 3204 r = radeon_ib_pool_init(rdev); 3205 if (r) { 3206 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 3207 return r; 3208 } 3209 3210 r = radeon_audio_init(rdev); 3211 if (r) { 3212 DRM_ERROR("radeon: audio init failed\n"); 3213 return r; 3214 } 3215 3216 return 0; 3217 } 3218 3219 void r600_vga_set_state(struct radeon_device *rdev, bool state) 3220 { 3221 uint32_t temp; 3222 3223 temp = RREG32(CONFIG_CNTL); 3224 if (state == false) { 3225 temp &= ~(1<<0); 3226 temp |= (1<<1); 3227 } else { 3228 temp &= ~(1<<1); 3229 } 3230 WREG32(CONFIG_CNTL, temp); 3231 } 3232 3233 int r600_resume(struct radeon_device *rdev) 3234 { 3235 int r; 3236 3237 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, 3238 * posting will perform necessary task to bring back GPU into good 3239 * shape. 3240 */ 3241 /* post card */ 3242 atom_asic_init(rdev->mode_info.atom_context); 3243 3244 if (rdev->pm.pm_method == PM_METHOD_DPM) 3245 radeon_pm_resume(rdev); 3246 3247 rdev->accel_working = true; 3248 r = r600_startup(rdev); 3249 if (r) { 3250 DRM_ERROR("r600 startup failed on resume\n"); 3251 rdev->accel_working = false; 3252 return r; 3253 } 3254 3255 return r; 3256 } 3257 3258 int r600_suspend(struct radeon_device *rdev) 3259 { 3260 radeon_pm_suspend(rdev); 3261 radeon_audio_fini(rdev); 3262 r600_cp_stop(rdev); 3263 if (rdev->has_uvd) { 3264 uvd_v1_0_fini(rdev); 3265 radeon_uvd_suspend(rdev); 3266 } 3267 r600_irq_suspend(rdev); 3268 radeon_wb_disable(rdev); 3269 r600_pcie_gart_disable(rdev); 3270 3271 return 0; 3272 } 3273 3274 /* Plan is to move initialization in that function and use 3275 * helper function so that radeon_device_init pretty much 3276 * do nothing more than calling asic specific function. This 3277 * should also allow to remove a bunch of callback function 3278 * like vram_info. 3279 */ 3280 int r600_init(struct radeon_device *rdev) 3281 { 3282 int r; 3283 3284 if (r600_debugfs_mc_info_init(rdev)) { 3285 DRM_ERROR("Failed to register debugfs file for mc !\n"); 3286 } 3287 /* Read BIOS */ 3288 if (!radeon_get_bios(rdev)) { 3289 if (ASIC_IS_AVIVO(rdev)) 3290 return -EINVAL; 3291 } 3292 /* Must be an ATOMBIOS */ 3293 if (!rdev->is_atom_bios) { 3294 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); 3295 return -EINVAL; 3296 } 3297 r = radeon_atombios_init(rdev); 3298 if (r) 3299 return r; 3300 /* Post card if necessary */ 3301 if (!radeon_card_posted(rdev)) { 3302 if (!rdev->bios) { 3303 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3304 return -EINVAL; 3305 } 3306 DRM_INFO("GPU not posted. posting now...\n"); 3307 atom_asic_init(rdev->mode_info.atom_context); 3308 } 3309 /* Initialize scratch registers */ 3310 r600_scratch_init(rdev); 3311 /* Initialize surface registers */ 3312 radeon_surface_init(rdev); 3313 /* Initialize clocks */ 3314 radeon_get_clock_info(rdev->ddev); 3315 /* Fence driver */ 3316 r = radeon_fence_driver_init(rdev); 3317 if (r) 3318 return r; 3319 if (rdev->flags & RADEON_IS_AGP) { 3320 r = radeon_agp_init(rdev); 3321 if (r) 3322 radeon_agp_disable(rdev); 3323 } 3324 r = r600_mc_init(rdev); 3325 if (r) 3326 return r; 3327 /* Memory manager */ 3328 r = radeon_bo_init(rdev); 3329 if (r) 3330 return r; 3331 3332 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 3333 r = r600_init_microcode(rdev); 3334 if (r) { 3335 DRM_ERROR("Failed to load firmware!\n"); 3336 return r; 3337 } 3338 } 3339 3340 /* Initialize power management */ 3341 radeon_pm_init(rdev); 3342 3343 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3344 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3345 3346 r600_uvd_init(rdev); 3347 3348 rdev->ih.ring_obj = NULL; 3349 r600_ih_ring_init(rdev, 64 * 1024); 3350 3351 r = r600_pcie_gart_init(rdev); 3352 if (r) 3353 return r; 3354 3355 rdev->accel_working = true; 3356 r = r600_startup(rdev); 3357 if (r) { 3358 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3359 r600_cp_fini(rdev); 3360 r600_irq_fini(rdev); 3361 radeon_wb_fini(rdev); 3362 radeon_ib_pool_fini(rdev); 3363 radeon_irq_kms_fini(rdev); 3364 r600_pcie_gart_fini(rdev); 3365 rdev->accel_working = false; 3366 } 3367 3368 return 0; 3369 } 3370 3371 void r600_fini(struct radeon_device *rdev) 3372 { 3373 radeon_pm_fini(rdev); 3374 radeon_audio_fini(rdev); 3375 r600_cp_fini(rdev); 3376 r600_irq_fini(rdev); 3377 if (rdev->has_uvd) { 3378 uvd_v1_0_fini(rdev); 3379 radeon_uvd_fini(rdev); 3380 } 3381 radeon_wb_fini(rdev); 3382 radeon_ib_pool_fini(rdev); 3383 radeon_irq_kms_fini(rdev); 3384 r600_pcie_gart_fini(rdev); 3385 r600_vram_scratch_fini(rdev); 3386 radeon_agp_fini(rdev); 3387 radeon_gem_fini(rdev); 3388 radeon_fence_driver_fini(rdev); 3389 radeon_bo_fini(rdev); 3390 radeon_atombios_fini(rdev); 3391 r600_fini_microcode(rdev); 3392 kfree(rdev->bios); 3393 rdev->bios = NULL; 3394 } 3395 3396 3397 /* 3398 * CS stuff 3399 */ 3400 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3401 { 3402 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3403 u32 next_rptr; 3404 3405 if (ring->rptr_save_reg) { 3406 next_rptr = ring->wptr + 3 + 4; 3407 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3408 radeon_ring_write(ring, ((ring->rptr_save_reg - 3409 PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 3410 radeon_ring_write(ring, next_rptr); 3411 } else if (rdev->wb.enabled) { 3412 next_rptr = ring->wptr + 5 + 4; 3413 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); 3414 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3415 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); 3416 radeon_ring_write(ring, next_rptr); 3417 radeon_ring_write(ring, 0); 3418 } 3419 3420 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3421 radeon_ring_write(ring, 3422 #ifdef __BIG_ENDIAN 3423 (2 << 0) | 3424 #endif 3425 (ib->gpu_addr & 0xFFFFFFFC)); 3426 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 3427 radeon_ring_write(ring, ib->length_dw); 3428 } 3429 3430 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3431 { 3432 struct radeon_ib ib; 3433 uint32_t scratch; 3434 uint32_t tmp = 0; 3435 unsigned i; 3436 int r; 3437 3438 r = radeon_scratch_get(rdev, &scratch); 3439 if (r) { 3440 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3441 return r; 3442 } 3443 WREG32(scratch, 0xCAFEDEAD); 3444 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3445 if (r) { 3446 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3447 goto free_scratch; 3448 } 3449 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); 3450 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3451 ib.ptr[2] = 0xDEADBEEF; 3452 ib.length_dw = 3; 3453 r = radeon_ib_schedule(rdev, &ib, NULL, false); 3454 if (r) { 3455 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3456 goto free_ib; 3457 } 3458 r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( 3459 RADEON_USEC_IB_TEST_TIMEOUT)); 3460 if (r < 0) { 3461 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3462 goto free_ib; 3463 } else if (r == 0) { 3464 DRM_ERROR("radeon: fence wait timed out.\n"); 3465 #if 0 3466 r = -ETIMEDOUT; 3467 goto free_ib; 3468 #endif 3469 } 3470 r = 0; 3471 for (i = 0; i < rdev->usec_timeout; i++) { 3472 tmp = RREG32(scratch); 3473 if (tmp == 0xDEADBEEF) 3474 break; 3475 DRM_UDELAY(1); 3476 } 3477 if (i < rdev->usec_timeout) { 3478 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); 3479 } else { 3480 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", 3481 scratch, tmp); 3482 r = -EINVAL; 3483 } 3484 free_ib: 3485 radeon_ib_free(rdev, &ib); 3486 free_scratch: 3487 radeon_scratch_free(rdev, scratch); 3488 return r; 3489 } 3490 3491 /* 3492 * Interrupts 3493 * 3494 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty 3495 * the same as the CP ring buffer, but in reverse. Rather than the CPU 3496 * writing to the ring and the GPU consuming, the GPU writes to the ring 3497 * and host consumes. As the host irq handler processes interrupts, it 3498 * increments the rptr. When the rptr catches up with the wptr, all the 3499 * current interrupts have been processed. 3500 */ 3501 3502 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) 3503 { 3504 u32 rb_bufsz; 3505 3506 /* Align ring size */ 3507 rb_bufsz = order_base_2(ring_size / 4); 3508 ring_size = (1 << rb_bufsz) * 4; 3509 rdev->ih.ring_size = ring_size; 3510 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3511 rdev->ih.rptr = 0; 3512 } 3513 3514 int r600_ih_ring_alloc(struct radeon_device *rdev) 3515 { 3516 int r; 3517 void *ring_ptr; 3518 3519 /* Allocate ring buffer */ 3520 if (rdev->ih.ring_obj == NULL) { 3521 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3522 PAGE_SIZE, true, 3523 RADEON_GEM_DOMAIN_GTT, 0, 3524 NULL, &rdev->ih.ring_obj); 3525 if (r) { 3526 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3527 return r; 3528 } 3529 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3530 if (unlikely(r != 0)) 3531 return r; 3532 r = radeon_bo_pin(rdev->ih.ring_obj, 3533 RADEON_GEM_DOMAIN_GTT, 3534 (u64 *)&rdev->ih.gpu_addr); 3535 if (r) { 3536 radeon_bo_unreserve(rdev->ih.ring_obj); 3537 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); 3538 return r; 3539 } 3540 ring_ptr = &rdev->ih.ring; 3541 r = radeon_bo_kmap(rdev->ih.ring_obj, 3542 ring_ptr); 3543 radeon_bo_unreserve(rdev->ih.ring_obj); 3544 if (r) { 3545 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); 3546 return r; 3547 } 3548 } 3549 return 0; 3550 } 3551 3552 void r600_ih_ring_fini(struct radeon_device *rdev) 3553 { 3554 int r; 3555 if (rdev->ih.ring_obj) { 3556 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3557 if (likely(r == 0)) { 3558 radeon_bo_kunmap(rdev->ih.ring_obj); 3559 radeon_bo_unpin(rdev->ih.ring_obj); 3560 radeon_bo_unreserve(rdev->ih.ring_obj); 3561 } 3562 radeon_bo_unref(&rdev->ih.ring_obj); 3563 rdev->ih.ring = NULL; 3564 rdev->ih.ring_obj = NULL; 3565 } 3566 } 3567 3568 void r600_rlc_stop(struct radeon_device *rdev) 3569 { 3570 3571 if ((rdev->family >= CHIP_RV770) && 3572 (rdev->family <= CHIP_RV740)) { 3573 /* r7xx asics need to soft reset RLC before halting */ 3574 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 3575 RREG32(SRBM_SOFT_RESET); 3576 mdelay(15); 3577 WREG32(SRBM_SOFT_RESET, 0); 3578 RREG32(SRBM_SOFT_RESET); 3579 } 3580 3581 WREG32(RLC_CNTL, 0); 3582 } 3583 3584 static void r600_rlc_start(struct radeon_device *rdev) 3585 { 3586 WREG32(RLC_CNTL, RLC_ENABLE); 3587 } 3588 3589 static int r600_rlc_resume(struct radeon_device *rdev) 3590 { 3591 u32 i; 3592 const __be32 *fw_data; 3593 3594 if (!rdev->rlc_fw) 3595 return -EINVAL; 3596 3597 r600_rlc_stop(rdev); 3598 3599 WREG32(RLC_HB_CNTL, 0); 3600 3601 WREG32(RLC_HB_BASE, 0); 3602 WREG32(RLC_HB_RPTR, 0); 3603 WREG32(RLC_HB_WPTR, 0); 3604 WREG32(RLC_HB_WPTR_LSB_ADDR, 0); 3605 WREG32(RLC_HB_WPTR_MSB_ADDR, 0); 3606 WREG32(RLC_MC_CNTL, 0); 3607 WREG32(RLC_UCODE_CNTL, 0); 3608 3609 fw_data = (const __be32 *)rdev->rlc_fw->data; 3610 if (rdev->family >= CHIP_RV770) { 3611 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3612 WREG32(RLC_UCODE_ADDR, i); 3613 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3614 } 3615 } else { 3616 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) { 3617 WREG32(RLC_UCODE_ADDR, i); 3618 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3619 } 3620 } 3621 WREG32(RLC_UCODE_ADDR, 0); 3622 3623 r600_rlc_start(rdev); 3624 3625 return 0; 3626 } 3627 3628 static void r600_enable_interrupts(struct radeon_device *rdev) 3629 { 3630 u32 ih_cntl = RREG32(IH_CNTL); 3631 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3632 3633 ih_cntl |= ENABLE_INTR; 3634 ih_rb_cntl |= IH_RB_ENABLE; 3635 WREG32(IH_CNTL, ih_cntl); 3636 WREG32(IH_RB_CNTL, ih_rb_cntl); 3637 rdev->ih.enabled = true; 3638 } 3639 3640 void r600_disable_interrupts(struct radeon_device *rdev) 3641 { 3642 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3643 u32 ih_cntl = RREG32(IH_CNTL); 3644 3645 ih_rb_cntl &= ~IH_RB_ENABLE; 3646 ih_cntl &= ~ENABLE_INTR; 3647 WREG32(IH_RB_CNTL, ih_rb_cntl); 3648 WREG32(IH_CNTL, ih_cntl); 3649 /* set rptr, wptr to 0 */ 3650 WREG32(IH_RB_RPTR, 0); 3651 WREG32(IH_RB_WPTR, 0); 3652 rdev->ih.enabled = false; 3653 rdev->ih.rptr = 0; 3654 } 3655 3656 static void r600_disable_interrupt_state(struct radeon_device *rdev) 3657 { 3658 u32 tmp; 3659 3660 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3661 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3662 WREG32(DMA_CNTL, tmp); 3663 WREG32(GRBM_INT_CNTL, 0); 3664 WREG32(DxMODE_INT_MASK, 0); 3665 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3666 WREG32(D2GRPH_INTERRUPT_CONTROL, 0); 3667 if (ASIC_IS_DCE3(rdev)) { 3668 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); 3669 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); 3670 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3671 WREG32(DC_HPD1_INT_CONTROL, tmp); 3672 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3673 WREG32(DC_HPD2_INT_CONTROL, tmp); 3674 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3675 WREG32(DC_HPD3_INT_CONTROL, tmp); 3676 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3677 WREG32(DC_HPD4_INT_CONTROL, tmp); 3678 if (ASIC_IS_DCE32(rdev)) { 3679 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3680 WREG32(DC_HPD5_INT_CONTROL, tmp); 3681 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3682 WREG32(DC_HPD6_INT_CONTROL, tmp); 3683 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3684 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 3685 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3686 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 3687 } else { 3688 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3689 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3690 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3691 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 3692 } 3693 } else { 3694 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 3695 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 3696 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3697 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3698 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3699 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3700 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3701 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3702 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3703 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3704 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3705 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 3706 } 3707 } 3708 3709 int r600_irq_init(struct radeon_device *rdev) 3710 { 3711 int ret = 0; 3712 int rb_bufsz; 3713 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 3714 3715 /* allocate ring */ 3716 ret = r600_ih_ring_alloc(rdev); 3717 if (ret) 3718 return ret; 3719 3720 /* disable irqs */ 3721 r600_disable_interrupts(rdev); 3722 3723 /* init rlc */ 3724 if (rdev->family >= CHIP_CEDAR) 3725 ret = evergreen_rlc_resume(rdev); 3726 else 3727 ret = r600_rlc_resume(rdev); 3728 if (ret) { 3729 r600_ih_ring_fini(rdev); 3730 return ret; 3731 } 3732 3733 /* setup interrupt control */ 3734 /* set dummy read address to ring address */ 3735 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); 3736 interrupt_cntl = RREG32(INTERRUPT_CNTL); 3737 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi 3738 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN 3739 */ 3740 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; 3741 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ 3742 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; 3743 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3744 3745 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3746 rb_bufsz = order_base_2(rdev->ih.ring_size / 4); 3747 3748 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3749 IH_WPTR_OVERFLOW_CLEAR | 3750 (rb_bufsz << 1)); 3751 3752 if (rdev->wb.enabled) 3753 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; 3754 3755 /* set the writeback address whether it's enabled or not */ 3756 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); 3757 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); 3758 3759 WREG32(IH_RB_CNTL, ih_rb_cntl); 3760 3761 /* set rptr, wptr to 0 */ 3762 WREG32(IH_RB_RPTR, 0); 3763 WREG32(IH_RB_WPTR, 0); 3764 3765 /* Default settings for IH_CNTL (disabled at first) */ 3766 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); 3767 /* RPTR_REARM only works if msi's are enabled */ 3768 if (rdev->msi_enabled) 3769 ih_cntl |= RPTR_REARM; 3770 WREG32(IH_CNTL, ih_cntl); 3771 3772 /* force the active interrupt state to all disabled */ 3773 if (rdev->family >= CHIP_CEDAR) 3774 evergreen_disable_interrupt_state(rdev); 3775 else 3776 r600_disable_interrupt_state(rdev); 3777 3778 /* at this point everything should be setup correctly to enable master */ 3779 pci_enable_busmaster(rdev->dev->bsddev); 3780 3781 /* enable irqs */ 3782 r600_enable_interrupts(rdev); 3783 3784 return ret; 3785 } 3786 3787 void r600_irq_suspend(struct radeon_device *rdev) 3788 { 3789 r600_irq_disable(rdev); 3790 r600_rlc_stop(rdev); 3791 } 3792 3793 void r600_irq_fini(struct radeon_device *rdev) 3794 { 3795 r600_irq_suspend(rdev); 3796 r600_ih_ring_fini(rdev); 3797 } 3798 3799 int r600_irq_set(struct radeon_device *rdev) 3800 { 3801 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 3802 u32 mode_int = 0; 3803 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 3804 u32 grbm_int_cntl = 0; 3805 u32 hdmi0, hdmi1; 3806 u32 dma_cntl; 3807 u32 thermal_int = 0; 3808 3809 if (!rdev->irq.installed) { 3810 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3811 return -EINVAL; 3812 } 3813 /* don't enable anything if the ih is disabled */ 3814 if (!rdev->ih.enabled) { 3815 r600_disable_interrupts(rdev); 3816 /* force the active interrupt state to all disabled */ 3817 r600_disable_interrupt_state(rdev); 3818 return 0; 3819 } 3820 3821 if (ASIC_IS_DCE3(rdev)) { 3822 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3823 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3824 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3825 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 3826 if (ASIC_IS_DCE32(rdev)) { 3827 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 3828 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 3829 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 3830 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 3831 } else { 3832 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3833 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3834 } 3835 } else { 3836 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3837 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3838 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3839 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3840 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3841 } 3842 3843 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3844 3845 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) { 3846 thermal_int = RREG32(CG_THERMAL_INT) & 3847 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 3848 } else if (rdev->family >= CHIP_RV770) { 3849 thermal_int = RREG32(RV770_CG_THERMAL_INT) & 3850 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 3851 } 3852 if (rdev->irq.dpm_thermal) { 3853 DRM_DEBUG("dpm thermal\n"); 3854 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; 3855 } 3856 3857 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 3858 DRM_DEBUG("r600_irq_set: sw int\n"); 3859 cp_int_cntl |= RB_INT_ENABLE; 3860 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 3861 } 3862 3863 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { 3864 DRM_DEBUG("r600_irq_set: sw int dma\n"); 3865 dma_cntl |= TRAP_ENABLE; 3866 } 3867 3868 if (rdev->irq.crtc_vblank_int[0] || 3869 atomic_read(&rdev->irq.pflip[0])) { 3870 DRM_DEBUG("r600_irq_set: vblank 0\n"); 3871 mode_int |= D1MODE_VBLANK_INT_MASK; 3872 } 3873 if (rdev->irq.crtc_vblank_int[1] || 3874 atomic_read(&rdev->irq.pflip[1])) { 3875 DRM_DEBUG("r600_irq_set: vblank 1\n"); 3876 mode_int |= D2MODE_VBLANK_INT_MASK; 3877 } 3878 if (rdev->irq.hpd[0]) { 3879 DRM_DEBUG("r600_irq_set: hpd 1\n"); 3880 hpd1 |= DC_HPDx_INT_EN; 3881 } 3882 if (rdev->irq.hpd[1]) { 3883 DRM_DEBUG("r600_irq_set: hpd 2\n"); 3884 hpd2 |= DC_HPDx_INT_EN; 3885 } 3886 if (rdev->irq.hpd[2]) { 3887 DRM_DEBUG("r600_irq_set: hpd 3\n"); 3888 hpd3 |= DC_HPDx_INT_EN; 3889 } 3890 if (rdev->irq.hpd[3]) { 3891 DRM_DEBUG("r600_irq_set: hpd 4\n"); 3892 hpd4 |= DC_HPDx_INT_EN; 3893 } 3894 if (rdev->irq.hpd[4]) { 3895 DRM_DEBUG("r600_irq_set: hpd 5\n"); 3896 hpd5 |= DC_HPDx_INT_EN; 3897 } 3898 if (rdev->irq.hpd[5]) { 3899 DRM_DEBUG("r600_irq_set: hpd 6\n"); 3900 hpd6 |= DC_HPDx_INT_EN; 3901 } 3902 if (rdev->irq.afmt[0]) { 3903 DRM_DEBUG("r600_irq_set: hdmi 0\n"); 3904 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 3905 } 3906 if (rdev->irq.afmt[1]) { 3907 DRM_DEBUG("r600_irq_set: hdmi 0\n"); 3908 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 3909 } 3910 3911 WREG32(CP_INT_CNTL, cp_int_cntl); 3912 WREG32(DMA_CNTL, dma_cntl); 3913 WREG32(DxMODE_INT_MASK, mode_int); 3914 WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); 3915 WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); 3916 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3917 if (ASIC_IS_DCE3(rdev)) { 3918 WREG32(DC_HPD1_INT_CONTROL, hpd1); 3919 WREG32(DC_HPD2_INT_CONTROL, hpd2); 3920 WREG32(DC_HPD3_INT_CONTROL, hpd3); 3921 WREG32(DC_HPD4_INT_CONTROL, hpd4); 3922 if (ASIC_IS_DCE32(rdev)) { 3923 WREG32(DC_HPD5_INT_CONTROL, hpd5); 3924 WREG32(DC_HPD6_INT_CONTROL, hpd6); 3925 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0); 3926 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1); 3927 } else { 3928 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 3929 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 3930 } 3931 } else { 3932 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 3933 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 3934 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 3935 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 3936 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 3937 } 3938 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) { 3939 WREG32(CG_THERMAL_INT, thermal_int); 3940 } else if (rdev->family >= CHIP_RV770) { 3941 WREG32(RV770_CG_THERMAL_INT, thermal_int); 3942 } 3943 3944 /* posting read */ 3945 RREG32(R_000E50_SRBM_STATUS); 3946 3947 return 0; 3948 } 3949 3950 static void r600_irq_ack(struct radeon_device *rdev) 3951 { 3952 u32 tmp; 3953 3954 if (ASIC_IS_DCE3(rdev)) { 3955 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); 3956 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); 3957 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); 3958 if (ASIC_IS_DCE32(rdev)) { 3959 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0); 3960 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1); 3961 } else { 3962 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 3963 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS); 3964 } 3965 } else { 3966 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); 3967 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 3968 rdev->irq.stat_regs.r600.disp_int_cont2 = 0; 3969 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 3970 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS); 3971 } 3972 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); 3973 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); 3974 3975 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) 3976 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 3977 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) 3978 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 3979 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) 3980 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3981 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) 3982 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3983 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) 3984 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3985 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) 3986 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3987 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { 3988 if (ASIC_IS_DCE3(rdev)) { 3989 tmp = RREG32(DC_HPD1_INT_CONTROL); 3990 tmp |= DC_HPDx_INT_ACK; 3991 WREG32(DC_HPD1_INT_CONTROL, tmp); 3992 } else { 3993 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); 3994 tmp |= DC_HPDx_INT_ACK; 3995 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3996 } 3997 } 3998 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { 3999 if (ASIC_IS_DCE3(rdev)) { 4000 tmp = RREG32(DC_HPD2_INT_CONTROL); 4001 tmp |= DC_HPDx_INT_ACK; 4002 WREG32(DC_HPD2_INT_CONTROL, tmp); 4003 } else { 4004 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); 4005 tmp |= DC_HPDx_INT_ACK; 4006 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 4007 } 4008 } 4009 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { 4010 if (ASIC_IS_DCE3(rdev)) { 4011 tmp = RREG32(DC_HPD3_INT_CONTROL); 4012 tmp |= DC_HPDx_INT_ACK; 4013 WREG32(DC_HPD3_INT_CONTROL, tmp); 4014 } else { 4015 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); 4016 tmp |= DC_HPDx_INT_ACK; 4017 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 4018 } 4019 } 4020 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { 4021 tmp = RREG32(DC_HPD4_INT_CONTROL); 4022 tmp |= DC_HPDx_INT_ACK; 4023 WREG32(DC_HPD4_INT_CONTROL, tmp); 4024 } 4025 if (ASIC_IS_DCE32(rdev)) { 4026 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { 4027 tmp = RREG32(DC_HPD5_INT_CONTROL); 4028 tmp |= DC_HPDx_INT_ACK; 4029 WREG32(DC_HPD5_INT_CONTROL, tmp); 4030 } 4031 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { 4032 tmp = RREG32(DC_HPD5_INT_CONTROL); 4033 tmp |= DC_HPDx_INT_ACK; 4034 WREG32(DC_HPD6_INT_CONTROL, tmp); 4035 } 4036 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) { 4037 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0); 4038 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 4039 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 4040 } 4041 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) { 4042 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1); 4043 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 4044 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 4045 } 4046 } else { 4047 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { 4048 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL); 4049 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; 4050 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 4051 } 4052 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { 4053 if (ASIC_IS_DCE3(rdev)) { 4054 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL); 4055 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; 4056 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 4057 } else { 4058 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL); 4059 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; 4060 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 4061 } 4062 } 4063 } 4064 } 4065 4066 void r600_irq_disable(struct radeon_device *rdev) 4067 { 4068 r600_disable_interrupts(rdev); 4069 /* Wait and acknowledge irq */ 4070 mdelay(1); 4071 r600_irq_ack(rdev); 4072 r600_disable_interrupt_state(rdev); 4073 } 4074 4075 static u32 r600_get_ih_wptr(struct radeon_device *rdev) 4076 { 4077 u32 wptr, tmp; 4078 4079 if (rdev->wb.enabled) 4080 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 4081 else 4082 wptr = RREG32(IH_RB_WPTR); 4083 4084 if (wptr & RB_OVERFLOW) { 4085 wptr &= ~RB_OVERFLOW; 4086 /* When a ring buffer overflow happen start parsing interrupt 4087 * from the last not overwritten vector (wptr + 16). Hopefully 4088 * this should allow us to catchup. 4089 */ 4090 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 4091 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); 4092 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 4093 tmp = RREG32(IH_RB_CNTL); 4094 tmp |= IH_WPTR_OVERFLOW_CLEAR; 4095 WREG32(IH_RB_CNTL, tmp); 4096 } 4097 return (wptr & rdev->ih.ptr_mask); 4098 } 4099 4100 /* r600 IV Ring 4101 * Each IV ring entry is 128 bits: 4102 * [7:0] - interrupt source id 4103 * [31:8] - reserved 4104 * [59:32] - interrupt source data 4105 * [127:60] - reserved 4106 * 4107 * The basic interrupt vector entries 4108 * are decoded as follows: 4109 * src_id src_data description 4110 * 1 0 D1 Vblank 4111 * 1 1 D1 Vline 4112 * 5 0 D2 Vblank 4113 * 5 1 D2 Vline 4114 * 19 0 FP Hot plug detection A 4115 * 19 1 FP Hot plug detection B 4116 * 19 2 DAC A auto-detection 4117 * 19 3 DAC B auto-detection 4118 * 21 4 HDMI block A 4119 * 21 5 HDMI block B 4120 * 176 - CP_INT RB 4121 * 177 - CP_INT IB1 4122 * 178 - CP_INT IB2 4123 * 181 - EOP Interrupt 4124 * 233 - GUI Idle 4125 * 4126 * Note, these are based on r600 and may need to be 4127 * adjusted or added to on newer asics 4128 */ 4129 4130 irqreturn_t r600_irq_process(struct radeon_device *rdev) 4131 { 4132 u32 wptr; 4133 u32 rptr; 4134 u32 src_id, src_data; 4135 u32 ring_index; 4136 bool queue_hotplug = false; 4137 bool queue_hdmi = false; 4138 bool queue_thermal = false; 4139 4140 if (!rdev->ih.enabled || rdev->shutdown) 4141 return IRQ_NONE; 4142 4143 /* No MSIs, need a dummy read to flush PCI DMAs */ 4144 if (!rdev->msi_enabled) 4145 RREG32(IH_RB_WPTR); 4146 4147 wptr = r600_get_ih_wptr(rdev); 4148 4149 restart_ih: 4150 /* is somebody else already processing irqs? */ 4151 if (atomic_xchg(&rdev->ih.lock, 1)) 4152 return IRQ_NONE; 4153 4154 rptr = rdev->ih.rptr; 4155 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 4156 4157 /* Order reading of wptr vs. reading of IH ring data */ 4158 rmb(); 4159 4160 /* display interrupts */ 4161 r600_irq_ack(rdev); 4162 4163 while (rptr != wptr) { 4164 /* wptr/rptr are in bytes! */ 4165 ring_index = rptr / 4; 4166 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; 4167 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; 4168 4169 switch (src_id) { 4170 case 1: /* D1 vblank/vline */ 4171 switch (src_data) { 4172 case 0: /* D1 vblank */ 4173 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)) 4174 DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); 4175 4176 if (rdev->irq.crtc_vblank_int[0]) { 4177 drm_handle_vblank(rdev->ddev, 0); 4178 rdev->pm.vblank_sync = true; 4179 wake_up(&rdev->irq.vblank_queue); 4180 } 4181 if (atomic_read(&rdev->irq.pflip[0])) 4182 radeon_crtc_handle_vblank(rdev, 0); 4183 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 4184 DRM_DEBUG("IH: D1 vblank\n"); 4185 4186 break; 4187 case 1: /* D1 vline */ 4188 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)) 4189 DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n"); 4190 4191 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; 4192 DRM_DEBUG("IH: D1 vline\n"); 4193 4194 break; 4195 default: 4196 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4197 break; 4198 } 4199 break; 4200 case 5: /* D2 vblank/vline */ 4201 switch (src_data) { 4202 case 0: /* D2 vblank */ 4203 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)) 4204 DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); 4205 4206 if (rdev->irq.crtc_vblank_int[1]) { 4207 drm_handle_vblank(rdev->ddev, 1); 4208 rdev->pm.vblank_sync = true; 4209 wake_up(&rdev->irq.vblank_queue); 4210 } 4211 if (atomic_read(&rdev->irq.pflip[1])) 4212 radeon_crtc_handle_vblank(rdev, 1); 4213 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; 4214 DRM_DEBUG("IH: D2 vblank\n"); 4215 4216 break; 4217 case 1: /* D1 vline */ 4218 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)) 4219 DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n"); 4220 4221 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; 4222 DRM_DEBUG("IH: D2 vline\n"); 4223 4224 break; 4225 default: 4226 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4227 break; 4228 } 4229 break; 4230 case 9: /* D1 pflip */ 4231 DRM_DEBUG("IH: D1 flip\n"); 4232 if (radeon_use_pflipirq > 0) 4233 radeon_crtc_handle_flip(rdev, 0); 4234 break; 4235 case 11: /* D2 pflip */ 4236 DRM_DEBUG("IH: D2 flip\n"); 4237 if (radeon_use_pflipirq > 0) 4238 radeon_crtc_handle_flip(rdev, 1); 4239 break; 4240 case 19: /* HPD/DAC hotplug */ 4241 switch (src_data) { 4242 case 0: 4243 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT)) 4244 DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n"); 4245 4246 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; 4247 queue_hotplug = true; 4248 DRM_DEBUG("IH: HPD1\n"); 4249 break; 4250 case 1: 4251 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT)) 4252 DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n"); 4253 4254 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; 4255 queue_hotplug = true; 4256 DRM_DEBUG("IH: HPD2\n"); 4257 break; 4258 case 4: 4259 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT)) 4260 DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n"); 4261 4262 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; 4263 queue_hotplug = true; 4264 DRM_DEBUG("IH: HPD3\n"); 4265 break; 4266 case 5: 4267 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT)) 4268 DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n"); 4269 4270 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; 4271 queue_hotplug = true; 4272 DRM_DEBUG("IH: HPD4\n"); 4273 break; 4274 case 10: 4275 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT)) 4276 DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n"); 4277 4278 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; 4279 queue_hotplug = true; 4280 DRM_DEBUG("IH: HPD5\n"); 4281 break; 4282 case 12: 4283 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT)) 4284 DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n"); 4285 4286 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; 4287 queue_hotplug = true; 4288 DRM_DEBUG("IH: HPD6\n"); 4289 4290 break; 4291 default: 4292 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4293 break; 4294 } 4295 break; 4296 case 21: /* hdmi */ 4297 switch (src_data) { 4298 case 4: 4299 if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG)) 4300 DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n"); 4301 4302 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; 4303 queue_hdmi = true; 4304 DRM_DEBUG("IH: HDMI0\n"); 4305 4306 break; 4307 case 5: 4308 if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG)) 4309 DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n"); 4310 4311 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; 4312 queue_hdmi = true; 4313 DRM_DEBUG("IH: HDMI1\n"); 4314 4315 break; 4316 default: 4317 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 4318 break; 4319 } 4320 break; 4321 case 124: /* UVD */ 4322 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); 4323 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); 4324 break; 4325 case 176: /* CP_INT in ring buffer */ 4326 case 177: /* CP_INT in IB1 */ 4327 case 178: /* CP_INT in IB2 */ 4328 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); 4329 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 4330 break; 4331 case 181: /* CP EOP event */ 4332 DRM_DEBUG("IH: CP EOP\n"); 4333 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 4334 break; 4335 case 224: /* DMA trap event */ 4336 DRM_DEBUG("IH: DMA trap\n"); 4337 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 4338 break; 4339 case 230: /* thermal low to high */ 4340 DRM_DEBUG("IH: thermal low to high\n"); 4341 rdev->pm.dpm.thermal.high_to_low = false; 4342 queue_thermal = true; 4343 break; 4344 case 231: /* thermal high to low */ 4345 DRM_DEBUG("IH: thermal high to low\n"); 4346 rdev->pm.dpm.thermal.high_to_low = true; 4347 queue_thermal = true; 4348 break; 4349 case 233: /* GUI IDLE */ 4350 DRM_DEBUG("IH: GUI idle\n"); 4351 break; 4352 default: 4353 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4354 break; 4355 } 4356 4357 /* wptr/rptr are in bytes! */ 4358 rptr += 16; 4359 rptr &= rdev->ih.ptr_mask; 4360 WREG32(IH_RB_RPTR, rptr); 4361 } 4362 if (queue_hotplug) 4363 taskqueue_enqueue(rdev->tq, &rdev->hotplug_work); 4364 if (queue_hdmi) 4365 taskqueue_enqueue(rdev->tq, &rdev->audio_work); 4366 if (queue_thermal && rdev->pm.dpm_enabled) 4367 taskqueue_enqueue(rdev->tq, &rdev->pm.dpm.thermal.work); 4368 rdev->ih.rptr = rptr; 4369 atomic_set(&rdev->ih.lock, 0); 4370 4371 /* make sure wptr hasn't changed while processing */ 4372 wptr = r600_get_ih_wptr(rdev); 4373 if (wptr != rptr) 4374 goto restart_ih; 4375 4376 return IRQ_HANDLED; 4377 } 4378 4379 /* 4380 * Debugfs info 4381 */ 4382 #if defined(CONFIG_DEBUG_FS) 4383 4384 static int r600_debugfs_mc_info(struct seq_file *m, void *data) 4385 { 4386 struct drm_info_node *node = (struct drm_info_node *) m->private; 4387 struct drm_device *dev = node->minor->dev; 4388 struct radeon_device *rdev = dev->dev_private; 4389 4390 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS); 4391 DREG32_SYS(m, rdev, VM_L2_STATUS); 4392 return 0; 4393 } 4394 4395 static struct drm_info_list r600_mc_info_list[] = { 4396 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL}, 4397 }; 4398 #endif 4399 4400 int r600_debugfs_mc_info_init(struct radeon_device *rdev) 4401 { 4402 #if defined(CONFIG_DEBUG_FS) 4403 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list)); 4404 #else 4405 return 0; 4406 #endif 4407 } 4408 4409 /** 4410 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO 4411 * rdev: radeon device structure 4412 * 4413 * Some R6XX/R7XX don't seem to take into account HDP flushes performed 4414 * through the ring buffer. This leads to corruption in rendering, see 4415 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we 4416 * directly perform the HDP flush by writing the register through MMIO. 4417 */ 4418 void r600_mmio_hdp_flush(struct radeon_device *rdev) 4419 { 4420 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 4421 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. 4422 * This seems to cause problems on some AGP cards. Just use the old 4423 * method for them. 4424 */ 4425 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 4426 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { 4427 volatile uint32_t *ptr = rdev->vram_scratch.ptr; 4428 u32 tmp; 4429 4430 WREG32(HDP_DEBUG1, 0); 4431 tmp = *ptr; 4432 } else 4433 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 4434 } 4435 4436 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) 4437 { 4438 u32 link_width_cntl, mask; 4439 4440 if (rdev->flags & RADEON_IS_IGP) 4441 return; 4442 4443 if (!(rdev->flags & RADEON_IS_PCIE)) 4444 return; 4445 4446 /* x2 cards have a special sequence */ 4447 if (ASIC_IS_X2(rdev)) 4448 return; 4449 4450 radeon_gui_idle(rdev); 4451 4452 switch (lanes) { 4453 case 0: 4454 mask = RADEON_PCIE_LC_LINK_WIDTH_X0; 4455 break; 4456 case 1: 4457 mask = RADEON_PCIE_LC_LINK_WIDTH_X1; 4458 break; 4459 case 2: 4460 mask = RADEON_PCIE_LC_LINK_WIDTH_X2; 4461 break; 4462 case 4: 4463 mask = RADEON_PCIE_LC_LINK_WIDTH_X4; 4464 break; 4465 case 8: 4466 mask = RADEON_PCIE_LC_LINK_WIDTH_X8; 4467 break; 4468 case 12: 4469 /* not actually supported */ 4470 mask = RADEON_PCIE_LC_LINK_WIDTH_X12; 4471 break; 4472 case 16: 4473 mask = RADEON_PCIE_LC_LINK_WIDTH_X16; 4474 break; 4475 default: 4476 DRM_ERROR("invalid pcie lane request: %d\n", lanes); 4477 return; 4478 } 4479 4480 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4481 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK; 4482 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT; 4483 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW | 4484 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); 4485 4486 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4487 } 4488 4489 int r600_get_pcie_lanes(struct radeon_device *rdev) 4490 { 4491 u32 link_width_cntl; 4492 4493 if (rdev->flags & RADEON_IS_IGP) 4494 return 0; 4495 4496 if (!(rdev->flags & RADEON_IS_PCIE)) 4497 return 0; 4498 4499 /* x2 cards have a special sequence */ 4500 if (ASIC_IS_X2(rdev)) 4501 return 0; 4502 4503 radeon_gui_idle(rdev); 4504 4505 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4506 4507 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 4508 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4509 return 1; 4510 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4511 return 2; 4512 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4513 return 4; 4514 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4515 return 8; 4516 case RADEON_PCIE_LC_LINK_WIDTH_X12: 4517 /* not actually supported */ 4518 return 12; 4519 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4520 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4521 default: 4522 return 16; 4523 } 4524 } 4525 4526 static void r600_pcie_gen2_enable(struct radeon_device *rdev) 4527 { 4528 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 4529 u16 link_cntl2; 4530 u32 mask; 4531 int ret; 4532 4533 if (radeon_pcie_gen2 == 0) 4534 return; 4535 4536 if (rdev->flags & RADEON_IS_IGP) 4537 return; 4538 4539 if (!(rdev->flags & RADEON_IS_PCIE)) 4540 return; 4541 4542 /* x2 cards have a special sequence */ 4543 if (ASIC_IS_X2(rdev)) 4544 return; 4545 4546 /* only RV6xx+ chips are supported */ 4547 if (rdev->family <= CHIP_R600) 4548 return; 4549 4550 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 4551 if (ret != 0) 4552 return; 4553 4554 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) 4555 return; 4556 4557 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4558 if (speed_cntl & LC_CURRENT_DATA_RATE) { 4559 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 4560 return; 4561 } 4562 4563 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 4564 4565 /* 55 nm r6xx asics */ 4566 if ((rdev->family == CHIP_RV670) || 4567 (rdev->family == CHIP_RV620) || 4568 (rdev->family == CHIP_RV635)) { 4569 /* advertise upconfig capability */ 4570 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4571 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4572 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4573 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4574 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 4575 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 4576 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 4577 LC_RECONFIG_ARC_MISSING_ESCAPE); 4578 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; 4579 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4580 } else { 4581 link_width_cntl |= LC_UPCONFIGURE_DIS; 4582 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4583 } 4584 } 4585 4586 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4587 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 4588 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 4589 4590 /* 55 nm r6xx asics */ 4591 if ((rdev->family == CHIP_RV670) || 4592 (rdev->family == CHIP_RV620) || 4593 (rdev->family == CHIP_RV635)) { 4594 WREG32(MM_CFGREGS_CNTL, 0x8); 4595 link_cntl2 = RREG32(0x4088); 4596 WREG32(MM_CFGREGS_CNTL, 0); 4597 /* not supported yet */ 4598 if (link_cntl2 & SELECTABLE_DEEMPHASIS) 4599 return; 4600 } 4601 4602 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; 4603 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); 4604 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; 4605 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; 4606 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; 4607 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4608 4609 tmp = RREG32(0x541c); 4610 WREG32(0x541c, tmp | 0x8); 4611 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); 4612 link_cntl2 = RREG16(0x4088); 4613 link_cntl2 &= ~TARGET_LINK_SPEED_MASK; 4614 link_cntl2 |= 0x2; 4615 WREG16(0x4088, link_cntl2); 4616 WREG32(MM_CFGREGS_CNTL, 0); 4617 4618 if ((rdev->family == CHIP_RV670) || 4619 (rdev->family == CHIP_RV620) || 4620 (rdev->family == CHIP_RV635)) { 4621 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL); 4622 training_cntl &= ~LC_POINT_7_PLUS_EN; 4623 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl); 4624 } else { 4625 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4626 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 4627 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4628 } 4629 4630 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4631 speed_cntl |= LC_GEN2_EN_STRAP; 4632 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4633 4634 } else { 4635 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4636 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 4637 if (1) 4638 link_width_cntl |= LC_UPCONFIGURE_DIS; 4639 else 4640 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4641 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4642 } 4643 } 4644 4645 /** 4646 * r600_get_gpu_clock_counter - return GPU clock counter snapshot 4647 * 4648 * @rdev: radeon_device pointer 4649 * 4650 * Fetches a GPU clock counter snapshot (R6xx-cayman). 4651 * Returns the 64 bit clock counter snapshot. 4652 */ 4653 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev) 4654 { 4655 uint64_t clock; 4656 4657 mutex_lock(&rdev->gpu_clock_mutex); 4658 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); 4659 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | 4660 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 4661 mutex_unlock(&rdev->gpu_clock_mutex); 4662 return clock; 4663 } 4664