/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");

static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
static void r600_gpu_init(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 492 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 493 } else if (rdev->pm.num_power_states == 3) { 494 /* default */ 495 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 496 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 497 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 498 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 499 /* low sh */ 500 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; 501 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; 502 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 503 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 504 /* mid sh */ 505 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; 506 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; 507 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 508 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 509 /* high sh */ 510 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 511 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; 512 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 513 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 514 /* low mh */ 515 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; 516 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; 517 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 518 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 519 /* mid mh */ 520 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; 521 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; 522 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 523 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 524 /* high mh */ 525 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; 526 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; 527 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 528 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 529 } else { 530 /* default */ 531 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 532 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 533 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 534 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 535 /* low sh */ 536 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; 537 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; 538 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 539 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 540 /* mid sh */ 541 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; 542 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; 543 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 544 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 545 /* high sh */ 546 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; 547 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; 548 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 549 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 550 /* low mh */ 551 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; 552 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; 553 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 555 /* mid mh */ 556 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; 557 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; 558 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 559 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 560 /* high mh */ 561 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; 562 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; 563 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 564 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 565 } 566 } 567 568 void r600_pm_init_profile(struct radeon_device *rdev) 569 { 570 int idx; 571 572 if (rdev->family == CHIP_R600) { 573 /* XXX */ 574 /* default */ 575 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 576 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 577 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 578 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 579 /* low sh */ 580 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 581 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 582 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 583 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 584 /* mid sh */ 585 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 586 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 587 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 588 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 589 /* high sh */ 590 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 591 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 592 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 593 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 594 /* low mh */ 595 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 596 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 597 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 598 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 599 /* mid mh */ 600 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 601 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 602 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 603 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 604 /* high mh */ 605 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 606 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 607 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 608 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 609 } else { 610 if (rdev->pm.num_power_states < 4) { 611 /* default */ 612 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 613 
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 614 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 615 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 616 /* low sh */ 617 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; 618 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; 619 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 620 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 621 /* mid sh */ 622 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; 623 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; 624 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 625 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 626 /* high sh */ 627 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 628 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; 629 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 630 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 631 /* low mh */ 632 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; 633 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; 634 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 635 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 636 /* low mh */ 637 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; 638 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; 639 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 640 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 641 /* high mh */ 642 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; 643 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; 644 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 645 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 646 } else { 647 /* default */ 648 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 649 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 650 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 651 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 652 /* low sh */ 653 if (rdev->flags & RADEON_IS_MOBILITY) 654 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 655 else 656 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 657 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; 658 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; 659 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 660 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 661 /* mid sh */ 662 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; 663 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; 664 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 665 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 666 /* high sh */ 667 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 668 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; 669 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; 670 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 671 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 672 /* low mh */ 673 if (rdev->flags & RADEON_IS_MOBILITY) 674 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 675 else 676 idx = 
radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 677 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; 678 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; 679 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 680 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 681 /* mid mh */ 682 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; 683 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; 684 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 685 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 686 /* high mh */ 687 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 688 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; 689 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; 690 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 691 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 692 } 693 } 694 } 695 696 void r600_pm_misc(struct radeon_device *rdev) 697 { 698 int req_ps_idx = rdev->pm.requested_power_state_index; 699 int req_cm_idx = rdev->pm.requested_clock_mode_index; 700 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; 701 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 702 703 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 704 /* 0xff01 is a flag rather then an actual voltage */ 705 if (voltage->voltage == 0xff01) 706 return; 707 if (voltage->voltage != rdev->pm.current_vddc) { 708 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 709 rdev->pm.current_vddc = voltage->voltage; 710 DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); 711 } 712 } 713 } 714 715 bool r600_gui_idle(struct radeon_device *rdev) 716 { 717 if (RREG32(GRBM_STATUS) & GUI_ACTIVE) 718 return false; 719 else 720 return true; 721 } 722 723 /* hpd for digital panel detect/disconnect */ 724 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 725 { 726 bool connected = false; 727 728 if (ASIC_IS_DCE3(rdev)) { 729 switch (hpd) { 730 case RADEON_HPD_1: 731 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) 732 connected = true; 733 break; 734 case RADEON_HPD_2: 735 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) 736 connected = true; 737 break; 738 case RADEON_HPD_3: 739 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) 740 connected = true; 741 break; 742 case RADEON_HPD_4: 743 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) 744 connected = true; 745 break; 746 /* DCE 3.2 */ 747 case RADEON_HPD_5: 748 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) 749 connected = true; 750 break; 751 case RADEON_HPD_6: 752 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 753 connected = true; 754 break; 755 default: 756 break; 757 } 758 } else { 759 switch (hpd) { 760 case RADEON_HPD_1: 761 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 762 connected = true; 763 break; 764 case RADEON_HPD_2: 765 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 766 connected = true; 767 break; 768 case RADEON_HPD_3: 769 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 770 connected = true; 771 break; 772 default: 773 break; 774 } 775 } 776 return connected; 777 } 778 779 void r600_hpd_set_polarity(struct radeon_device *rdev, 780 enum radeon_hpd_id hpd) 781 { 782 u32 tmp; 783 bool connected = r600_hpd_sense(rdev, hpd); 784 785 if (ASIC_IS_DCE3(rdev)) { 786 switch (hpd) { 787 case RADEON_HPD_1: 788 tmp = 
RREG32(DC_HPD1_INT_CONTROL); 789 if (connected) 790 tmp &= ~DC_HPDx_INT_POLARITY; 791 else 792 tmp |= DC_HPDx_INT_POLARITY; 793 WREG32(DC_HPD1_INT_CONTROL, tmp); 794 break; 795 case RADEON_HPD_2: 796 tmp = RREG32(DC_HPD2_INT_CONTROL); 797 if (connected) 798 tmp &= ~DC_HPDx_INT_POLARITY; 799 else 800 tmp |= DC_HPDx_INT_POLARITY; 801 WREG32(DC_HPD2_INT_CONTROL, tmp); 802 break; 803 case RADEON_HPD_3: 804 tmp = RREG32(DC_HPD3_INT_CONTROL); 805 if (connected) 806 tmp &= ~DC_HPDx_INT_POLARITY; 807 else 808 tmp |= DC_HPDx_INT_POLARITY; 809 WREG32(DC_HPD3_INT_CONTROL, tmp); 810 break; 811 case RADEON_HPD_4: 812 tmp = RREG32(DC_HPD4_INT_CONTROL); 813 if (connected) 814 tmp &= ~DC_HPDx_INT_POLARITY; 815 else 816 tmp |= DC_HPDx_INT_POLARITY; 817 WREG32(DC_HPD4_INT_CONTROL, tmp); 818 break; 819 case RADEON_HPD_5: 820 tmp = RREG32(DC_HPD5_INT_CONTROL); 821 if (connected) 822 tmp &= ~DC_HPDx_INT_POLARITY; 823 else 824 tmp |= DC_HPDx_INT_POLARITY; 825 WREG32(DC_HPD5_INT_CONTROL, tmp); 826 break; 827 /* DCE 3.2 */ 828 case RADEON_HPD_6: 829 tmp = RREG32(DC_HPD6_INT_CONTROL); 830 if (connected) 831 tmp &= ~DC_HPDx_INT_POLARITY; 832 else 833 tmp |= DC_HPDx_INT_POLARITY; 834 WREG32(DC_HPD6_INT_CONTROL, tmp); 835 break; 836 default: 837 break; 838 } 839 } else { 840 switch (hpd) { 841 case RADEON_HPD_1: 842 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); 843 if (connected) 844 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 845 else 846 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 847 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 848 break; 849 case RADEON_HPD_2: 850 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); 851 if (connected) 852 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 853 else 854 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 855 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 856 break; 857 case RADEON_HPD_3: 858 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); 859 if (connected) 860 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 861 else 862 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 863 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 864 break; 865 default: 866 break; 867 } 868 } 869 } 870 871 void r600_hpd_init(struct radeon_device *rdev) 872 { 873 struct drm_device *dev = rdev->ddev; 874 struct drm_connector *connector; 875 unsigned enable = 0; 876 877 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 878 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 879 880 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || 881 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 882 /* don't try to enable hpd on eDP or LVDS avoid breaking the 883 * aux dp channel on imac and help (but not completely fix) 884 * https://bugzilla.redhat.com/show_bug.cgi?id=726143 885 */ 886 continue; 887 } 888 if (ASIC_IS_DCE3(rdev)) { 889 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); 890 if (ASIC_IS_DCE32(rdev)) 891 tmp |= DC_HPDx_EN; 892 893 switch (radeon_connector->hpd.hpd) { 894 case RADEON_HPD_1: 895 WREG32(DC_HPD1_CONTROL, tmp); 896 break; 897 case RADEON_HPD_2: 898 WREG32(DC_HPD2_CONTROL, tmp); 899 break; 900 case RADEON_HPD_3: 901 WREG32(DC_HPD3_CONTROL, tmp); 902 break; 903 case RADEON_HPD_4: 904 WREG32(DC_HPD4_CONTROL, tmp); 905 break; 906 /* DCE 3.2 */ 907 case RADEON_HPD_5: 908 WREG32(DC_HPD5_CONTROL, tmp); 909 break; 910 case RADEON_HPD_6: 911 WREG32(DC_HPD6_CONTROL, tmp); 912 break; 913 default: 914 break; 915 } 916 } else { 917 switch (radeon_connector->hpd.hpd) { 918 case RADEON_HPD_1: 919 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); 
920 break; 921 case RADEON_HPD_2: 922 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); 923 break; 924 case RADEON_HPD_3: 925 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); 926 break; 927 default: 928 break; 929 } 930 } 931 enable |= 1 << radeon_connector->hpd.hpd; 932 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 933 } 934 radeon_irq_kms_enable_hpd(rdev, enable); 935 } 936 937 void r600_hpd_fini(struct radeon_device *rdev) 938 { 939 struct drm_device *dev = rdev->ddev; 940 struct drm_connector *connector; 941 unsigned disable = 0; 942 943 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 944 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 945 if (ASIC_IS_DCE3(rdev)) { 946 switch (radeon_connector->hpd.hpd) { 947 case RADEON_HPD_1: 948 WREG32(DC_HPD1_CONTROL, 0); 949 break; 950 case RADEON_HPD_2: 951 WREG32(DC_HPD2_CONTROL, 0); 952 break; 953 case RADEON_HPD_3: 954 WREG32(DC_HPD3_CONTROL, 0); 955 break; 956 case RADEON_HPD_4: 957 WREG32(DC_HPD4_CONTROL, 0); 958 break; 959 /* DCE 3.2 */ 960 case RADEON_HPD_5: 961 WREG32(DC_HPD5_CONTROL, 0); 962 break; 963 case RADEON_HPD_6: 964 WREG32(DC_HPD6_CONTROL, 0); 965 break; 966 default: 967 break; 968 } 969 } else { 970 switch (radeon_connector->hpd.hpd) { 971 case RADEON_HPD_1: 972 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); 973 break; 974 case RADEON_HPD_2: 975 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); 976 break; 977 case RADEON_HPD_3: 978 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); 979 break; 980 default: 981 break; 982 } 983 } 984 disable |= 1 << radeon_connector->hpd.hpd; 985 } 986 radeon_irq_kms_disable_hpd(rdev, disable); 987 } 988 989 /* 990 * R600 PCIE GART 991 */ 992 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) 993 { 994 unsigned i; 995 u32 tmp; 996 997 /* flush hdp cache so updates hit vram */ 998 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 999 !(rdev->flags & RADEON_IS_AGP)) { 1000 volatile uint32_t *ptr = rdev->gart.ptr; 1001 u32 tmp; 1002 1003 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 1004 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 1005 * This seems to cause problems on some AGP cards. Just use the old 1006 * method for them. 
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = *ptr;
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock(&rdev->mc_idx_lock);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock(&rdev->mc_idx_lock);
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU
 * address space as it has in the CPU (PCI) address space, as some GPUs
 * seem to have issues when VRAM is reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then the VRAM size is limited to the aperture.
 *
 * If we are using AGP, VRAM is placed adjacent to the AGP aperture, as
 * the two must form one contiguous range from the GPU's point of view so
 * that the GPU can be programmed to catch accesses outside of them
 * (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
				 * memory is present.
1408 */ 1409 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) { 1410 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", 1411 (unsigned long long)rdev->mc.aper_base, k8_addr); 1412 rdev->mc.aper_base = (resource_size_t)k8_addr; 1413 rdev->fastfb_working = true; 1414 } 1415 } 1416 } 1417 } 1418 1419 radeon_update_bandwidth_info(rdev); 1420 return 0; 1421 } 1422 1423 int r600_vram_scratch_init(struct radeon_device *rdev) 1424 { 1425 int r; 1426 void *vram_scratch_ptr_ptr; 1427 1428 if (rdev->vram_scratch.robj == NULL) { 1429 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, 1430 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 1431 0, NULL, &rdev->vram_scratch.robj); 1432 if (r) { 1433 return r; 1434 } 1435 } 1436 1437 r = radeon_bo_reserve(rdev->vram_scratch.robj, false); 1438 if (unlikely(r != 0)) { 1439 radeon_bo_unref(&rdev->vram_scratch.robj); 1440 return r; 1441 } 1442 r = radeon_bo_pin(rdev->vram_scratch.robj, 1443 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr); 1444 if (r) { 1445 radeon_bo_unreserve(rdev->vram_scratch.robj); 1446 radeon_bo_unref(&rdev->vram_scratch.robj); 1447 return r; 1448 } 1449 vram_scratch_ptr_ptr = &rdev->vram_scratch.ptr; 1450 r = radeon_bo_kmap(rdev->vram_scratch.robj, 1451 vram_scratch_ptr_ptr); 1452 if (r) 1453 radeon_bo_unpin(rdev->vram_scratch.robj); 1454 radeon_bo_unreserve(rdev->vram_scratch.robj); 1455 if (r) 1456 radeon_bo_unref(&rdev->vram_scratch.robj); 1457 1458 return r; 1459 } 1460 1461 void r600_vram_scratch_fini(struct radeon_device *rdev) 1462 { 1463 int r; 1464 1465 if (rdev->vram_scratch.robj == NULL) { 1466 return; 1467 } 1468 r = radeon_bo_reserve(rdev->vram_scratch.robj, false); 1469 if (likely(r == 0)) { 1470 radeon_bo_kunmap(rdev->vram_scratch.robj); 1471 radeon_bo_unpin(rdev->vram_scratch.robj); 1472 radeon_bo_unreserve(rdev->vram_scratch.robj); 1473 } 1474 radeon_bo_unref(&rdev->vram_scratch.robj); 1475 } 1476 1477 void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung) 1478 { 1479 u32 tmp = RREG32(R600_BIOS_3_SCRATCH); 1480 1481 if (hung) 1482 tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; 1483 else 1484 tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; 1485 1486 WREG32(R600_BIOS_3_SCRATCH, tmp); 1487 } 1488 1489 static void r600_print_gpu_status_regs(struct radeon_device *rdev) 1490 { 1491 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", 1492 RREG32(R_008010_GRBM_STATUS)); 1493 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", 1494 RREG32(R_008014_GRBM_STATUS2)); 1495 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", 1496 RREG32(R_000E50_SRBM_STATUS)); 1497 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1498 RREG32(CP_STALLED_STAT1)); 1499 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1500 RREG32(CP_STALLED_STAT2)); 1501 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1502 RREG32(CP_BUSY_STAT)); 1503 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1504 RREG32(CP_STAT)); 1505 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1506 RREG32(DMA_STATUS_REG)); 1507 } 1508 1509 static bool r600_is_display_hung(struct radeon_device *rdev) 1510 { 1511 u32 crtc_hung = 0; 1512 u32 crtc_status[2]; 1513 u32 i, j, tmp; 1514 1515 for (i = 0; i < rdev->num_crtc; i++) { 1516 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) { 1517 crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]); 1518 crtc_hung |= (1 << i); 1519 } 1520 } 1521 1522 for (j = 0; j < 10; j++) { 1523 for (i = 0; i < 
rdev->num_crtc; i++) { 1524 if (crtc_hung & (1 << i)) { 1525 tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]); 1526 if (tmp != crtc_status[i]) 1527 crtc_hung &= ~(1 << i); 1528 } 1529 } 1530 if (crtc_hung == 0) 1531 return false; 1532 udelay(100); 1533 } 1534 1535 return true; 1536 } 1537 1538 u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) 1539 { 1540 u32 reset_mask = 0; 1541 u32 tmp; 1542 1543 /* GRBM_STATUS */ 1544 tmp = RREG32(R_008010_GRBM_STATUS); 1545 if (rdev->family >= CHIP_RV770) { 1546 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) | 1547 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) | 1548 G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) | 1549 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) | 1550 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp)) 1551 reset_mask |= RADEON_RESET_GFX; 1552 } else { 1553 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) | 1554 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) | 1555 G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) | 1556 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) | 1557 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp)) 1558 reset_mask |= RADEON_RESET_GFX; 1559 } 1560 1561 if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) | 1562 G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp)) 1563 reset_mask |= RADEON_RESET_CP; 1564 1565 if (G_008010_GRBM_EE_BUSY(tmp)) 1566 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP; 1567 1568 /* DMA_STATUS_REG */ 1569 tmp = RREG32(DMA_STATUS_REG); 1570 if (!(tmp & DMA_IDLE)) 1571 reset_mask |= RADEON_RESET_DMA; 1572 1573 /* SRBM_STATUS */ 1574 tmp = RREG32(R_000E50_SRBM_STATUS); 1575 if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp)) 1576 reset_mask |= RADEON_RESET_RLC; 1577 1578 if (G_000E50_IH_BUSY(tmp)) 1579 reset_mask |= RADEON_RESET_IH; 1580 1581 if (G_000E50_SEM_BUSY(tmp)) 1582 reset_mask |= RADEON_RESET_SEM; 1583 1584 if (G_000E50_GRBM_RQ_PENDING(tmp)) 1585 reset_mask |= RADEON_RESET_GRBM; 1586 1587 if (G_000E50_VMC_BUSY(tmp)) 1588 reset_mask |= RADEON_RESET_VMC; 1589 1590 if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) | 1591 G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) | 1592 G_000E50_MCDW_BUSY(tmp)) 1593 reset_mask |= RADEON_RESET_MC; 1594 1595 if (r600_is_display_hung(rdev)) 1596 reset_mask |= RADEON_RESET_DISPLAY; 1597 1598 /* Skip MC reset as it's mostly likely not hung, just busy */ 1599 if (reset_mask & RADEON_RESET_MC) { 1600 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); 1601 reset_mask &= ~RADEON_RESET_MC; 1602 } 1603 1604 return reset_mask; 1605 } 1606 1607 static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 1608 { 1609 struct rv515_mc_save save; 1610 u32 grbm_soft_reset = 0, srbm_soft_reset = 0; 1611 u32 tmp; 1612 1613 if (reset_mask == 0) 1614 return; 1615 1616 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1617 1618 r600_print_gpu_status_regs(rdev); 1619 1620 /* Disable CP parsing/prefetching */ 1621 if (rdev->family >= CHIP_RV770) 1622 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1)); 1623 else 1624 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1625 1626 /* disable the RLC */ 1627 WREG32(RLC_CNTL, 0); 1628 1629 if (reset_mask & RADEON_RESET_DMA) { 1630 /* Disable DMA */ 1631 tmp = RREG32(DMA_RB_CNTL); 1632 tmp &= ~DMA_RB_ENABLE; 1633 WREG32(DMA_RB_CNTL, tmp); 1634 } 1635 1636 mdelay(50); 1637 1638 rv515_mc_stop(rdev, &save); 1639 if (r600_mc_wait_for_idle(rdev)) { 1640 dev_warn(rdev->dev, "Wait for MC 
idle timedout !\n"); 1641 } 1642 1643 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) { 1644 if (rdev->family >= CHIP_RV770) 1645 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) | 1646 S_008020_SOFT_RESET_CB(1) | 1647 S_008020_SOFT_RESET_PA(1) | 1648 S_008020_SOFT_RESET_SC(1) | 1649 S_008020_SOFT_RESET_SPI(1) | 1650 S_008020_SOFT_RESET_SX(1) | 1651 S_008020_SOFT_RESET_SH(1) | 1652 S_008020_SOFT_RESET_TC(1) | 1653 S_008020_SOFT_RESET_TA(1) | 1654 S_008020_SOFT_RESET_VC(1) | 1655 S_008020_SOFT_RESET_VGT(1); 1656 else 1657 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) | 1658 S_008020_SOFT_RESET_DB(1) | 1659 S_008020_SOFT_RESET_CB(1) | 1660 S_008020_SOFT_RESET_PA(1) | 1661 S_008020_SOFT_RESET_SC(1) | 1662 S_008020_SOFT_RESET_SMX(1) | 1663 S_008020_SOFT_RESET_SPI(1) | 1664 S_008020_SOFT_RESET_SX(1) | 1665 S_008020_SOFT_RESET_SH(1) | 1666 S_008020_SOFT_RESET_TC(1) | 1667 S_008020_SOFT_RESET_TA(1) | 1668 S_008020_SOFT_RESET_VC(1) | 1669 S_008020_SOFT_RESET_VGT(1); 1670 } 1671 1672 if (reset_mask & RADEON_RESET_CP) { 1673 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) | 1674 S_008020_SOFT_RESET_VGT(1); 1675 1676 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1); 1677 } 1678 1679 if (reset_mask & RADEON_RESET_DMA) { 1680 if (rdev->family >= CHIP_RV770) 1681 srbm_soft_reset |= RV770_SOFT_RESET_DMA; 1682 else 1683 srbm_soft_reset |= SOFT_RESET_DMA; 1684 } 1685 1686 if (reset_mask & RADEON_RESET_RLC) 1687 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1); 1688 1689 if (reset_mask & RADEON_RESET_SEM) 1690 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1); 1691 1692 if (reset_mask & RADEON_RESET_IH) 1693 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1); 1694 1695 if (reset_mask & RADEON_RESET_GRBM) 1696 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1); 1697 1698 if (!(rdev->flags & RADEON_IS_IGP)) { 1699 if (reset_mask & RADEON_RESET_MC) 1700 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1); 1701 } 1702 1703 if (reset_mask & RADEON_RESET_VMC) 1704 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1); 1705 1706 if (grbm_soft_reset) { 1707 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1708 tmp |= grbm_soft_reset; 1709 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1710 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1711 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1712 1713 udelay(50); 1714 1715 tmp &= ~grbm_soft_reset; 1716 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1717 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1718 } 1719 1720 if (srbm_soft_reset) { 1721 tmp = RREG32(SRBM_SOFT_RESET); 1722 tmp |= srbm_soft_reset; 1723 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 1724 WREG32(SRBM_SOFT_RESET, tmp); 1725 tmp = RREG32(SRBM_SOFT_RESET); 1726 1727 udelay(50); 1728 1729 tmp &= ~srbm_soft_reset; 1730 WREG32(SRBM_SOFT_RESET, tmp); 1731 tmp = RREG32(SRBM_SOFT_RESET); 1732 } 1733 1734 /* Wait a little for things to settle down */ 1735 mdelay(1); 1736 1737 rv515_mc_resume(rdev, &save); 1738 udelay(50); 1739 1740 r600_print_gpu_status_regs(rdev); 1741 } 1742 1743 static void r600_gpu_pci_config_reset(struct radeon_device *rdev) 1744 { 1745 struct rv515_mc_save save; 1746 u32 tmp, i; 1747 1748 dev_info(rdev->dev, "GPU pci config reset\n"); 1749 1750 /* disable dpm? 
*/ 1751 1752 /* Disable CP parsing/prefetching */ 1753 if (rdev->family >= CHIP_RV770) 1754 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1)); 1755 else 1756 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1757 1758 /* disable the RLC */ 1759 WREG32(RLC_CNTL, 0); 1760 1761 /* Disable DMA */ 1762 tmp = RREG32(DMA_RB_CNTL); 1763 tmp &= ~DMA_RB_ENABLE; 1764 WREG32(DMA_RB_CNTL, tmp); 1765 1766 mdelay(50); 1767 1768 /* set mclk/sclk to bypass */ 1769 if (rdev->family >= CHIP_RV770) 1770 rv770_set_clk_bypass_mode(rdev); 1771 /* disable BM */ 1772 pci_disable_busmaster(rdev->pdev->dev.bsddev); 1773 /* disable mem access */ 1774 rv515_mc_stop(rdev, &save); 1775 if (r600_mc_wait_for_idle(rdev)) { 1776 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1777 } 1778 1779 /* BIF reset workaround. Not sure if this is needed on 6xx */ 1780 tmp = RREG32(BUS_CNTL); 1781 tmp |= VGA_COHE_SPEC_TIMER_DIS; 1782 WREG32(BUS_CNTL, tmp); 1783 1784 tmp = RREG32(BIF_SCRATCH0); 1785 1786 /* reset */ 1787 radeon_pci_config_reset(rdev); 1788 mdelay(1); 1789 1790 /* BIF reset workaround. Not sure if this is needed on 6xx */ 1791 tmp = SOFT_RESET_BIF; 1792 WREG32(SRBM_SOFT_RESET, tmp); 1793 mdelay(1); 1794 WREG32(SRBM_SOFT_RESET, 0); 1795 1796 /* wait for asic to come out of reset */ 1797 for (i = 0; i < rdev->usec_timeout; i++) { 1798 if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) 1799 break; 1800 udelay(1); 1801 } 1802 } 1803 1804 int r600_asic_reset(struct radeon_device *rdev) 1805 { 1806 u32 reset_mask; 1807 1808 reset_mask = r600_gpu_check_soft_reset(rdev); 1809 1810 if (reset_mask) 1811 r600_set_bios_scratch_engine_hung(rdev, true); 1812 1813 /* try soft reset */ 1814 r600_gpu_soft_reset(rdev, reset_mask); 1815 1816 reset_mask = r600_gpu_check_soft_reset(rdev); 1817 1818 /* try pci config reset */ 1819 if (reset_mask && radeon_hard_reset) 1820 r600_gpu_pci_config_reset(rdev); 1821 1822 reset_mask = r600_gpu_check_soft_reset(rdev); 1823 1824 if (!reset_mask) 1825 r600_set_bios_scratch_engine_hung(rdev, false); 1826 1827 return 0; 1828 } 1829 1830 /** 1831 * r600_gfx_is_lockup - Check if the GFX engine is locked up 1832 * 1833 * @rdev: radeon_device pointer 1834 * @ring: radeon_ring structure holding ring information 1835 * 1836 * Check if the GFX engine is locked up. 1837 * Returns true if the engine appears to be locked up, false if not. 
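 * The test is indirect: the busy bits gathered by r600_gpu_check_soft_reset()
 * are sampled, and if none of the GFX, COMPUTE or CP reset bits are raised the
 * lockup tracker is simply refreshed; only a persistently busy engine is
 * passed on to radeon_ring_test_lockup().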
1838 */ 1839 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1840 { 1841 u32 reset_mask = r600_gpu_check_soft_reset(rdev); 1842 1843 if (!(reset_mask & (RADEON_RESET_GFX | 1844 RADEON_RESET_COMPUTE | 1845 RADEON_RESET_CP))) { 1846 radeon_ring_lockup_update(rdev, ring); 1847 return false; 1848 } 1849 return radeon_ring_test_lockup(rdev, ring); 1850 } 1851 1852 u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1853 u32 tiling_pipe_num, 1854 u32 max_rb_num, 1855 u32 total_max_rb_num, 1856 u32 disabled_rb_mask) 1857 { 1858 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1859 u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1860 u32 data = 0, mask = 1 << (max_rb_num - 1); 1861 unsigned i, j; 1862 1863 /* mask out the RBs that don't exist on that asic */ 1864 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1865 /* make sure at least one RB is available */ 1866 if ((tmp & 0xff) != 0xff) 1867 disabled_rb_mask = tmp; 1868 1869 rendering_pipe_num = 1 << tiling_pipe_num; 1870 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); 1871 BUG_ON(rendering_pipe_num < req_rb_num); 1872 1873 pipe_rb_ratio = rendering_pipe_num / req_rb_num; 1874 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; 1875 1876 if (rdev->family <= CHIP_RV740) { 1877 /* r6xx/r7xx */ 1878 rb_num_width = 2; 1879 } else { 1880 /* eg+ */ 1881 rb_num_width = 4; 1882 } 1883 1884 for (i = 0; i < max_rb_num; i++) { 1885 if (!(mask & disabled_rb_mask)) { 1886 for (j = 0; j < pipe_rb_ratio; j++) { 1887 data <<= rb_num_width; 1888 data |= max_rb_num - i - 1; 1889 } 1890 if (pipe_rb_remain) { 1891 data <<= rb_num_width; 1892 data |= max_rb_num - i - 1; 1893 pipe_rb_remain--; 1894 } 1895 } 1896 mask >>= 1; 1897 } 1898 1899 return data; 1900 } 1901 1902 int r600_count_pipe_bits(uint32_t val) 1903 { 1904 return hweight32(val); 1905 } 1906 1907 static void r600_gpu_init(struct radeon_device *rdev) 1908 { 1909 u32 tiling_config; 1910 u32 ramcfg; 1911 u32 cc_gc_shader_pipe_config; 1912 u32 tmp; 1913 int i, j; 1914 u32 sq_config; 1915 u32 sq_gpr_resource_mgmt_1 = 0; 1916 u32 sq_gpr_resource_mgmt_2 = 0; 1917 u32 sq_thread_resource_mgmt = 0; 1918 u32 sq_stack_resource_mgmt_1 = 0; 1919 u32 sq_stack_resource_mgmt_2 = 0; 1920 u32 disabled_rb_mask; 1921 1922 rdev->config.r600.tiling_group_size = 256; 1923 switch (rdev->family) { 1924 case CHIP_R600: 1925 rdev->config.r600.max_pipes = 4; 1926 rdev->config.r600.max_tile_pipes = 8; 1927 rdev->config.r600.max_simds = 4; 1928 rdev->config.r600.max_backends = 4; 1929 rdev->config.r600.max_gprs = 256; 1930 rdev->config.r600.max_threads = 192; 1931 rdev->config.r600.max_stack_entries = 256; 1932 rdev->config.r600.max_hw_contexts = 8; 1933 rdev->config.r600.max_gs_threads = 16; 1934 rdev->config.r600.sx_max_export_size = 128; 1935 rdev->config.r600.sx_max_export_pos_size = 16; 1936 rdev->config.r600.sx_max_export_smx_size = 128; 1937 rdev->config.r600.sq_num_cf_insts = 2; 1938 break; 1939 case CHIP_RV630: 1940 case CHIP_RV635: 1941 rdev->config.r600.max_pipes = 2; 1942 rdev->config.r600.max_tile_pipes = 2; 1943 rdev->config.r600.max_simds = 3; 1944 rdev->config.r600.max_backends = 1; 1945 rdev->config.r600.max_gprs = 128; 1946 rdev->config.r600.max_threads = 192; 1947 rdev->config.r600.max_stack_entries = 128; 1948 rdev->config.r600.max_hw_contexts = 8; 1949 rdev->config.r600.max_gs_threads = 4; 1950 rdev->config.r600.sx_max_export_size = 128; 1951 rdev->config.r600.sx_max_export_pos_size = 16; 1952 rdev->config.r600.sx_max_export_smx_size = 
128; 1953 rdev->config.r600.sq_num_cf_insts = 2; 1954 break; 1955 case CHIP_RV610: 1956 case CHIP_RV620: 1957 case CHIP_RS780: 1958 case CHIP_RS880: 1959 rdev->config.r600.max_pipes = 1; 1960 rdev->config.r600.max_tile_pipes = 1; 1961 rdev->config.r600.max_simds = 2; 1962 rdev->config.r600.max_backends = 1; 1963 rdev->config.r600.max_gprs = 128; 1964 rdev->config.r600.max_threads = 192; 1965 rdev->config.r600.max_stack_entries = 128; 1966 rdev->config.r600.max_hw_contexts = 4; 1967 rdev->config.r600.max_gs_threads = 4; 1968 rdev->config.r600.sx_max_export_size = 128; 1969 rdev->config.r600.sx_max_export_pos_size = 16; 1970 rdev->config.r600.sx_max_export_smx_size = 128; 1971 rdev->config.r600.sq_num_cf_insts = 1; 1972 break; 1973 case CHIP_RV670: 1974 rdev->config.r600.max_pipes = 4; 1975 rdev->config.r600.max_tile_pipes = 4; 1976 rdev->config.r600.max_simds = 4; 1977 rdev->config.r600.max_backends = 4; 1978 rdev->config.r600.max_gprs = 192; 1979 rdev->config.r600.max_threads = 192; 1980 rdev->config.r600.max_stack_entries = 256; 1981 rdev->config.r600.max_hw_contexts = 8; 1982 rdev->config.r600.max_gs_threads = 16; 1983 rdev->config.r600.sx_max_export_size = 128; 1984 rdev->config.r600.sx_max_export_pos_size = 16; 1985 rdev->config.r600.sx_max_export_smx_size = 128; 1986 rdev->config.r600.sq_num_cf_insts = 2; 1987 break; 1988 default: 1989 break; 1990 } 1991 1992 /* Initialize HDP */ 1993 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1994 WREG32((0x2c14 + j), 0x00000000); 1995 WREG32((0x2c18 + j), 0x00000000); 1996 WREG32((0x2c1c + j), 0x00000000); 1997 WREG32((0x2c20 + j), 0x00000000); 1998 WREG32((0x2c24 + j), 0x00000000); 1999 } 2000 2001 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 2002 2003 /* Setup tiling */ 2004 tiling_config = 0; 2005 ramcfg = RREG32(RAMCFG); 2006 switch (rdev->config.r600.max_tile_pipes) { 2007 case 1: 2008 tiling_config |= PIPE_TILING(0); 2009 break; 2010 case 2: 2011 tiling_config |= PIPE_TILING(1); 2012 break; 2013 case 4: 2014 tiling_config |= PIPE_TILING(2); 2015 break; 2016 case 8: 2017 tiling_config |= PIPE_TILING(3); 2018 break; 2019 default: 2020 break; 2021 } 2022 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; 2023 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 2024 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 2025 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 2026 2027 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 2028 if (tmp > 3) { 2029 tiling_config |= ROW_TILING(3); 2030 tiling_config |= SAMPLE_SPLIT(3); 2031 } else { 2032 tiling_config |= ROW_TILING(tmp); 2033 tiling_config |= SAMPLE_SPLIT(tmp); 2034 } 2035 tiling_config |= BANK_SWAPS(1); 2036 2037 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 2038 tmp = rdev->config.r600.max_simds - 2039 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 2040 rdev->config.r600.active_simds = tmp; 2041 2042 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 2043 tmp = 0; 2044 for (i = 0; i < rdev->config.r600.max_backends; i++) 2045 tmp |= (1 << i); 2046 /* if all the backends are disabled, fix it up here */ 2047 if ((disabled_rb_mask & tmp) == tmp) { 2048 for (i = 0; i < rdev->config.r600.max_backends; i++) 2049 disabled_rb_mask &= ~(1 << i); 2050 } 2051 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 2052 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 2053 
R6XX_MAX_BACKENDS, disabled_rb_mask); 2054 tiling_config |= tmp << 16; 2055 rdev->config.r600.backend_map = tmp; 2056 2057 rdev->config.r600.tile_config = tiling_config; 2058 WREG32(GB_TILING_CONFIG, tiling_config); 2059 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 2060 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 2061 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); 2062 2063 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 2064 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 2065 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 2066 2067 /* Setup some CP states */ 2068 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); 2069 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40))); 2070 2071 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT | 2072 SYNC_WALKER | SYNC_ALIGNER)); 2073 /* Setup various GPU states */ 2074 if (rdev->family == CHIP_RV670) 2075 WREG32(ARB_GDEC_RD_CNTL, 0x00000021); 2076 2077 tmp = RREG32(SX_DEBUG_1); 2078 tmp |= SMX_EVENT_RELEASE; 2079 if ((rdev->family > CHIP_R600)) 2080 tmp |= ENABLE_NEW_SMX_ADDRESS; 2081 WREG32(SX_DEBUG_1, tmp); 2082 2083 if (((rdev->family) == CHIP_R600) || 2084 ((rdev->family) == CHIP_RV630) || 2085 ((rdev->family) == CHIP_RV610) || 2086 ((rdev->family) == CHIP_RV620) || 2087 ((rdev->family) == CHIP_RS780) || 2088 ((rdev->family) == CHIP_RS880)) { 2089 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 2090 } else { 2091 WREG32(DB_DEBUG, 0); 2092 } 2093 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) | 2094 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4))); 2095 2096 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2097 WREG32(VGT_NUM_INSTANCES, 0); 2098 2099 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); 2100 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0)); 2101 2102 tmp = RREG32(SQ_MS_FIFO_SIZES); 2103 if (((rdev->family) == CHIP_RV610) || 2104 ((rdev->family) == CHIP_RV620) || 2105 ((rdev->family) == CHIP_RS780) || 2106 ((rdev->family) == CHIP_RS880)) { 2107 tmp = (CACHE_FIFO_SIZE(0xa) | 2108 FETCH_FIFO_HIWATER(0xa) | 2109 DONE_FIFO_HIWATER(0xe0) | 2110 ALU_UPDATE_FIFO_HIWATER(0x8)); 2111 } else if (((rdev->family) == CHIP_R600) || 2112 ((rdev->family) == CHIP_RV630)) { 2113 tmp &= ~DONE_FIFO_HIWATER(0xff); 2114 tmp |= DONE_FIFO_HIWATER(0x4); 2115 } 2116 WREG32(SQ_MS_FIFO_SIZES, tmp); 2117 2118 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 2119 * should be adjusted as needed by the 2D/3D drivers. 
This just sets default values 2120 */ 2121 sq_config = RREG32(SQ_CONFIG); 2122 sq_config &= ~(PS_PRIO(3) | 2123 VS_PRIO(3) | 2124 GS_PRIO(3) | 2125 ES_PRIO(3)); 2126 sq_config |= (DX9_CONSTS | 2127 VC_ENABLE | 2128 PS_PRIO(0) | 2129 VS_PRIO(1) | 2130 GS_PRIO(2) | 2131 ES_PRIO(3)); 2132 2133 if ((rdev->family) == CHIP_R600) { 2134 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) | 2135 NUM_VS_GPRS(124) | 2136 NUM_CLAUSE_TEMP_GPRS(4)); 2137 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) | 2138 NUM_ES_GPRS(0)); 2139 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) | 2140 NUM_VS_THREADS(48) | 2141 NUM_GS_THREADS(4) | 2142 NUM_ES_THREADS(4)); 2143 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) | 2144 NUM_VS_STACK_ENTRIES(128)); 2145 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) | 2146 NUM_ES_STACK_ENTRIES(0)); 2147 } else if (((rdev->family) == CHIP_RV610) || 2148 ((rdev->family) == CHIP_RV620) || 2149 ((rdev->family) == CHIP_RS780) || 2150 ((rdev->family) == CHIP_RS880)) { 2151 /* no vertex cache */ 2152 sq_config &= ~VC_ENABLE; 2153 2154 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2155 NUM_VS_GPRS(44) | 2156 NUM_CLAUSE_TEMP_GPRS(2)); 2157 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 2158 NUM_ES_GPRS(17)); 2159 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2160 NUM_VS_THREADS(78) | 2161 NUM_GS_THREADS(4) | 2162 NUM_ES_THREADS(31)); 2163 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 2164 NUM_VS_STACK_ENTRIES(40)); 2165 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 2166 NUM_ES_STACK_ENTRIES(16)); 2167 } else if (((rdev->family) == CHIP_RV630) || 2168 ((rdev->family) == CHIP_RV635)) { 2169 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2170 NUM_VS_GPRS(44) | 2171 NUM_CLAUSE_TEMP_GPRS(2)); 2172 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) | 2173 NUM_ES_GPRS(18)); 2174 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2175 NUM_VS_THREADS(78) | 2176 NUM_GS_THREADS(4) | 2177 NUM_ES_THREADS(31)); 2178 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 2179 NUM_VS_STACK_ENTRIES(40)); 2180 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 2181 NUM_ES_STACK_ENTRIES(16)); 2182 } else if ((rdev->family) == CHIP_RV670) { 2183 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2184 NUM_VS_GPRS(44) | 2185 NUM_CLAUSE_TEMP_GPRS(2)); 2186 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 2187 NUM_ES_GPRS(17)); 2188 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2189 NUM_VS_THREADS(78) | 2190 NUM_GS_THREADS(4) | 2191 NUM_ES_THREADS(31)); 2192 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) | 2193 NUM_VS_STACK_ENTRIES(64)); 2194 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) | 2195 NUM_ES_STACK_ENTRIES(64)); 2196 } 2197 2198 WREG32(SQ_CONFIG, sq_config); 2199 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 2200 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 2201 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 2202 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 2203 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 2204 2205 if (((rdev->family) == CHIP_RV610) || 2206 ((rdev->family) == CHIP_RV620) || 2207 ((rdev->family) == CHIP_RS780) || 2208 ((rdev->family) == CHIP_RS880)) { 2209 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 2210 } else { 2211 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 2212 } 2213 2214 /* More default values. 
2D/3D driver should adjust as needed */ 2215 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) | 2216 S1_X(0x4) | S1_Y(0xc))); 2217 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) | 2218 S1_X(0x2) | S1_Y(0x2) | 2219 S2_X(0xa) | S2_Y(0x6) | 2220 S3_X(0x6) | S3_Y(0xa))); 2221 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) | 2222 S1_X(0x4) | S1_Y(0xc) | 2223 S2_X(0x1) | S2_Y(0x6) | 2224 S3_X(0xa) | S3_Y(0xe))); 2225 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) | 2226 S5_X(0x0) | S5_Y(0x0) | 2227 S6_X(0xb) | S6_Y(0x4) | 2228 S7_X(0x7) | S7_Y(0x8))); 2229 2230 WREG32(VGT_STRMOUT_EN, 0); 2231 tmp = rdev->config.r600.max_pipes * 16; 2232 switch (rdev->family) { 2233 case CHIP_RV610: 2234 case CHIP_RV620: 2235 case CHIP_RS780: 2236 case CHIP_RS880: 2237 tmp += 32; 2238 break; 2239 case CHIP_RV670: 2240 tmp += 128; 2241 break; 2242 default: 2243 break; 2244 } 2245 if (tmp > 256) { 2246 tmp = 256; 2247 } 2248 WREG32(VGT_ES_PER_GS, 128); 2249 WREG32(VGT_GS_PER_ES, tmp); 2250 WREG32(VGT_GS_PER_VS, 2); 2251 WREG32(VGT_GS_VERTEX_REUSE, 16); 2252 2253 /* more default values. 2D/3D driver should adjust as needed */ 2254 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2255 WREG32(VGT_STRMOUT_EN, 0); 2256 WREG32(SX_MISC, 0); 2257 WREG32(PA_SC_MODE_CNTL, 0); 2258 WREG32(PA_SC_AA_CONFIG, 0); 2259 WREG32(PA_SC_LINE_STIPPLE, 0); 2260 WREG32(SPI_INPUT_Z, 0); 2261 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); 2262 WREG32(CB_COLOR7_FRAG, 0); 2263 2264 /* Clear render buffer base addresses */ 2265 WREG32(CB_COLOR0_BASE, 0); 2266 WREG32(CB_COLOR1_BASE, 0); 2267 WREG32(CB_COLOR2_BASE, 0); 2268 WREG32(CB_COLOR3_BASE, 0); 2269 WREG32(CB_COLOR4_BASE, 0); 2270 WREG32(CB_COLOR5_BASE, 0); 2271 WREG32(CB_COLOR6_BASE, 0); 2272 WREG32(CB_COLOR7_BASE, 0); 2273 WREG32(CB_COLOR7_FRAG, 0); 2274 2275 switch (rdev->family) { 2276 case CHIP_RV610: 2277 case CHIP_RV620: 2278 case CHIP_RS780: 2279 case CHIP_RS880: 2280 tmp = TC_L2_SIZE(8); 2281 break; 2282 case CHIP_RV630: 2283 case CHIP_RV635: 2284 tmp = TC_L2_SIZE(4); 2285 break; 2286 case CHIP_R600: 2287 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT; 2288 break; 2289 default: 2290 tmp = TC_L2_SIZE(0); 2291 break; 2292 } 2293 WREG32(TC_CNTL, tmp); 2294 2295 tmp = RREG32(HDP_HOST_PATH_CNTL); 2296 WREG32(HDP_HOST_PATH_CNTL, tmp); 2297 2298 tmp = RREG32(ARB_POP); 2299 tmp |= ENABLE_TC128; 2300 WREG32(ARB_POP, tmp); 2301 2302 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2303 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 2304 NUM_CLIP_SEQ(3))); 2305 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 2306 WREG32(VC_ENHANCE, 0); 2307 } 2308 2309 2310 /* 2311 * Indirect registers accessor 2312 */ 2313 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2314 { 2315 u32 r; 2316 2317 spin_lock(&rdev->pciep_idx_lock); 2318 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2319 (void)RREG32(PCIE_PORT_INDEX); 2320 r = RREG32(PCIE_PORT_DATA); 2321 spin_unlock(&rdev->pciep_idx_lock); 2322 return r; 2323 } 2324 2325 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2326 { 2327 spin_lock(&rdev->pciep_idx_lock); 2328 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2329 (void)RREG32(PCIE_PORT_INDEX); 2330 WREG32(PCIE_PORT_DATA, (v)); 2331 (void)RREG32(PCIE_PORT_DATA); 2332 spin_unlock(&rdev->pciep_idx_lock); 2333 } 2334 2335 /* 2336 * CP & Ring 2337 */ 2338 void r600_cp_stop(struct radeon_device *rdev) 2339 { 2340 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) 2341 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2342 WREG32(R_0086D8_CP_ME_CNTL, 
S_0086D8_CP_ME_HALT(1)); 2343 WREG32(SCRATCH_UMSK, 0); 2344 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2345 } 2346 2347 int r600_init_microcode(struct radeon_device *rdev) 2348 { 2349 const char *chip_name; 2350 const char *rlc_chip_name; 2351 const char *smc_chip_name = "RV770"; 2352 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0; 2353 char fw_name[30]; 2354 int err; 2355 2356 DRM_DEBUG("\n"); 2357 2358 switch (rdev->family) { 2359 case CHIP_R600: 2360 chip_name = "R600"; 2361 rlc_chip_name = "R600"; 2362 break; 2363 case CHIP_RV610: 2364 chip_name = "RV610"; 2365 rlc_chip_name = "R600"; 2366 break; 2367 case CHIP_RV630: 2368 chip_name = "RV630"; 2369 rlc_chip_name = "R600"; 2370 break; 2371 case CHIP_RV620: 2372 chip_name = "RV620"; 2373 rlc_chip_name = "R600"; 2374 break; 2375 case CHIP_RV635: 2376 chip_name = "RV635"; 2377 rlc_chip_name = "R600"; 2378 break; 2379 case CHIP_RV670: 2380 chip_name = "RV670"; 2381 rlc_chip_name = "R600"; 2382 break; 2383 case CHIP_RS780: 2384 case CHIP_RS880: 2385 chip_name = "RS780"; 2386 rlc_chip_name = "R600"; 2387 break; 2388 case CHIP_RV770: 2389 chip_name = "RV770"; 2390 rlc_chip_name = "R700"; 2391 smc_chip_name = "RV770"; 2392 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4); 2393 break; 2394 case CHIP_RV730: 2395 chip_name = "RV730"; 2396 rlc_chip_name = "R700"; 2397 smc_chip_name = "RV730"; 2398 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4); 2399 break; 2400 case CHIP_RV710: 2401 chip_name = "RV710"; 2402 rlc_chip_name = "R700"; 2403 smc_chip_name = "RV710"; 2404 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4); 2405 break; 2406 case CHIP_RV740: 2407 chip_name = "RV730"; 2408 rlc_chip_name = "R700"; 2409 smc_chip_name = "RV740"; 2410 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4); 2411 break; 2412 case CHIP_CEDAR: 2413 chip_name = "CEDAR"; 2414 rlc_chip_name = "CEDAR"; 2415 smc_chip_name = "CEDAR"; 2416 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4); 2417 break; 2418 case CHIP_REDWOOD: 2419 chip_name = "REDWOOD"; 2420 rlc_chip_name = "REDWOOD"; 2421 smc_chip_name = "REDWOOD"; 2422 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4); 2423 break; 2424 case CHIP_JUNIPER: 2425 chip_name = "JUNIPER"; 2426 rlc_chip_name = "JUNIPER"; 2427 smc_chip_name = "JUNIPER"; 2428 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4); 2429 break; 2430 case CHIP_CYPRESS: 2431 case CHIP_HEMLOCK: 2432 chip_name = "CYPRESS"; 2433 rlc_chip_name = "CYPRESS"; 2434 smc_chip_name = "CYPRESS"; 2435 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4); 2436 break; 2437 case CHIP_PALM: 2438 chip_name = "PALM"; 2439 rlc_chip_name = "SUMO"; 2440 break; 2441 case CHIP_SUMO: 2442 chip_name = "SUMO"; 2443 rlc_chip_name = "SUMO"; 2444 break; 2445 case CHIP_SUMO2: 2446 chip_name = "SUMO2"; 2447 rlc_chip_name = "SUMO"; 2448 break; 2449 default: BUG(); 2450 } 2451 2452 if (rdev->family >= CHIP_CEDAR) { 2453 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 2454 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 2455 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 2456 } else if (rdev->family >= CHIP_RV770) { 2457 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 2458 me_req_size = R700_PM4_UCODE_SIZE * 4; 2459 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2460 } else { 2461 pfp_req_size = R600_PFP_UCODE_SIZE * 4; 2462 me_req_size = R600_PM4_UCODE_SIZE * 12; 2463 rlc_req_size = R600_RLC_UCODE_SIZE * 4; 2464 } 2465 2466 DRM_INFO("Loading %s Microcode\n", chip_name); 2467 2468 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name); 2469 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); 
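	/* Sanity note: each image fetched here is checked against the per-family
	 * size computed above (e.g. an R600-class PFP image must be
	 * R600_PFP_UCODE_SIZE * 4 bytes); a short or wrong-family file is
	 * rejected with -EINVAL rather than uploaded to the CP.
	 */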
2470 if (err) 2471 goto out; 2472 if (rdev->pfp_fw->datasize != pfp_req_size) { 2473 printk(KERN_ERR 2474 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2475 rdev->pfp_fw->datasize, fw_name); 2476 err = -EINVAL; 2477 goto out; 2478 } 2479 2480 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name); 2481 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 2482 if (err) 2483 goto out; 2484 if (rdev->me_fw->datasize != me_req_size) { 2485 printk(KERN_ERR 2486 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2487 rdev->me_fw->datasize, fw_name); 2488 err = -EINVAL; 2489 } 2490 2491 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name); 2492 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); 2493 if (err) 2494 goto out; 2495 if (rdev->rlc_fw->datasize != rlc_req_size) { 2496 printk(KERN_ERR 2497 "r600_rlc: Bogus length %zu in firmware \"%s\"\n", 2498 rdev->rlc_fw->datasize, fw_name); 2499 err = -EINVAL; 2500 } 2501 2502 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { 2503 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", smc_chip_name); 2504 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2505 if (err) { 2506 printk(KERN_ERR 2507 "smc: error loading firmware \"%s\"\n", 2508 fw_name); 2509 release_firmware(rdev->smc_fw); 2510 rdev->smc_fw = NULL; 2511 err = 0; 2512 } else if (rdev->smc_fw->datasize != smc_req_size) { 2513 printk(KERN_ERR 2514 "smc: Bogus length %zu in firmware \"%s\"\n", 2515 rdev->smc_fw->datasize, fw_name); 2516 err = -EINVAL; 2517 } 2518 } 2519 2520 out: 2521 if (err) { 2522 if (err != -EINVAL) 2523 printk(KERN_ERR 2524 "r600_cp: Failed to load firmware \"%s\"\n", 2525 fw_name); 2526 release_firmware(rdev->pfp_fw); 2527 rdev->pfp_fw = NULL; 2528 release_firmware(rdev->me_fw); 2529 rdev->me_fw = NULL; 2530 release_firmware(rdev->rlc_fw); 2531 rdev->rlc_fw = NULL; 2532 release_firmware(rdev->smc_fw); 2533 rdev->smc_fw = NULL; 2534 } 2535 return err; 2536 } 2537 2538 u32 r600_gfx_get_rptr(struct radeon_device *rdev, 2539 struct radeon_ring *ring) 2540 { 2541 u32 rptr; 2542 2543 if (rdev->wb.enabled) 2544 rptr = rdev->wb.wb[ring->rptr_offs/4]; 2545 else 2546 rptr = RREG32(R600_CP_RB_RPTR); 2547 2548 return rptr; 2549 } 2550 2551 u32 r600_gfx_get_wptr(struct radeon_device *rdev, 2552 struct radeon_ring *ring) 2553 { 2554 u32 wptr; 2555 2556 wptr = RREG32(R600_CP_RB_WPTR); 2557 2558 return wptr; 2559 } 2560 2561 void r600_gfx_set_wptr(struct radeon_device *rdev, 2562 struct radeon_ring *ring) 2563 { 2564 WREG32(R600_CP_RB_WPTR, ring->wptr); 2565 (void)RREG32(R600_CP_RB_WPTR); 2566 } 2567 2568 /** 2569 * r600_fini_microcode - drop the firmwares image references 2570 * 2571 * @rdev: radeon_device pointer 2572 * 2573 * Drop the pfp, me and rlc firmwares image references. 2574 * Called at driver shutdown. 
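 * The SMC firmware reference, when one was loaded, is dropped here as well.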
2575 */ 2576 void r600_fini_microcode(struct radeon_device *rdev) 2577 { 2578 release_firmware(rdev->pfp_fw); 2579 rdev->pfp_fw = NULL; 2580 release_firmware(rdev->me_fw); 2581 rdev->me_fw = NULL; 2582 release_firmware(rdev->rlc_fw); 2583 rdev->rlc_fw = NULL; 2584 release_firmware(rdev->smc_fw); 2585 rdev->smc_fw = NULL; 2586 } 2587 2588 static int r600_cp_load_microcode(struct radeon_device *rdev) 2589 { 2590 const __be32 *fw_data; 2591 int i; 2592 2593 if (!rdev->me_fw || !rdev->pfp_fw) 2594 return -EINVAL; 2595 2596 r600_cp_stop(rdev); 2597 2598 WREG32(CP_RB_CNTL, 2599 #ifdef __BIG_ENDIAN 2600 BUF_SWAP_32BIT | 2601 #endif 2602 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2603 2604 /* Reset cp */ 2605 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2606 RREG32(GRBM_SOFT_RESET); 2607 mdelay(15); 2608 WREG32(GRBM_SOFT_RESET, 0); 2609 2610 WREG32(CP_ME_RAM_WADDR, 0); 2611 2612 fw_data = (const __be32 *)rdev->me_fw->data; 2613 WREG32(CP_ME_RAM_WADDR, 0); 2614 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++) 2615 WREG32(CP_ME_RAM_DATA, 2616 be32_to_cpup(fw_data++)); 2617 2618 fw_data = (const __be32 *)rdev->pfp_fw->data; 2619 WREG32(CP_PFP_UCODE_ADDR, 0); 2620 for (i = 0; i < R600_PFP_UCODE_SIZE; i++) 2621 WREG32(CP_PFP_UCODE_DATA, 2622 be32_to_cpup(fw_data++)); 2623 2624 WREG32(CP_PFP_UCODE_ADDR, 0); 2625 WREG32(CP_ME_RAM_WADDR, 0); 2626 WREG32(CP_ME_RAM_RADDR, 0); 2627 return 0; 2628 } 2629 2630 int r600_cp_start(struct radeon_device *rdev) 2631 { 2632 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2633 int r; 2634 uint32_t cp_me; 2635 2636 r = radeon_ring_lock(rdev, ring, 7); 2637 if (r) { 2638 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2639 return r; 2640 } 2641 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2642 radeon_ring_write(ring, 0x1); 2643 if (rdev->family >= CHIP_RV770) { 2644 radeon_ring_write(ring, 0x0); 2645 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); 2646 } else { 2647 radeon_ring_write(ring, 0x3); 2648 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); 2649 } 2650 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2651 radeon_ring_write(ring, 0); 2652 radeon_ring_write(ring, 0); 2653 radeon_ring_unlock_commit(rdev, ring, false); 2654 2655 cp_me = 0xff; 2656 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2657 return 0; 2658 } 2659 2660 int r600_cp_resume(struct radeon_device *rdev) 2661 { 2662 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2663 u32 tmp; 2664 u32 rb_bufsz; 2665 int r; 2666 2667 /* Reset cp */ 2668 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2669 RREG32(GRBM_SOFT_RESET); 2670 mdelay(15); 2671 WREG32(GRBM_SOFT_RESET, 0); 2672 2673 /* Set ring buffer size */ 2674 rb_bufsz = order_base_2(ring->ring_size / 8); 2675 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2676 #ifdef __BIG_ENDIAN 2677 tmp |= BUF_SWAP_32BIT; 2678 #endif 2679 WREG32(CP_RB_CNTL, tmp); 2680 WREG32(CP_SEM_WAIT_TIMER, 0x0); 2681 2682 /* Set the write pointer delay */ 2683 WREG32(CP_RB_WPTR_DELAY, 0); 2684 2685 /* Initialize the ring buffer's read and write pointers */ 2686 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2687 WREG32(CP_RB_RPTR_WR, 0); 2688 ring->wptr = 0; 2689 WREG32(CP_RB_WPTR, ring->wptr); 2690 2691 /* set the wb address whether it's enabled or not */ 2692 WREG32(CP_RB_RPTR_ADDR, 2693 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 2694 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2695 WREG32(SCRATCH_ADDR, 
((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2696 2697 if (rdev->wb.enabled) 2698 WREG32(SCRATCH_UMSK, 0xff); 2699 else { 2700 tmp |= RB_NO_UPDATE; 2701 WREG32(SCRATCH_UMSK, 0); 2702 } 2703 2704 mdelay(1); 2705 WREG32(CP_RB_CNTL, tmp); 2706 2707 WREG32(CP_RB_BASE, ring->gpu_addr >> 8); 2708 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2709 2710 r600_cp_start(rdev); 2711 ring->ready = true; 2712 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 2713 if (r) { 2714 ring->ready = false; 2715 return r; 2716 } 2717 2718 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) 2719 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 2720 2721 return 0; 2722 } 2723 2724 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) 2725 { 2726 u32 rb_bufsz; 2727 int r; 2728 2729 /* Align ring size */ 2730 rb_bufsz = order_base_2(ring_size / 8); 2731 ring_size = (1 << (rb_bufsz + 1)) * 4; 2732 ring->ring_size = ring_size; 2733 ring->align_mask = 16 - 1; 2734 2735 if (radeon_ring_supports_scratch_reg(rdev, ring)) { 2736 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 2737 if (r) { 2738 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 2739 ring->rptr_save_reg = 0; 2740 } 2741 } 2742 } 2743 2744 void r600_cp_fini(struct radeon_device *rdev) 2745 { 2746 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2747 r600_cp_stop(rdev); 2748 radeon_ring_fini(rdev, ring); 2749 radeon_scratch_free(rdev, ring->rptr_save_reg); 2750 } 2751 2752 /* 2753 * GPU scratch registers helpers function. 2754 */ 2755 void r600_scratch_init(struct radeon_device *rdev) 2756 { 2757 int i; 2758 2759 rdev->scratch.num_reg = 7; 2760 rdev->scratch.reg_base = SCRATCH_REG0; 2761 for (i = 0; i < rdev->scratch.num_reg; i++) { 2762 rdev->scratch.free[i] = true; 2763 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 2764 } 2765 } 2766 2767 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 2768 { 2769 uint32_t scratch; 2770 uint32_t tmp = 0; 2771 unsigned i; 2772 int r; 2773 2774 r = radeon_scratch_get(rdev, &scratch); 2775 if (r) { 2776 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 2777 return r; 2778 } 2779 WREG32(scratch, 0xCAFEDEAD); 2780 r = radeon_ring_lock(rdev, ring, 3); 2781 if (r) { 2782 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); 2783 radeon_scratch_free(rdev, scratch); 2784 return r; 2785 } 2786 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2787 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2788 radeon_ring_write(ring, 0xDEADBEEF); 2789 radeon_ring_unlock_commit(rdev, ring, false); 2790 for (i = 0; i < rdev->usec_timeout; i++) { 2791 tmp = RREG32(scratch); 2792 if (tmp == 0xDEADBEEF) 2793 break; 2794 DRM_UDELAY(1); 2795 } 2796 if (i < rdev->usec_timeout) { 2797 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 2798 } else { 2799 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n", 2800 ring->idx, scratch, tmp); 2801 r = -EINVAL; 2802 } 2803 radeon_scratch_free(rdev, scratch); 2804 return r; 2805 } 2806 2807 /* 2808 * CP fences/semaphores 2809 */ 2810 2811 void r600_fence_ring_emit(struct radeon_device *rdev, 2812 struct radeon_fence *fence) 2813 { 2814 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2815 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | 2816 PACKET3_SH_ACTION_ENA; 2817 2818 if (rdev->family >= CHIP_RV770) 2819 cp_coher_cntl 
|= PACKET3_FULL_CACHE_ENA; 2820 2821 if (rdev->wb.use_event) { 2822 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 2823 /* flush read cache over gart */ 2824 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2825 radeon_ring_write(ring, cp_coher_cntl); 2826 radeon_ring_write(ring, 0xFFFFFFFF); 2827 radeon_ring_write(ring, 0); 2828 radeon_ring_write(ring, 10); /* poll interval */ 2829 /* EVENT_WRITE_EOP - flush caches, send int */ 2830 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2831 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2832 radeon_ring_write(ring, lower_32_bits(addr)); 2833 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 2834 radeon_ring_write(ring, fence->seq); 2835 radeon_ring_write(ring, 0); 2836 } else { 2837 /* flush read cache over gart */ 2838 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2839 radeon_ring_write(ring, cp_coher_cntl); 2840 radeon_ring_write(ring, 0xFFFFFFFF); 2841 radeon_ring_write(ring, 0); 2842 radeon_ring_write(ring, 10); /* poll interval */ 2843 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2844 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); 2845 /* wait for 3D idle clean */ 2846 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2847 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2848 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); 2849 /* Emit fence sequence & fire IRQ */ 2850 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2851 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2852 radeon_ring_write(ring, fence->seq); 2853 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 2854 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0)); 2855 radeon_ring_write(ring, RB_INT_STAT); 2856 } 2857 } 2858 2859 /** 2860 * r600_semaphore_ring_emit - emit a semaphore on the CP ring 2861 * 2862 * @rdev: radeon_device pointer 2863 * @ring: radeon ring buffer object 2864 * @semaphore: radeon semaphore object 2865 * @emit_wait: Is this a semaphore wait? 2866 * 2867 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP 2868 * from running ahead of semaphore waits. 2869 */ 2870 bool r600_semaphore_ring_emit(struct radeon_device *rdev, 2871 struct radeon_ring *ring, 2872 struct radeon_semaphore *semaphore, 2873 bool emit_wait) 2874 { 2875 uint64_t addr = semaphore->gpu_addr; 2876 unsigned sel = emit_wait ?
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 2877 2878 if (rdev->family < CHIP_CAYMAN) 2879 sel |= PACKET3_SEM_WAIT_ON_SIGNAL; 2880 2881 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2882 radeon_ring_write(ring, lower_32_bits(addr)); 2883 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2884 2885 /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */ 2886 if (emit_wait && (rdev->family >= CHIP_CEDAR)) { 2887 /* Prevent the PFP from running ahead of the semaphore wait */ 2888 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 2889 radeon_ring_write(ring, 0x0); 2890 } 2891 2892 return true; 2893 } 2894 2895 /** 2896 * r600_copy_cpdma - copy pages using the CP DMA engine 2897 * 2898 * @rdev: radeon_device pointer 2899 * @src_offset: src GPU address 2900 * @dst_offset: dst GPU address 2901 * @num_gpu_pages: number of GPU pages to xfer 2902 * @fence: radeon fence object 2903 * 2904 * Copy GPU paging using the CP DMA engine (r6xx+). 2905 * Used by the radeon ttm implementation to move pages if 2906 * registered as the asic copy callback. 2907 */ 2908 int r600_copy_cpdma(struct radeon_device *rdev, 2909 uint64_t src_offset, uint64_t dst_offset, 2910 unsigned num_gpu_pages, 2911 struct radeon_fence **fence) 2912 { 2913 struct radeon_semaphore *sem = NULL; 2914 int ring_index = rdev->asic->copy.blit_ring_index; 2915 struct radeon_ring *ring = &rdev->ring[ring_index]; 2916 u32 size_in_bytes, cur_size_in_bytes, tmp; 2917 int i, num_loops; 2918 int r = 0; 2919 2920 r = radeon_semaphore_create(rdev, &sem); 2921 if (r) { 2922 DRM_ERROR("radeon: moving bo (%d).\n", r); 2923 return r; 2924 } 2925 2926 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 2927 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 2928 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); 2929 if (r) { 2930 DRM_ERROR("radeon: moving bo (%d).\n", r); 2931 radeon_semaphore_free(rdev, &sem, NULL); 2932 return r; 2933 } 2934 2935 radeon_semaphore_sync_to(sem, *fence); 2936 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 2937 2938 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2939 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2940 radeon_ring_write(ring, WAIT_3D_IDLE_bit); 2941 for (i = 0; i < num_loops; i++) { 2942 cur_size_in_bytes = size_in_bytes; 2943 if (cur_size_in_bytes > 0x1fffff) 2944 cur_size_in_bytes = 0x1fffff; 2945 size_in_bytes -= cur_size_in_bytes; 2946 tmp = upper_32_bits(src_offset) & 0xff; 2947 if (size_in_bytes == 0) 2948 tmp |= PACKET3_CP_DMA_CP_SYNC; 2949 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); 2950 radeon_ring_write(ring, lower_32_bits(src_offset)); 2951 radeon_ring_write(ring, tmp); 2952 radeon_ring_write(ring, lower_32_bits(dst_offset)); 2953 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); 2954 radeon_ring_write(ring, cur_size_in_bytes); 2955 src_offset += cur_size_in_bytes; 2956 dst_offset += cur_size_in_bytes; 2957 } 2958 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2959 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2960 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); 2961 2962 r = radeon_fence_emit(rdev, fence, ring->idx); 2963 if (r) { 2964 radeon_ring_unlock_undo(rdev, ring); 2965 radeon_semaphore_free(rdev, &sem, NULL); 2966 return r; 2967 } 2968 2969 radeon_ring_unlock_commit(rdev, ring, false); 2970 radeon_semaphore_free(rdev, &sem, *fence); 2971 2972 return r; 2973 } 2974 2975 int r600_set_surface_reg(struct radeon_device *rdev, int 
reg, 2976 uint32_t tiling_flags, uint32_t pitch, 2977 uint32_t offset, uint32_t obj_size) 2978 { 2979 /* FIXME: implement */ 2980 return 0; 2981 } 2982 2983 void r600_clear_surface_reg(struct radeon_device *rdev, int reg) 2984 { 2985 /* FIXME: implement */ 2986 } 2987 2988 static int r600_startup(struct radeon_device *rdev) 2989 { 2990 struct radeon_ring *ring; 2991 int r; 2992 2993 /* enable pcie gen2 link */ 2994 r600_pcie_gen2_enable(rdev); 2995 2996 /* scratch needs to be initialized before MC */ 2997 r = r600_vram_scratch_init(rdev); 2998 if (r) 2999 return r; 3000 3001 r600_mc_program(rdev); 3002 3003 if (rdev->flags & RADEON_IS_AGP) { 3004 r600_agp_enable(rdev); 3005 } else { 3006 r = r600_pcie_gart_enable(rdev); 3007 if (r) 3008 return r; 3009 } 3010 r600_gpu_init(rdev); 3011 3012 /* allocate wb buffer */ 3013 r = radeon_wb_init(rdev); 3014 if (r) 3015 return r; 3016 3017 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 3018 if (r) { 3019 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 3020 return r; 3021 } 3022 3023 if (rdev->has_uvd) { 3024 r = uvd_v1_0_resume(rdev); 3025 if (!r) { 3026 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); 3027 if (r) { 3028 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r); 3029 } 3030 } 3031 if (r) 3032 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; 3033 } 3034 3035 /* Enable IRQ */ 3036 if (!rdev->irq.installed) { 3037 r = radeon_irq_kms_init(rdev); 3038 if (r) 3039 return r; 3040 } 3041 3042 r = r600_irq_init(rdev); 3043 if (r) { 3044 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3045 radeon_irq_kms_fini(rdev); 3046 return r; 3047 } 3048 r600_irq_set(rdev); 3049 3050 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3051 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 3052 RADEON_CP_PACKET2); 3053 if (r) 3054 return r; 3055 3056 r = r600_cp_load_microcode(rdev); 3057 if (r) 3058 return r; 3059 r = r600_cp_resume(rdev); 3060 if (r) 3061 return r; 3062 3063 if (rdev->has_uvd) { 3064 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 3065 if (ring->ring_size) { 3066 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 3067 RADEON_CP_PACKET2); 3068 if (!r) 3069 r = uvd_v1_0_init(rdev); 3070 if (r) 3071 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 3072 } 3073 } 3074 3075 r = radeon_ib_pool_init(rdev); 3076 if (r) { 3077 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 3078 return r; 3079 } 3080 3081 r = r600_audio_init(rdev); 3082 if (r) { 3083 DRM_ERROR("radeon: audio init failed\n"); 3084 return r; 3085 } 3086 3087 return 0; 3088 } 3089 3090 void r600_vga_set_state(struct radeon_device *rdev, bool state) 3091 { 3092 uint32_t temp; 3093 3094 temp = RREG32(CONFIG_CNTL); 3095 if (state == false) { 3096 temp &= ~(1<<0); 3097 temp |= (1<<1); 3098 } else { 3099 temp &= ~(1<<1); 3100 } 3101 WREG32(CONFIG_CNTL, temp); 3102 } 3103 3104 int r600_resume(struct radeon_device *rdev) 3105 { 3106 int r; 3107 3108 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, 3109 * posting will perform necessary task to bring back GPU into good 3110 * shape. 
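 * Resume therefore just re-posts the card via atom_asic_init() and then goes
 * through the regular r600_startup() path.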
3111 */ 3112 /* post card */ 3113 atom_asic_init(rdev->mode_info.atom_context); 3114 3115 if (rdev->pm.pm_method == PM_METHOD_DPM) 3116 radeon_pm_resume(rdev); 3117 3118 rdev->accel_working = true; 3119 r = r600_startup(rdev); 3120 if (r) { 3121 DRM_ERROR("r600 startup failed on resume\n"); 3122 rdev->accel_working = false; 3123 return r; 3124 } 3125 3126 return r; 3127 } 3128 3129 int r600_suspend(struct radeon_device *rdev) 3130 { 3131 radeon_pm_suspend(rdev); 3132 r600_audio_fini(rdev); 3133 r600_cp_stop(rdev); 3134 if (rdev->has_uvd) { 3135 uvd_v1_0_fini(rdev); 3136 radeon_uvd_suspend(rdev); 3137 } 3138 r600_irq_suspend(rdev); 3139 radeon_wb_disable(rdev); 3140 r600_pcie_gart_disable(rdev); 3141 3142 return 0; 3143 } 3144 3145 /* Plan is to move initialization in that function and use 3146 * helper function so that radeon_device_init pretty much 3147 * do nothing more than calling asic specific function. This 3148 * should also allow to remove a bunch of callback function 3149 * like vram_info. 3150 */ 3151 int r600_init(struct radeon_device *rdev) 3152 { 3153 int r; 3154 3155 if (r600_debugfs_mc_info_init(rdev)) { 3156 DRM_ERROR("Failed to register debugfs file for mc !\n"); 3157 } 3158 /* Read BIOS */ 3159 if (!radeon_get_bios(rdev)) { 3160 if (ASIC_IS_AVIVO(rdev)) 3161 return -EINVAL; 3162 } 3163 /* Must be an ATOMBIOS */ 3164 if (!rdev->is_atom_bios) { 3165 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); 3166 return -EINVAL; 3167 } 3168 r = radeon_atombios_init(rdev); 3169 if (r) 3170 return r; 3171 /* Post card if necessary */ 3172 if (!radeon_card_posted(rdev)) { 3173 if (!rdev->bios) { 3174 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3175 return -EINVAL; 3176 } 3177 DRM_INFO("GPU not posted. posting now...\n"); 3178 atom_asic_init(rdev->mode_info.atom_context); 3179 } 3180 /* Initialize scratch registers */ 3181 r600_scratch_init(rdev); 3182 /* Initialize surface registers */ 3183 radeon_surface_init(rdev); 3184 /* Initialize clocks */ 3185 radeon_get_clock_info(rdev->ddev); 3186 /* Fence driver */ 3187 r = radeon_fence_driver_init(rdev); 3188 if (r) 3189 return r; 3190 if (rdev->flags & RADEON_IS_AGP) { 3191 r = radeon_agp_init(rdev); 3192 if (r) 3193 radeon_agp_disable(rdev); 3194 } 3195 r = r600_mc_init(rdev); 3196 if (r) 3197 return r; 3198 /* Memory manager */ 3199 r = radeon_bo_init(rdev); 3200 if (r) 3201 return r; 3202 3203 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 3204 r = r600_init_microcode(rdev); 3205 if (r) { 3206 DRM_ERROR("Failed to load firmware!\n"); 3207 return r; 3208 } 3209 } 3210 3211 /* Initialize power management */ 3212 radeon_pm_init(rdev); 3213 3214 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3215 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3216 3217 if (rdev->has_uvd) { 3218 r = radeon_uvd_init(rdev); 3219 if (!r) { 3220 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; 3221 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096); 3222 } 3223 } 3224 3225 rdev->ih.ring_obj = NULL; 3226 r600_ih_ring_init(rdev, 64 * 1024); 3227 3228 r = r600_pcie_gart_init(rdev); 3229 if (r) 3230 return r; 3231 3232 rdev->accel_working = true; 3233 r = r600_startup(rdev); 3234 if (r) { 3235 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3236 r600_cp_fini(rdev); 3237 r600_irq_fini(rdev); 3238 radeon_wb_fini(rdev); 3239 radeon_ib_pool_fini(rdev); 3240 radeon_irq_kms_fini(rdev); 3241 r600_pcie_gart_fini(rdev); 3242 rdev->accel_working = false; 3243 } 3244 3245 return 0; 
3246 } 3247 3248 void r600_fini(struct radeon_device *rdev) 3249 { 3250 radeon_pm_fini(rdev); 3251 r600_audio_fini(rdev); 3252 r600_cp_fini(rdev); 3253 r600_irq_fini(rdev); 3254 if (rdev->has_uvd) { 3255 uvd_v1_0_fini(rdev); 3256 radeon_uvd_fini(rdev); 3257 } 3258 radeon_wb_fini(rdev); 3259 radeon_ib_pool_fini(rdev); 3260 radeon_irq_kms_fini(rdev); 3261 r600_pcie_gart_fini(rdev); 3262 r600_vram_scratch_fini(rdev); 3263 radeon_agp_fini(rdev); 3264 radeon_gem_fini(rdev); 3265 radeon_fence_driver_fini(rdev); 3266 radeon_bo_fini(rdev); 3267 radeon_atombios_fini(rdev); 3268 r600_fini_microcode(rdev); 3269 kfree(rdev->bios); 3270 rdev->bios = NULL; 3271 } 3272 3273 3274 /* 3275 * CS stuff 3276 */ 3277 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3278 { 3279 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3280 u32 next_rptr; 3281 3282 if (ring->rptr_save_reg) { 3283 next_rptr = ring->wptr + 3 + 4; 3284 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3285 radeon_ring_write(ring, ((ring->rptr_save_reg - 3286 PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 3287 radeon_ring_write(ring, next_rptr); 3288 } else if (rdev->wb.enabled) { 3289 next_rptr = ring->wptr + 5 + 4; 3290 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); 3291 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3292 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); 3293 radeon_ring_write(ring, next_rptr); 3294 radeon_ring_write(ring, 0); 3295 } 3296 3297 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3298 radeon_ring_write(ring, 3299 #ifdef __BIG_ENDIAN 3300 (2 << 0) | 3301 #endif 3302 (ib->gpu_addr & 0xFFFFFFFC)); 3303 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 3304 radeon_ring_write(ring, ib->length_dw); 3305 } 3306 3307 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3308 { 3309 struct radeon_ib ib; 3310 uint32_t scratch; 3311 uint32_t tmp = 0; 3312 unsigned i; 3313 int r; 3314 3315 r = radeon_scratch_get(rdev, &scratch); 3316 if (r) { 3317 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3318 return r; 3319 } 3320 WREG32(scratch, 0xCAFEDEAD); 3321 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3322 if (r) { 3323 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3324 goto free_scratch; 3325 } 3326 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); 3327 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3328 ib.ptr[2] = 0xDEADBEEF; 3329 ib.length_dw = 3; 3330 r = radeon_ib_schedule(rdev, &ib, NULL, false); 3331 if (r) { 3332 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3333 goto free_ib; 3334 } 3335 r = radeon_fence_wait(ib.fence, false); 3336 if (r) { 3337 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3338 goto free_ib; 3339 } 3340 for (i = 0; i < rdev->usec_timeout; i++) { 3341 tmp = RREG32(scratch); 3342 if (tmp == 0xDEADBEEF) 3343 break; 3344 DRM_UDELAY(1); 3345 } 3346 if (i < rdev->usec_timeout) { 3347 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); 3348 } else { 3349 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", 3350 scratch, tmp); 3351 r = -EINVAL; 3352 } 3353 free_ib: 3354 radeon_ib_free(rdev, &ib); 3355 free_scratch: 3356 radeon_scratch_free(rdev, scratch); 3357 return r; 3358 } 3359 3360 /* 3361 * Interrupts 3362 * 3363 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty 3364 * the same as the CP ring buffer, but in reverse. 
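 * (The ring is a power-of-two number of bytes, 64 KB as set up in r600_init(),
 * and the read pointer wraps through ih.ptr_mask = ring_size - 1.)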
Rather than the CPU 3365 * writing to the ring and the GPU consuming, the GPU writes to the ring 3366 * and host consumes. As the host irq handler processes interrupts, it 3367 * increments the rptr. When the rptr catches up with the wptr, all the 3368 * current interrupts have been processed. 3369 */ 3370 3371 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) 3372 { 3373 u32 rb_bufsz; 3374 3375 /* Align ring size */ 3376 rb_bufsz = order_base_2(ring_size / 4); 3377 ring_size = (1 << rb_bufsz) * 4; 3378 rdev->ih.ring_size = ring_size; 3379 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3380 rdev->ih.rptr = 0; 3381 } 3382 3383 int r600_ih_ring_alloc(struct radeon_device *rdev) 3384 { 3385 int r; 3386 void *ring_ptr; 3387 3388 /* Allocate ring buffer */ 3389 if (rdev->ih.ring_obj == NULL) { 3390 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3391 PAGE_SIZE, true, 3392 RADEON_GEM_DOMAIN_GTT, 0, 3393 NULL, &rdev->ih.ring_obj); 3394 if (r) { 3395 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3396 return r; 3397 } 3398 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3399 if (unlikely(r != 0)) { 3400 radeon_bo_unref(&rdev->ih.ring_obj); 3401 return r; 3402 } 3403 r = radeon_bo_pin(rdev->ih.ring_obj, 3404 RADEON_GEM_DOMAIN_GTT, 3405 (u64 *)&rdev->ih.gpu_addr); 3406 if (r) { 3407 radeon_bo_unreserve(rdev->ih.ring_obj); 3408 radeon_bo_unref(&rdev->ih.ring_obj); 3409 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); 3410 return r; 3411 } 3412 ring_ptr = &rdev->ih.ring; 3413 r = radeon_bo_kmap(rdev->ih.ring_obj, 3414 ring_ptr); 3415 if (r) 3416 radeon_bo_unpin(rdev->ih.ring_obj); 3417 radeon_bo_unreserve(rdev->ih.ring_obj); 3418 if (r) { 3419 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); 3420 radeon_bo_unref(&rdev->ih.ring_obj); 3421 return r; 3422 } 3423 } 3424 return 0; 3425 } 3426 3427 void r600_ih_ring_fini(struct radeon_device *rdev) 3428 { 3429 int r; 3430 if (rdev->ih.ring_obj) { 3431 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3432 if (likely(r == 0)) { 3433 radeon_bo_kunmap(rdev->ih.ring_obj); 3434 radeon_bo_unpin(rdev->ih.ring_obj); 3435 radeon_bo_unreserve(rdev->ih.ring_obj); 3436 } 3437 radeon_bo_unref(&rdev->ih.ring_obj); 3438 rdev->ih.ring = NULL; 3439 rdev->ih.ring_obj = NULL; 3440 } 3441 } 3442 3443 void r600_rlc_stop(struct radeon_device *rdev) 3444 { 3445 3446 if ((rdev->family >= CHIP_RV770) && 3447 (rdev->family <= CHIP_RV740)) { 3448 /* r7xx asics need to soft reset RLC before halting */ 3449 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 3450 RREG32(SRBM_SOFT_RESET); 3451 mdelay(15); 3452 WREG32(SRBM_SOFT_RESET, 0); 3453 RREG32(SRBM_SOFT_RESET); 3454 } 3455 3456 WREG32(RLC_CNTL, 0); 3457 } 3458 3459 static void r600_rlc_start(struct radeon_device *rdev) 3460 { 3461 WREG32(RLC_CNTL, RLC_ENABLE); 3462 } 3463 3464 static int r600_rlc_resume(struct radeon_device *rdev) 3465 { 3466 u32 i; 3467 const __be32 *fw_data; 3468 3469 if (!rdev->rlc_fw) 3470 return -EINVAL; 3471 3472 r600_rlc_stop(rdev); 3473 3474 WREG32(RLC_HB_CNTL, 0); 3475 3476 WREG32(RLC_HB_BASE, 0); 3477 WREG32(RLC_HB_RPTR, 0); 3478 WREG32(RLC_HB_WPTR, 0); 3479 WREG32(RLC_HB_WPTR_LSB_ADDR, 0); 3480 WREG32(RLC_HB_WPTR_MSB_ADDR, 0); 3481 WREG32(RLC_MC_CNTL, 0); 3482 WREG32(RLC_UCODE_CNTL, 0); 3483 3484 fw_data = (const __be32 *)rdev->rlc_fw->data; 3485 if (rdev->family >= CHIP_RV770) { 3486 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3487 WREG32(RLC_UCODE_ADDR, i); 3488 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3489 } 3490 } else { 3491 for (i = 0; i < 
R600_RLC_UCODE_SIZE; i++) { 3492 WREG32(RLC_UCODE_ADDR, i); 3493 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3494 } 3495 } 3496 WREG32(RLC_UCODE_ADDR, 0); 3497 3498 r600_rlc_start(rdev); 3499 3500 return 0; 3501 } 3502 3503 static void r600_enable_interrupts(struct radeon_device *rdev) 3504 { 3505 u32 ih_cntl = RREG32(IH_CNTL); 3506 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3507 3508 ih_cntl |= ENABLE_INTR; 3509 ih_rb_cntl |= IH_RB_ENABLE; 3510 WREG32(IH_CNTL, ih_cntl); 3511 WREG32(IH_RB_CNTL, ih_rb_cntl); 3512 rdev->ih.enabled = true; 3513 } 3514 3515 void r600_disable_interrupts(struct radeon_device *rdev) 3516 { 3517 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3518 u32 ih_cntl = RREG32(IH_CNTL); 3519 3520 ih_rb_cntl &= ~IH_RB_ENABLE; 3521 ih_cntl &= ~ENABLE_INTR; 3522 WREG32(IH_RB_CNTL, ih_rb_cntl); 3523 WREG32(IH_CNTL, ih_cntl); 3524 /* set rptr, wptr to 0 */ 3525 WREG32(IH_RB_RPTR, 0); 3526 WREG32(IH_RB_WPTR, 0); 3527 rdev->ih.enabled = false; 3528 rdev->ih.rptr = 0; 3529 } 3530 3531 static void r600_disable_interrupt_state(struct radeon_device *rdev) 3532 { 3533 u32 tmp; 3534 3535 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3536 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3537 WREG32(DMA_CNTL, tmp); 3538 WREG32(GRBM_INT_CNTL, 0); 3539 WREG32(DxMODE_INT_MASK, 0); 3540 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3541 WREG32(D2GRPH_INTERRUPT_CONTROL, 0); 3542 if (ASIC_IS_DCE3(rdev)) { 3543 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); 3544 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); 3545 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3546 WREG32(DC_HPD1_INT_CONTROL, tmp); 3547 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3548 WREG32(DC_HPD2_INT_CONTROL, tmp); 3549 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3550 WREG32(DC_HPD3_INT_CONTROL, tmp); 3551 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3552 WREG32(DC_HPD4_INT_CONTROL, tmp); 3553 if (ASIC_IS_DCE32(rdev)) { 3554 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3555 WREG32(DC_HPD5_INT_CONTROL, tmp); 3556 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3557 WREG32(DC_HPD6_INT_CONTROL, tmp); 3558 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3559 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 3560 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3561 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 3562 } else { 3563 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3564 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3565 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3566 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 3567 } 3568 } else { 3569 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 3570 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 3571 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3572 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3573 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3574 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3575 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3576 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3577 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3578 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3579 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3580 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 3581 } 3582 } 3583 3584 int 
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev->bsddev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

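/**
 * r600_irq_set - program the interrupt enable registers
 *
 * @rdev: radeon_device pointer
 *
 * Enable the interrupt sources requested in rdev->irq (vblank, page
 * flip, hotplug, HDMI audio, CP, DMA, thermal) and leave everything
 * else masked.
 * Returns 0 on success, -EINVAL if no interrupt handler is installed.
 */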
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 dma_cntl;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

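	/* write the enable bits collected above back to the hardware */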
	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	return 0;
}

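/**
 * r600_irq_ack - latch and acknowledge the display interrupt status
 *
 * @rdev: radeon_device pointer
 *
 * Snapshot the display interrupt status registers into
 * rdev->irq.stat_regs.r600 and write back the ack bits for any
 * pageflip, vblank, vline, hotplug and HDMI events that fired.
 */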
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

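/**
 * r600_get_ih_wptr - fetch the current write pointer of the IH ring
 *
 * @rdev: radeon_device pointer
 *
 * Read the write pointer from the writeback page if enabled,
 * otherwise from the IH_RB_WPTR register.  If the ring has
 * overflowed, clear the overflow bit and advance the read pointer
 * past the oldest overwritten entries.
 */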
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */

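/**
 * r600_irq_process - drain and dispatch the IH ring
 *
 * @rdev: radeon_device pointer
 *
 * Walk the IH ring from rptr to wptr, decode each 16-byte vector
 * (src_id/src_data as documented above) and dispatch it: vblank and
 * page-flip handling, hotplug/HDMI/thermal work queued to the task
 * queue, and fence processing for CP, DMA and UVD events.
 * Returns IRQ_NONE if the IH is disabled or another thread is already
 * processing interrupts, IRQ_HANDLED otherwise.
 */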
irqreturn_t r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG_VBLANK("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_vblank(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_vblank(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG_VBLANK("IH: D1 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 0);
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG_VBLANK("IH: D2 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 1);
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_hotplug)
		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (queue_hdmi)
		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		taskqueue_enqueue(rdev->tq, &rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 *
 * @rdev: radeon_device pointer
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		volatile uint32_t *ptr = rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = *ptr;
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

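/**
 * r600_set_pcie_lanes - request a new PCIE link width
 *
 * @rdev: radeon_device pointer
 * @lanes: requested lane count (0, 1, 2, 4, 8, 12 or 16)
 *
 * Program PCIE_LC_LINK_WIDTH_CNTL to reconfigure the link to the
 * requested width.  Ignored on IGPs, non-PCIE boards and X2 cards.
 */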
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

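/**
 * r600_pcie_gen2_enable - try to switch the PCIE link to gen 2 speeds
 *
 * @rdev: radeon_device pointer
 *
 * If the board, the bridge and the radeon.pcie_gen2 module parameter
 * all allow it, retrain the PCIE link at 5.0 GT/s.  Skipped on IGPs,
 * non-PCIE boards, X2 cards and anything older than RV6xx.
 */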
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}