1 /* 2 * Copyright 2010 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 * 24 * $FreeBSD: head/sys/dev/drm2/radeon/evergreen.c 254885 2013-08-25 19:37:15Z dumbbell $ 25 */ 26 27 #include <drm/drmP.h> 28 #include "radeon.h" 29 #include "radeon_asic.h" 30 #include <uapi_drm/radeon_drm.h> 31 #include "evergreend.h" 32 #include "atom.h" 33 #include "avivod.h" 34 #include "evergreen_reg.h" 35 #include "evergreen_blit_shaders.h" 36 37 #define EVERGREEN_PFP_UCODE_SIZE 1120 38 #define EVERGREEN_PM4_UCODE_SIZE 1376 39 40 static const u32 crtc_offsets[6] = 41 { 42 EVERGREEN_CRTC0_REGISTER_OFFSET, 43 EVERGREEN_CRTC1_REGISTER_OFFSET, 44 EVERGREEN_CRTC2_REGISTER_OFFSET, 45 EVERGREEN_CRTC3_REGISTER_OFFSET, 46 EVERGREEN_CRTC4_REGISTER_OFFSET, 47 EVERGREEN_CRTC5_REGISTER_OFFSET 48 }; 49 50 static void evergreen_gpu_init(struct radeon_device *rdev); 51 void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 52 53 void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, 54 unsigned *bankh, unsigned *mtaspect, 55 unsigned *tile_split) 56 { 57 *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; 58 *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; 59 *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; 60 *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; 61 switch (*bankw) { 62 default: 63 case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break; 64 case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break; 65 case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break; 66 case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break; 67 } 68 switch (*bankh) { 69 default: 70 case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break; 71 case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break; 72 case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break; 73 case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break; 74 } 75 switch (*mtaspect) { 76 default: 77 case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break; 78 case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break; 79 case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break; 80 case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break; 81 } 82 } 83 84 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) 85 { 86 u16 ctl, v; 87 int err, cap; 88 89 err = pci_find_extcap(rdev->dev, PCIY_EXPRESS, &cap); 90 if (err) 91 return; 92 93 cap += PCIER_DEVCTRL; 94 95 ctl = pci_read_config(rdev->dev, cap, 2); 96 97 v = (ctl & PCIEM_DEVCTL_MAX_READRQ_MASK) >> 12; 98 99 /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it 100 * to avoid hangs or perfomance issues 101 */ 102 if ((v == 0) || (v == 6) || (v == 7)) { 103 ctl &= ~PCIEM_DEVCTL_MAX_READRQ_MASK; 104 ctl |= (2 << 12); 105 pci_write_config(rdev->dev, cap, ctl, 2); 106 } 107 } 108 109 /** 110 * dce4_wait_for_vblank - vblank wait asic callback. 111 * 112 * @rdev: radeon_device pointer 113 * @crtc: crtc to wait for vblank on 114 * 115 * Wait for vblank on the requested crtc (evergreen+). 116 */ 117 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) 118 { 119 int i; 120 121 if (crtc >= rdev->num_crtc) 122 return; 123 124 if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) { 125 for (i = 0; i < rdev->usec_timeout; i++) { 126 if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)) 127 break; 128 DRM_UDELAY(1); 129 } 130 for (i = 0; i < rdev->usec_timeout; i++) { 131 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) 132 break; 133 DRM_UDELAY(1); 134 } 135 } 136 } 137 138 /** 139 * radeon_irq_kms_pflip_irq_get - pre-pageflip callback. 140 * 141 * @rdev: radeon_device pointer 142 * @crtc: crtc to prepare for pageflip on 143 * 144 * Pre-pageflip callback (evergreen+). 145 * Enables the pageflip irq (vblank irq). 146 */ 147 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) 148 { 149 /* enable the pflip int */ 150 radeon_irq_kms_pflip_irq_get(rdev, crtc); 151 } 152 153 /** 154 * evergreen_post_page_flip - pos-pageflip callback. 155 * 156 * @rdev: radeon_device pointer 157 * @crtc: crtc to cleanup pageflip on 158 * 159 * Post-pageflip callback (evergreen+). 160 * Disables the pageflip irq (vblank irq). 161 */ 162 void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) 163 { 164 /* disable the pflip int */ 165 radeon_irq_kms_pflip_irq_put(rdev, crtc); 166 } 167 168 /** 169 * evergreen_page_flip - pageflip callback. 170 * 171 * @rdev: radeon_device pointer 172 * @crtc_id: crtc to cleanup pageflip on 173 * @crtc_base: new address of the crtc (GPU MC address) 174 * 175 * Does the actual pageflip (evergreen+). 176 * During vblank we take the crtc lock and wait for the update_pending 177 * bit to go high, when it does, we release the lock, and allow the 178 * double buffered update to take place. 179 * Returns the current update pending status. 180 */ 181 u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 182 { 183 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 184 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); 185 int i; 186 187 /* Lock the graphics update lock */ 188 tmp |= EVERGREEN_GRPH_UPDATE_LOCK; 189 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 190 191 /* update the scanout addresses */ 192 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 193 upper_32_bits(crtc_base)); 194 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 195 (u32)crtc_base); 196 197 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 198 upper_32_bits(crtc_base)); 199 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 200 (u32)crtc_base); 201 202 /* Wait for update_pending to go high. */ 203 for (i = 0; i < rdev->usec_timeout; i++) { 204 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) 205 break; 206 DRM_UDELAY(1); 207 } 208 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 209 210 /* Unlock the lock, so double-buffering can take place inside vblank */ 211 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; 212 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 213 214 /* Return current update_pending status: */ 215 return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; 216 } 217 218 /* get temperature in millidegrees */ 219 int evergreen_get_temp(struct radeon_device *rdev) 220 { 221 u32 temp, toffset; 222 int actual_temp = 0; 223 224 if (rdev->family == CHIP_JUNIPER) { 225 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >> 226 TOFFSET_SHIFT; 227 temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >> 228 TS0_ADC_DOUT_SHIFT; 229 230 if (toffset & 0x100) 231 actual_temp = temp / 2 - (0x200 - toffset); 232 else 233 actual_temp = temp / 2 + toffset; 234 235 actual_temp = actual_temp * 1000; 236 237 } else { 238 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 239 ASIC_T_SHIFT; 240 241 if (temp & 0x400) 242 actual_temp = -256; 243 else if (temp & 0x200) 244 actual_temp = 255; 245 else if (temp & 0x100) { 246 actual_temp = temp & 0x1ff; 247 actual_temp |= ~0x1ff; 248 } else 249 actual_temp = temp & 0xff; 250 251 actual_temp = (actual_temp * 1000) / 2; 252 } 253 254 return actual_temp; 255 } 256 257 int sumo_get_temp(struct radeon_device *rdev) 258 { 259 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; 260 int actual_temp = temp - 49; 261 262 return actual_temp * 1000; 263 } 264 265 /** 266 * sumo_pm_init_profile - Initialize power profiles callback. 267 * 268 * @rdev: radeon_device pointer 269 * 270 * Initialize the power states used in profile mode 271 * (sumo, trinity, SI). 272 * Used for profile mode only. 273 */ 274 void sumo_pm_init_profile(struct radeon_device *rdev) 275 { 276 int idx; 277 278 /* default */ 279 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 280 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 281 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 282 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 283 284 /* low,mid sh/mh */ 285 if (rdev->flags & RADEON_IS_MOBILITY) 286 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 287 else 288 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 289 290 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; 291 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; 292 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 293 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 294 295 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; 296 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; 297 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 298 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 299 300 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; 301 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; 302 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 303 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 304 305 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; 306 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; 307 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 308 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 309 310 /* high sh/mh */ 311 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 312 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; 313 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; 314 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 315 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 316 rdev->pm.power_state[idx].num_clock_modes - 1; 317 318 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; 319 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; 320 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 321 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 322 rdev->pm.power_state[idx].num_clock_modes - 1; 323 } 324 325 /** 326 * btc_pm_init_profile - Initialize power profiles callback. 327 * 328 * @rdev: radeon_device pointer 329 * 330 * Initialize the power states used in profile mode 331 * (BTC, cayman). 332 * Used for profile mode only. 333 */ 334 void btc_pm_init_profile(struct radeon_device *rdev) 335 { 336 int idx; 337 338 /* default */ 339 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 340 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 341 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 342 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 343 /* starting with BTC, there is one state that is used for both 344 * MH and SH. Difference is that we always use the high clock index for 345 * mclk. 346 */ 347 if (rdev->flags & RADEON_IS_MOBILITY) 348 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 349 else 350 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 351 /* low sh */ 352 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; 353 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; 354 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 355 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 356 /* mid sh */ 357 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; 358 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; 359 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 360 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 361 /* high sh */ 362 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; 363 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; 364 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 365 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 366 /* low mh */ 367 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; 368 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; 369 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 370 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 371 /* mid mh */ 372 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; 373 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; 374 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 375 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 376 /* high mh */ 377 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; 378 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; 379 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 380 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 381 } 382 383 /** 384 * evergreen_pm_misc - set additional pm hw parameters callback. 385 * 386 * @rdev: radeon_device pointer 387 * 388 * Set non-clock parameters associated with a power state 389 * (voltage, etc.) (evergreen+). 390 */ 391 void evergreen_pm_misc(struct radeon_device *rdev) 392 { 393 int req_ps_idx = rdev->pm.requested_power_state_index; 394 int req_cm_idx = rdev->pm.requested_clock_mode_index; 395 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; 396 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 397 398 if (voltage->type == VOLTAGE_SW) { 399 /* 0xff01 is a flag rather then an actual voltage */ 400 if (voltage->voltage == 0xff01) 401 return; 402 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { 403 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 404 rdev->pm.current_vddc = voltage->voltage; 405 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); 406 } 407 /* 0xff01 is a flag rather then an actual voltage */ 408 if (voltage->vddci == 0xff01) 409 return; 410 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { 411 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); 412 rdev->pm.current_vddci = voltage->vddci; 413 DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci); 414 } 415 } 416 } 417 418 /** 419 * evergreen_pm_prepare - pre-power state change callback. 420 * 421 * @rdev: radeon_device pointer 422 * 423 * Prepare for a power state change (evergreen+). 424 */ 425 void evergreen_pm_prepare(struct radeon_device *rdev) 426 { 427 struct drm_device *ddev = rdev->ddev; 428 struct drm_crtc *crtc; 429 struct radeon_crtc *radeon_crtc; 430 u32 tmp; 431 432 /* disable any active CRTCs */ 433 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 434 radeon_crtc = to_radeon_crtc(crtc); 435 if (radeon_crtc->enabled) { 436 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); 437 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 438 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); 439 } 440 } 441 } 442 443 /** 444 * evergreen_pm_finish - post-power state change callback. 445 * 446 * @rdev: radeon_device pointer 447 * 448 * Clean up after a power state change (evergreen+). 449 */ 450 void evergreen_pm_finish(struct radeon_device *rdev) 451 { 452 struct drm_device *ddev = rdev->ddev; 453 struct drm_crtc *crtc; 454 struct radeon_crtc *radeon_crtc; 455 u32 tmp; 456 457 /* enable any active CRTCs */ 458 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 459 radeon_crtc = to_radeon_crtc(crtc); 460 if (radeon_crtc->enabled) { 461 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); 462 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 463 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); 464 } 465 } 466 } 467 468 /** 469 * evergreen_hpd_sense - hpd sense callback. 470 * 471 * @rdev: radeon_device pointer 472 * @hpd: hpd (hotplug detect) pin 473 * 474 * Checks if a digital monitor is connected (evergreen+). 475 * Returns true if connected, false if not connected. 476 */ 477 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 478 { 479 bool connected = false; 480 481 switch (hpd) { 482 case RADEON_HPD_1: 483 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) 484 connected = true; 485 break; 486 case RADEON_HPD_2: 487 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) 488 connected = true; 489 break; 490 case RADEON_HPD_3: 491 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) 492 connected = true; 493 break; 494 case RADEON_HPD_4: 495 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) 496 connected = true; 497 break; 498 case RADEON_HPD_5: 499 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) 500 connected = true; 501 break; 502 case RADEON_HPD_6: 503 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 504 connected = true; 505 break; 506 default: 507 break; 508 } 509 510 return connected; 511 } 512 513 /** 514 * evergreen_hpd_set_polarity - hpd set polarity callback. 515 * 516 * @rdev: radeon_device pointer 517 * @hpd: hpd (hotplug detect) pin 518 * 519 * Set the polarity of the hpd pin (evergreen+). 520 */ 521 void evergreen_hpd_set_polarity(struct radeon_device *rdev, 522 enum radeon_hpd_id hpd) 523 { 524 u32 tmp; 525 bool connected = evergreen_hpd_sense(rdev, hpd); 526 527 switch (hpd) { 528 case RADEON_HPD_1: 529 tmp = RREG32(DC_HPD1_INT_CONTROL); 530 if (connected) 531 tmp &= ~DC_HPDx_INT_POLARITY; 532 else 533 tmp |= DC_HPDx_INT_POLARITY; 534 WREG32(DC_HPD1_INT_CONTROL, tmp); 535 break; 536 case RADEON_HPD_2: 537 tmp = RREG32(DC_HPD2_INT_CONTROL); 538 if (connected) 539 tmp &= ~DC_HPDx_INT_POLARITY; 540 else 541 tmp |= DC_HPDx_INT_POLARITY; 542 WREG32(DC_HPD2_INT_CONTROL, tmp); 543 break; 544 case RADEON_HPD_3: 545 tmp = RREG32(DC_HPD3_INT_CONTROL); 546 if (connected) 547 tmp &= ~DC_HPDx_INT_POLARITY; 548 else 549 tmp |= DC_HPDx_INT_POLARITY; 550 WREG32(DC_HPD3_INT_CONTROL, tmp); 551 break; 552 case RADEON_HPD_4: 553 tmp = RREG32(DC_HPD4_INT_CONTROL); 554 if (connected) 555 tmp &= ~DC_HPDx_INT_POLARITY; 556 else 557 tmp |= DC_HPDx_INT_POLARITY; 558 WREG32(DC_HPD4_INT_CONTROL, tmp); 559 break; 560 case RADEON_HPD_5: 561 tmp = RREG32(DC_HPD5_INT_CONTROL); 562 if (connected) 563 tmp &= ~DC_HPDx_INT_POLARITY; 564 else 565 tmp |= DC_HPDx_INT_POLARITY; 566 WREG32(DC_HPD5_INT_CONTROL, tmp); 567 break; 568 case RADEON_HPD_6: 569 tmp = RREG32(DC_HPD6_INT_CONTROL); 570 if (connected) 571 tmp &= ~DC_HPDx_INT_POLARITY; 572 else 573 tmp |= DC_HPDx_INT_POLARITY; 574 WREG32(DC_HPD6_INT_CONTROL, tmp); 575 break; 576 default: 577 break; 578 } 579 } 580 581 /** 582 * evergreen_hpd_init - hpd setup callback. 583 * 584 * @rdev: radeon_device pointer 585 * 586 * Setup the hpd pins used by the card (evergreen+). 587 * Enable the pin, set the polarity, and enable the hpd interrupts. 588 */ 589 void evergreen_hpd_init(struct radeon_device *rdev) 590 { 591 struct drm_device *dev = rdev->ddev; 592 struct drm_connector *connector; 593 unsigned enabled = 0; 594 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | 595 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; 596 597 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 598 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 599 switch (radeon_connector->hpd.hpd) { 600 case RADEON_HPD_1: 601 WREG32(DC_HPD1_CONTROL, tmp); 602 break; 603 case RADEON_HPD_2: 604 WREG32(DC_HPD2_CONTROL, tmp); 605 break; 606 case RADEON_HPD_3: 607 WREG32(DC_HPD3_CONTROL, tmp); 608 break; 609 case RADEON_HPD_4: 610 WREG32(DC_HPD4_CONTROL, tmp); 611 break; 612 case RADEON_HPD_5: 613 WREG32(DC_HPD5_CONTROL, tmp); 614 break; 615 case RADEON_HPD_6: 616 WREG32(DC_HPD6_CONTROL, tmp); 617 break; 618 default: 619 break; 620 } 621 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 622 enabled |= 1 << radeon_connector->hpd.hpd; 623 } 624 radeon_irq_kms_enable_hpd(rdev, enabled); 625 } 626 627 /** 628 * evergreen_hpd_fini - hpd tear down callback. 629 * 630 * @rdev: radeon_device pointer 631 * 632 * Tear down the hpd pins used by the card (evergreen+). 633 * Disable the hpd interrupts. 634 */ 635 void evergreen_hpd_fini(struct radeon_device *rdev) 636 { 637 struct drm_device *dev = rdev->ddev; 638 struct drm_connector *connector; 639 unsigned disabled = 0; 640 641 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 642 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 643 switch (radeon_connector->hpd.hpd) { 644 case RADEON_HPD_1: 645 WREG32(DC_HPD1_CONTROL, 0); 646 break; 647 case RADEON_HPD_2: 648 WREG32(DC_HPD2_CONTROL, 0); 649 break; 650 case RADEON_HPD_3: 651 WREG32(DC_HPD3_CONTROL, 0); 652 break; 653 case RADEON_HPD_4: 654 WREG32(DC_HPD4_CONTROL, 0); 655 break; 656 case RADEON_HPD_5: 657 WREG32(DC_HPD5_CONTROL, 0); 658 break; 659 case RADEON_HPD_6: 660 WREG32(DC_HPD6_CONTROL, 0); 661 break; 662 default: 663 break; 664 } 665 disabled |= 1 << radeon_connector->hpd.hpd; 666 } 667 radeon_irq_kms_disable_hpd(rdev, disabled); 668 } 669 670 /* watermark setup */ 671 672 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, 673 struct radeon_crtc *radeon_crtc, 674 struct drm_display_mode *mode, 675 struct drm_display_mode *other_mode) 676 { 677 u32 tmp; 678 /* 679 * Line Buffer Setup 680 * There are 3 line buffers, each one shared by 2 display controllers. 681 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between 682 * the display controllers. The paritioning is done via one of four 683 * preset allocations specified in bits 2:0: 684 * first display controller 685 * 0 - first half of lb (3840 * 2) 686 * 1 - first 3/4 of lb (5760 * 2) 687 * 2 - whole lb (7680 * 2), other crtc must be disabled 688 * 3 - first 1/4 of lb (1920 * 2) 689 * second display controller 690 * 4 - second half of lb (3840 * 2) 691 * 5 - second 3/4 of lb (5760 * 2) 692 * 6 - whole lb (7680 * 2), other crtc must be disabled 693 * 7 - last 1/4 of lb (1920 * 2) 694 */ 695 /* this can get tricky if we have two large displays on a paired group 696 * of crtcs. Ideally for multiple large displays we'd assign them to 697 * non-linked crtcs for maximum line buffer allocation. 698 */ 699 if (radeon_crtc->base.enabled && mode) { 700 if (other_mode) 701 tmp = 0; /* 1/2 */ 702 else 703 tmp = 2; /* whole */ 704 } else 705 tmp = 0; 706 707 /* second controller of the pair uses second half of the lb */ 708 if (radeon_crtc->crtc_id % 2) 709 tmp += 4; 710 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); 711 712 if (radeon_crtc->base.enabled && mode) { 713 switch (tmp) { 714 case 0: 715 case 4: 716 default: 717 if (ASIC_IS_DCE5(rdev)) 718 return 4096 * 2; 719 else 720 return 3840 * 2; 721 case 1: 722 case 5: 723 if (ASIC_IS_DCE5(rdev)) 724 return 6144 * 2; 725 else 726 return 5760 * 2; 727 case 2: 728 case 6: 729 if (ASIC_IS_DCE5(rdev)) 730 return 8192 * 2; 731 else 732 return 7680 * 2; 733 case 3: 734 case 7: 735 if (ASIC_IS_DCE5(rdev)) 736 return 2048 * 2; 737 else 738 return 1920 * 2; 739 } 740 } 741 742 /* controller not enabled, so no lb used */ 743 return 0; 744 } 745 746 u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) 747 { 748 u32 tmp = RREG32(MC_SHARED_CHMAP); 749 750 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 751 case 0: 752 default: 753 return 1; 754 case 1: 755 return 2; 756 case 2: 757 return 4; 758 case 3: 759 return 8; 760 } 761 } 762 763 struct evergreen_wm_params { 764 u32 dram_channels; /* number of dram channels */ 765 u32 yclk; /* bandwidth per dram data pin in kHz */ 766 u32 sclk; /* engine clock in kHz */ 767 u32 disp_clk; /* display clock in kHz */ 768 u32 src_width; /* viewport width */ 769 u32 active_time; /* active display time in ns */ 770 u32 blank_time; /* blank time in ns */ 771 bool interlaced; /* mode is interlaced */ 772 fixed20_12 vsc; /* vertical scale ratio */ 773 u32 num_heads; /* number of active crtcs */ 774 u32 bytes_per_pixel; /* bytes per pixel display + overlay */ 775 u32 lb_size; /* line buffer allocated to pipe */ 776 u32 vtaps; /* vertical scaler taps */ 777 }; 778 779 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm) 780 { 781 /* Calculate DRAM Bandwidth and the part allocated to display. */ 782 fixed20_12 dram_efficiency; /* 0.7 */ 783 fixed20_12 yclk, dram_channels, bandwidth; 784 fixed20_12 a; 785 786 a.full = dfixed_const(1000); 787 yclk.full = dfixed_const(wm->yclk); 788 yclk.full = dfixed_div(yclk, a); 789 dram_channels.full = dfixed_const(wm->dram_channels * 4); 790 a.full = dfixed_const(10); 791 dram_efficiency.full = dfixed_const(7); 792 dram_efficiency.full = dfixed_div(dram_efficiency, a); 793 bandwidth.full = dfixed_mul(dram_channels, yclk); 794 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); 795 796 return dfixed_trunc(bandwidth); 797 } 798 799 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm) 800 { 801 /* Calculate DRAM Bandwidth and the part allocated to display. */ 802 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ 803 fixed20_12 yclk, dram_channels, bandwidth; 804 fixed20_12 a; 805 806 a.full = dfixed_const(1000); 807 yclk.full = dfixed_const(wm->yclk); 808 yclk.full = dfixed_div(yclk, a); 809 dram_channels.full = dfixed_const(wm->dram_channels * 4); 810 a.full = dfixed_const(10); 811 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ 812 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); 813 bandwidth.full = dfixed_mul(dram_channels, yclk); 814 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); 815 816 return dfixed_trunc(bandwidth); 817 } 818 819 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm) 820 { 821 /* Calculate the display Data return Bandwidth */ 822 fixed20_12 return_efficiency; /* 0.8 */ 823 fixed20_12 sclk, bandwidth; 824 fixed20_12 a; 825 826 a.full = dfixed_const(1000); 827 sclk.full = dfixed_const(wm->sclk); 828 sclk.full = dfixed_div(sclk, a); 829 a.full = dfixed_const(10); 830 return_efficiency.full = dfixed_const(8); 831 return_efficiency.full = dfixed_div(return_efficiency, a); 832 a.full = dfixed_const(32); 833 bandwidth.full = dfixed_mul(a, sclk); 834 bandwidth.full = dfixed_mul(bandwidth, return_efficiency); 835 836 return dfixed_trunc(bandwidth); 837 } 838 839 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm) 840 { 841 /* Calculate the DMIF Request Bandwidth */ 842 fixed20_12 disp_clk_request_efficiency; /* 0.8 */ 843 fixed20_12 disp_clk, bandwidth; 844 fixed20_12 a; 845 846 a.full = dfixed_const(1000); 847 disp_clk.full = dfixed_const(wm->disp_clk); 848 disp_clk.full = dfixed_div(disp_clk, a); 849 a.full = dfixed_const(10); 850 disp_clk_request_efficiency.full = dfixed_const(8); 851 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); 852 a.full = dfixed_const(32); 853 bandwidth.full = dfixed_mul(a, disp_clk); 854 bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency); 855 856 return dfixed_trunc(bandwidth); 857 } 858 859 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm) 860 { 861 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ 862 u32 dram_bandwidth = evergreen_dram_bandwidth(wm); 863 u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm); 864 u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm); 865 866 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); 867 } 868 869 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm) 870 { 871 /* Calculate the display mode Average Bandwidth 872 * DisplayMode should contain the source and destination dimensions, 873 * timing, etc. 874 */ 875 fixed20_12 bpp; 876 fixed20_12 line_time; 877 fixed20_12 src_width; 878 fixed20_12 bandwidth; 879 fixed20_12 a; 880 881 a.full = dfixed_const(1000); 882 line_time.full = dfixed_const(wm->active_time + wm->blank_time); 883 line_time.full = dfixed_div(line_time, a); 884 bpp.full = dfixed_const(wm->bytes_per_pixel); 885 src_width.full = dfixed_const(wm->src_width); 886 bandwidth.full = dfixed_mul(src_width, bpp); 887 bandwidth.full = dfixed_mul(bandwidth, wm->vsc); 888 bandwidth.full = dfixed_div(bandwidth, line_time); 889 890 return dfixed_trunc(bandwidth); 891 } 892 893 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm) 894 { 895 /* First calcualte the latency in ns */ 896 u32 mc_latency = 2000; /* 2000 ns. */ 897 u32 available_bandwidth = evergreen_available_bandwidth(wm); 898 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; 899 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; 900 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ 901 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + 902 (wm->num_heads * cursor_line_pair_return_time); 903 u32 latency = mc_latency + other_heads_data_return_time + dc_latency; 904 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; 905 fixed20_12 a, b, c; 906 907 if (wm->num_heads == 0) 908 return 0; 909 910 a.full = dfixed_const(2); 911 b.full = dfixed_const(1); 912 if ((wm->vsc.full > a.full) || 913 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || 914 (wm->vtaps >= 5) || 915 ((wm->vsc.full >= a.full) && wm->interlaced)) 916 max_src_lines_per_dst_line = 4; 917 else 918 max_src_lines_per_dst_line = 2; 919 920 a.full = dfixed_const(available_bandwidth); 921 b.full = dfixed_const(wm->num_heads); 922 a.full = dfixed_div(a, b); 923 924 b.full = dfixed_const(1000); 925 c.full = dfixed_const(wm->disp_clk); 926 b.full = dfixed_div(c, b); 927 c.full = dfixed_const(wm->bytes_per_pixel); 928 b.full = dfixed_mul(b, c); 929 930 lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b)); 931 932 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 933 b.full = dfixed_const(1000); 934 c.full = dfixed_const(lb_fill_bw); 935 b.full = dfixed_div(c, b); 936 a.full = dfixed_div(a, b); 937 line_fill_time = dfixed_trunc(a); 938 939 if (line_fill_time < wm->active_time) 940 return latency; 941 else 942 return latency + (line_fill_time - wm->active_time); 943 944 } 945 946 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm) 947 { 948 if (evergreen_average_bandwidth(wm) <= 949 (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads)) 950 return true; 951 else 952 return false; 953 }; 954 955 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm) 956 { 957 if (evergreen_average_bandwidth(wm) <= 958 (evergreen_available_bandwidth(wm) / wm->num_heads)) 959 return true; 960 else 961 return false; 962 }; 963 964 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm) 965 { 966 u32 lb_partitions = wm->lb_size / wm->src_width; 967 u32 line_time = wm->active_time + wm->blank_time; 968 u32 latency_tolerant_lines; 969 u32 latency_hiding; 970 fixed20_12 a; 971 972 a.full = dfixed_const(1); 973 if (wm->vsc.full > a.full) 974 latency_tolerant_lines = 1; 975 else { 976 if (lb_partitions <= (wm->vtaps + 1)) 977 latency_tolerant_lines = 1; 978 else 979 latency_tolerant_lines = 2; 980 } 981 982 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); 983 984 if (evergreen_latency_watermark(wm) <= latency_hiding) 985 return true; 986 else 987 return false; 988 } 989 990 static void evergreen_program_watermarks(struct radeon_device *rdev, 991 struct radeon_crtc *radeon_crtc, 992 u32 lb_size, u32 num_heads) 993 { 994 struct drm_display_mode *mode = &radeon_crtc->base.mode; 995 struct evergreen_wm_params wm; 996 u32 pixel_period; 997 u32 line_time = 0; 998 u32 latency_watermark_a = 0, latency_watermark_b = 0; 999 u32 priority_a_mark = 0, priority_b_mark = 0; 1000 u32 priority_a_cnt = PRIORITY_OFF; 1001 u32 priority_b_cnt = PRIORITY_OFF; 1002 u32 pipe_offset = radeon_crtc->crtc_id * 16; 1003 u32 tmp, arb_control3; 1004 fixed20_12 a, b, c; 1005 1006 if (radeon_crtc->base.enabled && num_heads && mode) { 1007 pixel_period = 1000000 / (u32)mode->clock; 1008 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 1009 priority_a_cnt = 0; 1010 priority_b_cnt = 0; 1011 1012 wm.yclk = rdev->pm.current_mclk * 10; 1013 wm.sclk = rdev->pm.current_sclk * 10; 1014 wm.disp_clk = mode->clock; 1015 wm.src_width = mode->crtc_hdisplay; 1016 wm.active_time = mode->crtc_hdisplay * pixel_period; 1017 wm.blank_time = line_time - wm.active_time; 1018 wm.interlaced = false; 1019 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1020 wm.interlaced = true; 1021 wm.vsc = radeon_crtc->vsc; 1022 wm.vtaps = 1; 1023 if (radeon_crtc->rmx_type != RMX_OFF) 1024 wm.vtaps = 2; 1025 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1026 wm.lb_size = lb_size; 1027 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); 1028 wm.num_heads = num_heads; 1029 1030 /* set for high clocks */ 1031 latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); 1032 /* set for low clocks */ 1033 /* wm.yclk = low clk; wm.sclk = low clk */ 1034 latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535); 1035 1036 /* possibly force display priority to high */ 1037 /* should really do this at mode validation time... */ 1038 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || 1039 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || 1040 !evergreen_check_latency_hiding(&wm) || 1041 (rdev->disp_priority == 2)) { 1042 DRM_DEBUG_KMS("force priority to high\n"); 1043 priority_a_cnt |= PRIORITY_ALWAYS_ON; 1044 priority_b_cnt |= PRIORITY_ALWAYS_ON; 1045 } 1046 1047 a.full = dfixed_const(1000); 1048 b.full = dfixed_const(mode->clock); 1049 b.full = dfixed_div(b, a); 1050 c.full = dfixed_const(latency_watermark_a); 1051 c.full = dfixed_mul(c, b); 1052 c.full = dfixed_mul(c, radeon_crtc->hsc); 1053 c.full = dfixed_div(c, a); 1054 a.full = dfixed_const(16); 1055 c.full = dfixed_div(c, a); 1056 priority_a_mark = dfixed_trunc(c); 1057 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK; 1058 1059 a.full = dfixed_const(1000); 1060 b.full = dfixed_const(mode->clock); 1061 b.full = dfixed_div(b, a); 1062 c.full = dfixed_const(latency_watermark_b); 1063 c.full = dfixed_mul(c, b); 1064 c.full = dfixed_mul(c, radeon_crtc->hsc); 1065 c.full = dfixed_div(c, a); 1066 a.full = dfixed_const(16); 1067 c.full = dfixed_div(c, a); 1068 priority_b_mark = dfixed_trunc(c); 1069 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; 1070 } 1071 1072 /* select wm A */ 1073 arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); 1074 tmp = arb_control3; 1075 tmp &= ~LATENCY_WATERMARK_MASK(3); 1076 tmp |= LATENCY_WATERMARK_MASK(1); 1077 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); 1078 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, 1079 (LATENCY_LOW_WATERMARK(latency_watermark_a) | 1080 LATENCY_HIGH_WATERMARK(line_time))); 1081 /* select wm B */ 1082 tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); 1083 tmp &= ~LATENCY_WATERMARK_MASK(3); 1084 tmp |= LATENCY_WATERMARK_MASK(2); 1085 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); 1086 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, 1087 (LATENCY_LOW_WATERMARK(latency_watermark_b) | 1088 LATENCY_HIGH_WATERMARK(line_time))); 1089 /* restore original selection */ 1090 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3); 1091 1092 /* write the priority marks */ 1093 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); 1094 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); 1095 1096 } 1097 1098 /** 1099 * evergreen_bandwidth_update - update display watermarks callback. 1100 * 1101 * @rdev: radeon_device pointer 1102 * 1103 * Update the display watermarks based on the requested mode(s) 1104 * (evergreen+). 1105 */ 1106 void evergreen_bandwidth_update(struct radeon_device *rdev) 1107 { 1108 struct drm_display_mode *mode0 = NULL; 1109 struct drm_display_mode *mode1 = NULL; 1110 u32 num_heads = 0, lb_size; 1111 int i; 1112 1113 radeon_update_display_priority(rdev); 1114 1115 for (i = 0; i < rdev->num_crtc; i++) { 1116 if (rdev->mode_info.crtcs[i]->base.enabled) 1117 num_heads++; 1118 } 1119 for (i = 0; i < rdev->num_crtc; i += 2) { 1120 mode0 = &rdev->mode_info.crtcs[i]->base.mode; 1121 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode; 1122 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); 1123 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); 1124 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); 1125 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); 1126 } 1127 } 1128 1129 /** 1130 * evergreen_mc_wait_for_idle - wait for MC idle callback. 1131 * 1132 * @rdev: radeon_device pointer 1133 * 1134 * Wait for the MC (memory controller) to be idle. 1135 * (evergreen+). 1136 * Returns 0 if the MC is idle, -1 if not. 1137 */ 1138 int evergreen_mc_wait_for_idle(struct radeon_device *rdev) 1139 { 1140 unsigned i; 1141 u32 tmp; 1142 1143 for (i = 0; i < rdev->usec_timeout; i++) { 1144 /* read MC_STATUS */ 1145 tmp = RREG32(SRBM_STATUS) & 0x1F00; 1146 if (!tmp) 1147 return 0; 1148 DRM_UDELAY(1); 1149 } 1150 return -1; 1151 } 1152 1153 /* 1154 * GART 1155 */ 1156 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) 1157 { 1158 unsigned i; 1159 u32 tmp; 1160 1161 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 1162 1163 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 1164 for (i = 0; i < rdev->usec_timeout; i++) { 1165 /* read MC_STATUS */ 1166 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); 1167 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; 1168 if (tmp == 2) { 1169 DRM_ERROR("[drm] r600 flush TLB failed\n"); 1170 return; 1171 } 1172 if (tmp) { 1173 return; 1174 } 1175 DRM_UDELAY(1); 1176 } 1177 } 1178 1179 static int evergreen_pcie_gart_enable(struct radeon_device *rdev) 1180 { 1181 u32 tmp; 1182 int r; 1183 1184 if (rdev->gart.robj == NULL) { 1185 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 1186 return -EINVAL; 1187 } 1188 r = radeon_gart_table_vram_pin(rdev); 1189 if (r) 1190 return r; 1191 radeon_gart_restore(rdev); 1192 /* Setup L2 cache */ 1193 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 1194 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 1195 EFFECTIVE_L2_QUEUE_SIZE(7)); 1196 WREG32(VM_L2_CNTL2, 0); 1197 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); 1198 /* Setup TLB control */ 1199 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | 1200 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 1201 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | 1202 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); 1203 if (rdev->flags & RADEON_IS_IGP) { 1204 WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp); 1205 WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp); 1206 WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp); 1207 } else { 1208 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 1209 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 1210 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 1211 if ((rdev->family == CHIP_JUNIPER) || 1212 (rdev->family == CHIP_CYPRESS) || 1213 (rdev->family == CHIP_HEMLOCK) || 1214 (rdev->family == CHIP_BARTS)) 1215 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); 1216 } 1217 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 1218 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 1219 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 1220 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 1221 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1222 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 1223 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 1224 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 1225 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 1226 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 1227 (u32)(rdev->dummy_page.addr >> 12)); 1228 WREG32(VM_CONTEXT1_CNTL, 0); 1229 1230 evergreen_pcie_gart_tlb_flush(rdev); 1231 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 1232 (unsigned)(rdev->mc.gtt_size >> 20), 1233 (unsigned long long)rdev->gart.table_addr); 1234 rdev->gart.ready = true; 1235 return 0; 1236 } 1237 1238 static void evergreen_pcie_gart_disable(struct radeon_device *rdev) 1239 { 1240 u32 tmp; 1241 1242 /* Disable all tables */ 1243 WREG32(VM_CONTEXT0_CNTL, 0); 1244 WREG32(VM_CONTEXT1_CNTL, 0); 1245 1246 /* Setup L2 cache */ 1247 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | 1248 EFFECTIVE_L2_QUEUE_SIZE(7)); 1249 WREG32(VM_L2_CNTL2, 0); 1250 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); 1251 /* Setup TLB control */ 1252 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); 1253 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 1254 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 1255 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 1256 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 1257 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 1258 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 1259 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 1260 radeon_gart_table_vram_unpin(rdev); 1261 } 1262 1263 static void evergreen_pcie_gart_fini(struct radeon_device *rdev) 1264 { 1265 evergreen_pcie_gart_disable(rdev); 1266 radeon_gart_table_vram_free(rdev); 1267 radeon_gart_fini(rdev); 1268 } 1269 1270 1271 static void evergreen_agp_enable(struct radeon_device *rdev) 1272 { 1273 u32 tmp; 1274 1275 /* Setup L2 cache */ 1276 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 1277 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 1278 EFFECTIVE_L2_QUEUE_SIZE(7)); 1279 WREG32(VM_L2_CNTL2, 0); 1280 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); 1281 /* Setup TLB control */ 1282 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | 1283 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 1284 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | 1285 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); 1286 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 1287 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 1288 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 1289 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 1290 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 1291 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 1292 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 1293 WREG32(VM_CONTEXT0_CNTL, 0); 1294 WREG32(VM_CONTEXT1_CNTL, 0); 1295 } 1296 1297 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 1298 { 1299 u32 crtc_enabled, tmp, frame_count, blackout; 1300 int i, j; 1301 1302 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 1303 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); 1304 1305 /* disable VGA render */ 1306 WREG32(VGA_RENDER_CONTROL, 0); 1307 /* blank the display controllers */ 1308 for (i = 0; i < rdev->num_crtc; i++) { 1309 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; 1310 if (crtc_enabled) { 1311 save->crtc_enabled[i] = true; 1312 if (ASIC_IS_DCE6(rdev)) { 1313 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1314 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { 1315 radeon_wait_for_vblank(rdev, i); 1316 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1317 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1318 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1319 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1320 } 1321 } else { 1322 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1323 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { 1324 radeon_wait_for_vblank(rdev, i); 1325 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1326 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1327 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1328 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1329 } 1330 } 1331 /* wait for the next frame */ 1332 frame_count = radeon_get_vblank_counter(rdev, i); 1333 for (j = 0; j < rdev->usec_timeout; j++) { 1334 if (radeon_get_vblank_counter(rdev, i) != frame_count) 1335 break; 1336 DRM_UDELAY(1); 1337 } 1338 } else { 1339 save->crtc_enabled[i] = false; 1340 } 1341 } 1342 1343 radeon_mc_wait_for_idle(rdev); 1344 1345 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); 1346 if ((blackout & BLACKOUT_MODE_MASK) != 1) { 1347 /* Block CPU access */ 1348 WREG32(BIF_FB_EN, 0); 1349 /* blackout the MC */ 1350 blackout &= ~BLACKOUT_MODE_MASK; 1351 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); 1352 } 1353 /* wait for the MC to settle */ 1354 DRM_UDELAY(100); 1355 } 1356 1357 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) 1358 { 1359 u32 tmp, frame_count; 1360 int i, j; 1361 1362 /* update crtc base addresses */ 1363 for (i = 0; i < rdev->num_crtc; i++) { 1364 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 1365 upper_32_bits(rdev->mc.vram_start)); 1366 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 1367 upper_32_bits(rdev->mc.vram_start)); 1368 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], 1369 (u32)rdev->mc.vram_start); 1370 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], 1371 (u32)rdev->mc.vram_start); 1372 } 1373 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); 1374 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); 1375 1376 /* unblackout the MC */ 1377 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); 1378 tmp &= ~BLACKOUT_MODE_MASK; 1379 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp); 1380 /* allow CPU access */ 1381 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); 1382 1383 for (i = 0; i < rdev->num_crtc; i++) { 1384 if (save->crtc_enabled[i]) { 1385 if (ASIC_IS_DCE6(rdev)) { 1386 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1387 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1388 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1389 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1390 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1391 } else { 1392 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1393 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1394 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1395 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1396 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1397 } 1398 /* wait for the next frame */ 1399 frame_count = radeon_get_vblank_counter(rdev, i); 1400 for (j = 0; j < rdev->usec_timeout; j++) { 1401 if (radeon_get_vblank_counter(rdev, i) != frame_count) 1402 break; 1403 DRM_UDELAY(1); 1404 } 1405 } 1406 } 1407 /* Unlock vga access */ 1408 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); 1409 DRM_MDELAY(1); 1410 WREG32(VGA_RENDER_CONTROL, save->vga_render_control); 1411 } 1412 1413 void evergreen_mc_program(struct radeon_device *rdev) 1414 { 1415 struct evergreen_mc_save save; 1416 u32 tmp; 1417 int i, j; 1418 1419 /* Initialize HDP */ 1420 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1421 WREG32((0x2c14 + j), 0x00000000); 1422 WREG32((0x2c18 + j), 0x00000000); 1423 WREG32((0x2c1c + j), 0x00000000); 1424 WREG32((0x2c20 + j), 0x00000000); 1425 WREG32((0x2c24 + j), 0x00000000); 1426 } 1427 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 1428 1429 evergreen_mc_stop(rdev, &save); 1430 if (evergreen_mc_wait_for_idle(rdev)) { 1431 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1432 } 1433 /* Lockout access through VGA aperture*/ 1434 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 1435 /* Update configuration */ 1436 if (rdev->flags & RADEON_IS_AGP) { 1437 if (rdev->mc.vram_start < rdev->mc.gtt_start) { 1438 /* VRAM before AGP */ 1439 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1440 rdev->mc.vram_start >> 12); 1441 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1442 rdev->mc.gtt_end >> 12); 1443 } else { 1444 /* VRAM after AGP */ 1445 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1446 rdev->mc.gtt_start >> 12); 1447 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1448 rdev->mc.vram_end >> 12); 1449 } 1450 } else { 1451 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1452 rdev->mc.vram_start >> 12); 1453 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1454 rdev->mc.vram_end >> 12); 1455 } 1456 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); 1457 /* llano/ontario only */ 1458 if ((rdev->family == CHIP_PALM) || 1459 (rdev->family == CHIP_SUMO) || 1460 (rdev->family == CHIP_SUMO2)) { 1461 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; 1462 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 1463 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; 1464 WREG32(MC_FUS_VM_FB_OFFSET, tmp); 1465 } 1466 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 1467 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 1468 WREG32(MC_VM_FB_LOCATION, tmp); 1469 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 1470 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); 1471 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 1472 if (rdev->flags & RADEON_IS_AGP) { 1473 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); 1474 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 1475 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 1476 } else { 1477 WREG32(MC_VM_AGP_BASE, 0); 1478 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 1479 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 1480 } 1481 if (evergreen_mc_wait_for_idle(rdev)) { 1482 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1483 } 1484 evergreen_mc_resume(rdev, &save); 1485 /* we need to own VRAM, so turn off the VGA renderer here 1486 * to stop it overwriting our objects */ 1487 rv515_vga_render_disable(rdev); 1488 } 1489 1490 /* 1491 * CP. 1492 */ 1493 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 1494 { 1495 struct radeon_ring *ring = &rdev->ring[ib->ring]; 1496 u32 next_rptr; 1497 1498 /* set to DX10/11 mode */ 1499 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); 1500 radeon_ring_write(ring, 1); 1501 1502 if (ring->rptr_save_reg) { 1503 next_rptr = ring->wptr + 3 + 4; 1504 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1505 radeon_ring_write(ring, ((ring->rptr_save_reg - 1506 PACKET3_SET_CONFIG_REG_START) >> 2)); 1507 radeon_ring_write(ring, next_rptr); 1508 } else if (rdev->wb.enabled) { 1509 next_rptr = ring->wptr + 5 + 4; 1510 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); 1511 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 1512 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); 1513 radeon_ring_write(ring, next_rptr); 1514 radeon_ring_write(ring, 0); 1515 } 1516 1517 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 1518 radeon_ring_write(ring, 1519 #ifdef __BIG_ENDIAN 1520 (2 << 0) | 1521 #endif 1522 (ib->gpu_addr & 0xFFFFFFFC)); 1523 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 1524 radeon_ring_write(ring, ib->length_dw); 1525 } 1526 1527 1528 static int evergreen_cp_load_microcode(struct radeon_device *rdev) 1529 { 1530 const __be32 *fw_data; 1531 int i; 1532 1533 if (!rdev->me_fw || !rdev->pfp_fw) 1534 return -EINVAL; 1535 1536 r700_cp_stop(rdev); 1537 WREG32(CP_RB_CNTL, 1538 #ifdef __BIG_ENDIAN 1539 BUF_SWAP_32BIT | 1540 #endif 1541 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 1542 1543 fw_data = (const __be32 *)rdev->pfp_fw->data; 1544 WREG32(CP_PFP_UCODE_ADDR, 0); 1545 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++) 1546 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 1547 WREG32(CP_PFP_UCODE_ADDR, 0); 1548 1549 fw_data = (const __be32 *)rdev->me_fw->data; 1550 WREG32(CP_ME_RAM_WADDR, 0); 1551 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++) 1552 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 1553 1554 WREG32(CP_PFP_UCODE_ADDR, 0); 1555 WREG32(CP_ME_RAM_WADDR, 0); 1556 WREG32(CP_ME_RAM_RADDR, 0); 1557 return 0; 1558 } 1559 1560 static int evergreen_cp_start(struct radeon_device *rdev) 1561 { 1562 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1563 int r, i; 1564 uint32_t cp_me; 1565 1566 r = radeon_ring_lock(rdev, ring, 7); 1567 if (r) { 1568 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1569 return r; 1570 } 1571 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 1572 radeon_ring_write(ring, 0x1); 1573 radeon_ring_write(ring, 0x0); 1574 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1); 1575 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1576 radeon_ring_write(ring, 0); 1577 radeon_ring_write(ring, 0); 1578 radeon_ring_unlock_commit(rdev, ring); 1579 1580 cp_me = 0xff; 1581 WREG32(CP_ME_CNTL, cp_me); 1582 1583 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19); 1584 if (r) { 1585 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1586 return r; 1587 } 1588 1589 /* setup clear context state */ 1590 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1591 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 1592 1593 for (i = 0; i < evergreen_default_size; i++) 1594 radeon_ring_write(ring, evergreen_default_state[i]); 1595 1596 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1597 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 1598 1599 /* set clear context state */ 1600 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 1601 radeon_ring_write(ring, 0); 1602 1603 /* SQ_VTX_BASE_VTX_LOC */ 1604 radeon_ring_write(ring, 0xc0026f00); 1605 radeon_ring_write(ring, 0x00000000); 1606 radeon_ring_write(ring, 0x00000000); 1607 radeon_ring_write(ring, 0x00000000); 1608 1609 /* Clear consts */ 1610 radeon_ring_write(ring, 0xc0036f00); 1611 radeon_ring_write(ring, 0x00000bc4); 1612 radeon_ring_write(ring, 0xffffffff); 1613 radeon_ring_write(ring, 0xffffffff); 1614 radeon_ring_write(ring, 0xffffffff); 1615 1616 radeon_ring_write(ring, 0xc0026900); 1617 radeon_ring_write(ring, 0x00000316); 1618 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1619 radeon_ring_write(ring, 0x00000010); /* */ 1620 1621 radeon_ring_unlock_commit(rdev, ring); 1622 1623 return 0; 1624 } 1625 1626 static int evergreen_cp_resume(struct radeon_device *rdev) 1627 { 1628 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1629 u32 tmp; 1630 u32 rb_bufsz; 1631 int r; 1632 1633 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ 1634 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | 1635 SOFT_RESET_PA | 1636 SOFT_RESET_SH | 1637 SOFT_RESET_VGT | 1638 SOFT_RESET_SPI | 1639 SOFT_RESET_SX)); 1640 RREG32(GRBM_SOFT_RESET); 1641 DRM_MDELAY(15); 1642 WREG32(GRBM_SOFT_RESET, 0); 1643 RREG32(GRBM_SOFT_RESET); 1644 1645 /* Set ring buffer size */ 1646 rb_bufsz = drm_order(ring->ring_size / 8); 1647 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 1648 #ifdef __BIG_ENDIAN 1649 tmp |= BUF_SWAP_32BIT; 1650 #endif 1651 WREG32(CP_RB_CNTL, tmp); 1652 WREG32(CP_SEM_WAIT_TIMER, 0x0); 1653 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 1654 1655 /* Set the write pointer delay */ 1656 WREG32(CP_RB_WPTR_DELAY, 0); 1657 1658 /* Initialize the ring buffer's read and write pointers */ 1659 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1660 WREG32(CP_RB_RPTR_WR, 0); 1661 ring->wptr = 0; 1662 WREG32(CP_RB_WPTR, ring->wptr); 1663 1664 /* set the wb address whether it's enabled or not */ 1665 WREG32(CP_RB_RPTR_ADDR, 1666 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 1667 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 1668 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1669 1670 if (rdev->wb.enabled) 1671 WREG32(SCRATCH_UMSK, 0xff); 1672 else { 1673 tmp |= RB_NO_UPDATE; 1674 WREG32(SCRATCH_UMSK, 0); 1675 } 1676 1677 DRM_MDELAY(1); 1678 WREG32(CP_RB_CNTL, tmp); 1679 1680 WREG32(CP_RB_BASE, ring->gpu_addr >> 8); 1681 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 1682 1683 ring->rptr = RREG32(CP_RB_RPTR); 1684 1685 evergreen_cp_start(rdev); 1686 ring->ready = true; 1687 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 1688 if (r) { 1689 ring->ready = false; 1690 return r; 1691 } 1692 return 0; 1693 } 1694 1695 /* 1696 * Core functions 1697 */ 1698 static void evergreen_gpu_init(struct radeon_device *rdev) 1699 { 1700 u32 gb_addr_config; 1701 u32 mc_shared_chmap, mc_arb_ramcfg; 1702 u32 sx_debug_1; 1703 u32 smx_dc_ctl0; 1704 u32 sq_config; 1705 u32 sq_lds_resource_mgmt; 1706 u32 sq_gpr_resource_mgmt_1; 1707 u32 sq_gpr_resource_mgmt_2; 1708 u32 sq_gpr_resource_mgmt_3; 1709 u32 sq_thread_resource_mgmt; 1710 u32 sq_thread_resource_mgmt_2; 1711 u32 sq_stack_resource_mgmt_1; 1712 u32 sq_stack_resource_mgmt_2; 1713 u32 sq_stack_resource_mgmt_3; 1714 u32 vgt_cache_invalidation; 1715 u32 hdp_host_path_cntl, tmp; 1716 u32 disabled_rb_mask; 1717 int i, j, num_shader_engines, ps_thread_count; 1718 1719 switch (rdev->family) { 1720 case CHIP_CYPRESS: 1721 case CHIP_HEMLOCK: 1722 rdev->config.evergreen.num_ses = 2; 1723 rdev->config.evergreen.max_pipes = 4; 1724 rdev->config.evergreen.max_tile_pipes = 8; 1725 rdev->config.evergreen.max_simds = 10; 1726 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; 1727 rdev->config.evergreen.max_gprs = 256; 1728 rdev->config.evergreen.max_threads = 248; 1729 rdev->config.evergreen.max_gs_threads = 32; 1730 rdev->config.evergreen.max_stack_entries = 512; 1731 rdev->config.evergreen.sx_num_of_sets = 4; 1732 rdev->config.evergreen.sx_max_export_size = 256; 1733 rdev->config.evergreen.sx_max_export_pos_size = 64; 1734 rdev->config.evergreen.sx_max_export_smx_size = 192; 1735 rdev->config.evergreen.max_hw_contexts = 8; 1736 rdev->config.evergreen.sq_num_cf_insts = 2; 1737 1738 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1739 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1740 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1741 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN; 1742 break; 1743 case CHIP_JUNIPER: 1744 rdev->config.evergreen.num_ses = 1; 1745 rdev->config.evergreen.max_pipes = 4; 1746 rdev->config.evergreen.max_tile_pipes = 4; 1747 rdev->config.evergreen.max_simds = 10; 1748 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; 1749 rdev->config.evergreen.max_gprs = 256; 1750 rdev->config.evergreen.max_threads = 248; 1751 rdev->config.evergreen.max_gs_threads = 32; 1752 rdev->config.evergreen.max_stack_entries = 512; 1753 rdev->config.evergreen.sx_num_of_sets = 4; 1754 rdev->config.evergreen.sx_max_export_size = 256; 1755 rdev->config.evergreen.sx_max_export_pos_size = 64; 1756 rdev->config.evergreen.sx_max_export_smx_size = 192; 1757 rdev->config.evergreen.max_hw_contexts = 8; 1758 rdev->config.evergreen.sq_num_cf_insts = 2; 1759 1760 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1761 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1762 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1763 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN; 1764 break; 1765 case CHIP_REDWOOD: 1766 rdev->config.evergreen.num_ses = 1; 1767 rdev->config.evergreen.max_pipes = 4; 1768 rdev->config.evergreen.max_tile_pipes = 4; 1769 rdev->config.evergreen.max_simds = 5; 1770 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; 1771 rdev->config.evergreen.max_gprs = 256; 1772 rdev->config.evergreen.max_threads = 248; 1773 rdev->config.evergreen.max_gs_threads = 32; 1774 rdev->config.evergreen.max_stack_entries = 256; 1775 rdev->config.evergreen.sx_num_of_sets = 4; 1776 rdev->config.evergreen.sx_max_export_size = 256; 1777 rdev->config.evergreen.sx_max_export_pos_size = 64; 1778 rdev->config.evergreen.sx_max_export_smx_size = 192; 1779 rdev->config.evergreen.max_hw_contexts = 8; 1780 rdev->config.evergreen.sq_num_cf_insts = 2; 1781 1782 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1783 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1784 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1785 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; 1786 break; 1787 case CHIP_CEDAR: 1788 default: 1789 rdev->config.evergreen.num_ses = 1; 1790 rdev->config.evergreen.max_pipes = 2; 1791 rdev->config.evergreen.max_tile_pipes = 2; 1792 rdev->config.evergreen.max_simds = 2; 1793 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1794 rdev->config.evergreen.max_gprs = 256; 1795 rdev->config.evergreen.max_threads = 192; 1796 rdev->config.evergreen.max_gs_threads = 16; 1797 rdev->config.evergreen.max_stack_entries = 256; 1798 rdev->config.evergreen.sx_num_of_sets = 4; 1799 rdev->config.evergreen.sx_max_export_size = 128; 1800 rdev->config.evergreen.sx_max_export_pos_size = 32; 1801 rdev->config.evergreen.sx_max_export_smx_size = 96; 1802 rdev->config.evergreen.max_hw_contexts = 4; 1803 rdev->config.evergreen.sq_num_cf_insts = 1; 1804 1805 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1806 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1807 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1808 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN; 1809 break; 1810 case CHIP_PALM: 1811 rdev->config.evergreen.num_ses = 1; 1812 rdev->config.evergreen.max_pipes = 2; 1813 rdev->config.evergreen.max_tile_pipes = 2; 1814 rdev->config.evergreen.max_simds = 2; 1815 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1816 rdev->config.evergreen.max_gprs = 256; 1817 rdev->config.evergreen.max_threads = 192; 1818 rdev->config.evergreen.max_gs_threads = 16; 1819 rdev->config.evergreen.max_stack_entries = 256; 1820 rdev->config.evergreen.sx_num_of_sets = 4; 1821 rdev->config.evergreen.sx_max_export_size = 128; 1822 rdev->config.evergreen.sx_max_export_pos_size = 32; 1823 rdev->config.evergreen.sx_max_export_smx_size = 96; 1824 rdev->config.evergreen.max_hw_contexts = 4; 1825 rdev->config.evergreen.sq_num_cf_insts = 1; 1826 1827 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1828 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1829 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1830 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN; 1831 break; 1832 case CHIP_SUMO: 1833 rdev->config.evergreen.num_ses = 1; 1834 rdev->config.evergreen.max_pipes = 4; 1835 rdev->config.evergreen.max_tile_pipes = 4; 1836 if (rdev->ddev->pci_device == 0x9648) 1837 rdev->config.evergreen.max_simds = 3; 1838 else if ((rdev->ddev->pci_device == 0x9647) || 1839 (rdev->ddev->pci_device == 0x964a)) 1840 rdev->config.evergreen.max_simds = 4; 1841 else 1842 rdev->config.evergreen.max_simds = 5; 1843 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; 1844 rdev->config.evergreen.max_gprs = 256; 1845 rdev->config.evergreen.max_threads = 248; 1846 rdev->config.evergreen.max_gs_threads = 32; 1847 rdev->config.evergreen.max_stack_entries = 256; 1848 rdev->config.evergreen.sx_num_of_sets = 4; 1849 rdev->config.evergreen.sx_max_export_size = 256; 1850 rdev->config.evergreen.sx_max_export_pos_size = 64; 1851 rdev->config.evergreen.sx_max_export_smx_size = 192; 1852 rdev->config.evergreen.max_hw_contexts = 8; 1853 rdev->config.evergreen.sq_num_cf_insts = 2; 1854 1855 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1856 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1857 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1858 gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN; 1859 break; 1860 case CHIP_SUMO2: 1861 rdev->config.evergreen.num_ses = 1; 1862 rdev->config.evergreen.max_pipes = 4; 1863 rdev->config.evergreen.max_tile_pipes = 4; 1864 rdev->config.evergreen.max_simds = 2; 1865 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1866 rdev->config.evergreen.max_gprs = 256; 1867 rdev->config.evergreen.max_threads = 248; 1868 rdev->config.evergreen.max_gs_threads = 32; 1869 rdev->config.evergreen.max_stack_entries = 512; 1870 rdev->config.evergreen.sx_num_of_sets = 4; 1871 rdev->config.evergreen.sx_max_export_size = 256; 1872 rdev->config.evergreen.sx_max_export_pos_size = 64; 1873 rdev->config.evergreen.sx_max_export_smx_size = 192; 1874 rdev->config.evergreen.max_hw_contexts = 8; 1875 rdev->config.evergreen.sq_num_cf_insts = 2; 1876 1877 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1878 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1879 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1880 gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN; 1881 break; 1882 case CHIP_BARTS: 1883 rdev->config.evergreen.num_ses = 2; 1884 rdev->config.evergreen.max_pipes = 4; 1885 rdev->config.evergreen.max_tile_pipes = 8; 1886 rdev->config.evergreen.max_simds = 7; 1887 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; 1888 rdev->config.evergreen.max_gprs = 256; 1889 rdev->config.evergreen.max_threads = 248; 1890 rdev->config.evergreen.max_gs_threads = 32; 1891 rdev->config.evergreen.max_stack_entries = 512; 1892 rdev->config.evergreen.sx_num_of_sets = 4; 1893 rdev->config.evergreen.sx_max_export_size = 256; 1894 rdev->config.evergreen.sx_max_export_pos_size = 64; 1895 rdev->config.evergreen.sx_max_export_smx_size = 192; 1896 rdev->config.evergreen.max_hw_contexts = 8; 1897 rdev->config.evergreen.sq_num_cf_insts = 2; 1898 1899 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1900 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1901 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1902 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN; 1903 break; 1904 case CHIP_TURKS: 1905 rdev->config.evergreen.num_ses = 1; 1906 rdev->config.evergreen.max_pipes = 4; 1907 rdev->config.evergreen.max_tile_pipes = 4; 1908 rdev->config.evergreen.max_simds = 6; 1909 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; 1910 rdev->config.evergreen.max_gprs = 256; 1911 rdev->config.evergreen.max_threads = 248; 1912 rdev->config.evergreen.max_gs_threads = 32; 1913 rdev->config.evergreen.max_stack_entries = 256; 1914 rdev->config.evergreen.sx_num_of_sets = 4; 1915 rdev->config.evergreen.sx_max_export_size = 256; 1916 rdev->config.evergreen.sx_max_export_pos_size = 64; 1917 rdev->config.evergreen.sx_max_export_smx_size = 192; 1918 rdev->config.evergreen.max_hw_contexts = 8; 1919 rdev->config.evergreen.sq_num_cf_insts = 2; 1920 1921 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1922 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1923 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1924 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN; 1925 break; 1926 case CHIP_CAICOS: 1927 rdev->config.evergreen.num_ses = 1; 1928 rdev->config.evergreen.max_pipes = 2; 1929 rdev->config.evergreen.max_tile_pipes = 2; 1930 rdev->config.evergreen.max_simds = 2; 1931 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1932 rdev->config.evergreen.max_gprs = 256; 1933 rdev->config.evergreen.max_threads = 192; 1934 rdev->config.evergreen.max_gs_threads = 16; 1935 rdev->config.evergreen.max_stack_entries = 256; 1936 rdev->config.evergreen.sx_num_of_sets = 4; 1937 rdev->config.evergreen.sx_max_export_size = 128; 1938 rdev->config.evergreen.sx_max_export_pos_size = 32; 1939 rdev->config.evergreen.sx_max_export_smx_size = 96; 1940 rdev->config.evergreen.max_hw_contexts = 4; 1941 rdev->config.evergreen.sq_num_cf_insts = 1; 1942 1943 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1944 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1945 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1946 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN; 1947 break; 1948 } 1949 1950 /* Initialize HDP */ 1951 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1952 WREG32((0x2c14 + j), 0x00000000); 1953 WREG32((0x2c18 + j), 0x00000000); 1954 WREG32((0x2c1c + j), 0x00000000); 1955 WREG32((0x2c20 + j), 0x00000000); 1956 WREG32((0x2c24 + j), 0x00000000); 1957 } 1958 1959 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1960 1961 evergreen_fix_pci_max_read_req_size(rdev); 1962 1963 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1964 if ((rdev->family == CHIP_PALM) || 1965 (rdev->family == CHIP_SUMO) || 1966 (rdev->family == CHIP_SUMO2)) 1967 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); 1968 else 1969 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1970 1971 /* setup tiling info dword. gb_addr_config is not adequate since it does 1972 * not have bank info, so create a custom tiling dword. 1973 * bits 3:0 num_pipes 1974 * bits 7:4 num_banks 1975 * bits 11:8 group_size 1976 * bits 15:12 row_size 1977 */ 1978 rdev->config.evergreen.tile_config = 0; 1979 switch (rdev->config.evergreen.max_tile_pipes) { 1980 case 1: 1981 default: 1982 rdev->config.evergreen.tile_config |= (0 << 0); 1983 break; 1984 case 2: 1985 rdev->config.evergreen.tile_config |= (1 << 0); 1986 break; 1987 case 4: 1988 rdev->config.evergreen.tile_config |= (2 << 0); 1989 break; 1990 case 8: 1991 rdev->config.evergreen.tile_config |= (3 << 0); 1992 break; 1993 } 1994 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 1995 if (rdev->flags & RADEON_IS_IGP) 1996 rdev->config.evergreen.tile_config |= 1 << 4; 1997 else { 1998 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { 1999 case 0: /* four banks */ 2000 rdev->config.evergreen.tile_config |= 0 << 4; 2001 break; 2002 case 1: /* eight banks */ 2003 rdev->config.evergreen.tile_config |= 1 << 4; 2004 break; 2005 case 2: /* sixteen banks */ 2006 default: 2007 rdev->config.evergreen.tile_config |= 2 << 4; 2008 break; 2009 } 2010 } 2011 rdev->config.evergreen.tile_config |= 0 << 8; 2012 rdev->config.evergreen.tile_config |= 2013 ((gb_addr_config & 0x30000000) >> 28) << 12; 2014 2015 num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1; 2016 2017 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) { 2018 u32 efuse_straps_4; 2019 u32 efuse_straps_3; 2020 2021 WREG32(RCU_IND_INDEX, 0x204); 2022 efuse_straps_4 = RREG32(RCU_IND_DATA); 2023 WREG32(RCU_IND_INDEX, 0x203); 2024 efuse_straps_3 = RREG32(RCU_IND_DATA); 2025 tmp = (((efuse_straps_4 & 0xf) << 4) | 2026 ((efuse_straps_3 & 0xf0000000) >> 28)); 2027 } else { 2028 tmp = 0; 2029 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) { 2030 u32 rb_disable_bitmap; 2031 2032 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); 2033 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); 2034 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; 2035 tmp <<= 4; 2036 tmp |= rb_disable_bitmap; 2037 } 2038 } 2039 /* enabled rb are just the one not disabled :) */ 2040 disabled_rb_mask = tmp; 2041 2042 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 2043 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 2044 2045 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2046 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2047 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2048 WREG32(DMA_TILING_CONFIG, gb_addr_config); 2049 2050 if ((rdev->config.evergreen.max_backends == 1) && 2051 (rdev->flags & RADEON_IS_IGP)) { 2052 if ((disabled_rb_mask & 3) == 1) { 2053 /* RB0 disabled, RB1 enabled */ 2054 tmp = 0x11111111; 2055 } else { 2056 /* RB1 disabled, RB0 enabled */ 2057 tmp = 0x00000000; 2058 } 2059 } else { 2060 tmp = gb_addr_config & NUM_PIPES_MASK; 2061 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2062 EVERGREEN_MAX_BACKENDS, disabled_rb_mask); 2063 } 2064 WREG32(GB_BACKEND_MAP, tmp); 2065 2066 WREG32(CGTS_SYS_TCC_DISABLE, 0); 2067 WREG32(CGTS_TCC_DISABLE, 0); 2068 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); 2069 WREG32(CGTS_USER_TCC_DISABLE, 0); 2070 2071 /* set HW defaults for 3D engine */ 2072 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 2073 ROQ_IB2_START(0x2b))); 2074 2075 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); 2076 2077 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | 2078 SYNC_GRADIENT | 2079 SYNC_WALKER | 2080 SYNC_ALIGNER)); 2081 2082 sx_debug_1 = RREG32(SX_DEBUG_1); 2083 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 2084 WREG32(SX_DEBUG_1, sx_debug_1); 2085 2086 2087 smx_dc_ctl0 = RREG32(SMX_DC_CTL0); 2088 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); 2089 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); 2090 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 2091 2092 if (rdev->family <= CHIP_SUMO2) 2093 WREG32(SMX_SAR_CTL0, 0x00010000); 2094 2095 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | 2096 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | 2097 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); 2098 2099 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | 2100 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | 2101 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); 2102 2103 WREG32(VGT_NUM_INSTANCES, 1); 2104 WREG32(SPI_CONFIG_CNTL, 0); 2105 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); 2106 WREG32(CP_PERFMON_CNTL, 0); 2107 2108 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | 2109 FETCH_FIFO_HIWATER(0x4) | 2110 DONE_FIFO_HIWATER(0xe0) | 2111 ALU_UPDATE_FIFO_HIWATER(0x8))); 2112 2113 sq_config = RREG32(SQ_CONFIG); 2114 sq_config &= ~(PS_PRIO(3) | 2115 VS_PRIO(3) | 2116 GS_PRIO(3) | 2117 ES_PRIO(3)); 2118 sq_config |= (VC_ENABLE | 2119 EXPORT_SRC_C | 2120 PS_PRIO(0) | 2121 VS_PRIO(1) | 2122 GS_PRIO(2) | 2123 ES_PRIO(3)); 2124 2125 switch (rdev->family) { 2126 case CHIP_CEDAR: 2127 case CHIP_PALM: 2128 case CHIP_SUMO: 2129 case CHIP_SUMO2: 2130 case CHIP_CAICOS: 2131 /* no vertex cache */ 2132 sq_config &= ~VC_ENABLE; 2133 break; 2134 default: 2135 break; 2136 } 2137 2138 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); 2139 2140 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); 2141 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); 2142 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); 2143 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); 2144 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); 2145 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); 2146 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); 2147 2148 switch (rdev->family) { 2149 case CHIP_CEDAR: 2150 case CHIP_PALM: 2151 case CHIP_SUMO: 2152 case CHIP_SUMO2: 2153 ps_thread_count = 96; 2154 break; 2155 default: 2156 ps_thread_count = 128; 2157 break; 2158 } 2159 2160 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); 2161 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2162 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2163 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2164 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2165 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2166 2167 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2168 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2169 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2170 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2171 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2172 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 2173 2174 WREG32(SQ_CONFIG, sq_config); 2175 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 2176 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 2177 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); 2178 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 2179 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); 2180 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 2181 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 2182 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); 2183 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); 2184 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); 2185 2186 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | 2187 FORCE_EOV_MAX_REZ_CNT(255))); 2188 2189 switch (rdev->family) { 2190 case CHIP_CEDAR: 2191 case CHIP_PALM: 2192 case CHIP_SUMO: 2193 case CHIP_SUMO2: 2194 case CHIP_CAICOS: 2195 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); 2196 break; 2197 default: 2198 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); 2199 break; 2200 } 2201 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); 2202 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); 2203 2204 WREG32(VGT_GS_VERTEX_REUSE, 16); 2205 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0); 2206 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2207 2208 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); 2209 WREG32(VGT_OUT_DEALLOC_CNTL, 16); 2210 2211 WREG32(CB_PERF_CTR0_SEL_0, 0); 2212 WREG32(CB_PERF_CTR0_SEL_1, 0); 2213 WREG32(CB_PERF_CTR1_SEL_0, 0); 2214 WREG32(CB_PERF_CTR1_SEL_1, 0); 2215 WREG32(CB_PERF_CTR2_SEL_0, 0); 2216 WREG32(CB_PERF_CTR2_SEL_1, 0); 2217 WREG32(CB_PERF_CTR3_SEL_0, 0); 2218 WREG32(CB_PERF_CTR3_SEL_1, 0); 2219 2220 /* clear render buffer base addresses */ 2221 WREG32(CB_COLOR0_BASE, 0); 2222 WREG32(CB_COLOR1_BASE, 0); 2223 WREG32(CB_COLOR2_BASE, 0); 2224 WREG32(CB_COLOR3_BASE, 0); 2225 WREG32(CB_COLOR4_BASE, 0); 2226 WREG32(CB_COLOR5_BASE, 0); 2227 WREG32(CB_COLOR6_BASE, 0); 2228 WREG32(CB_COLOR7_BASE, 0); 2229 WREG32(CB_COLOR8_BASE, 0); 2230 WREG32(CB_COLOR9_BASE, 0); 2231 WREG32(CB_COLOR10_BASE, 0); 2232 WREG32(CB_COLOR11_BASE, 0); 2233 2234 /* set the shader const cache sizes to 0 */ 2235 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) 2236 WREG32(i, 0); 2237 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) 2238 WREG32(i, 0); 2239 2240 tmp = RREG32(HDP_MISC_CNTL); 2241 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 2242 WREG32(HDP_MISC_CNTL, tmp); 2243 2244 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 2245 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 2246 2247 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 2248 2249 DRM_UDELAY(50); 2250 2251 } 2252 2253 int evergreen_mc_init(struct radeon_device *rdev) 2254 { 2255 u32 tmp; 2256 int chansize, numchan; 2257 2258 /* Get VRAM informations */ 2259 rdev->mc.vram_is_ddr = true; 2260 if ((rdev->family == CHIP_PALM) || 2261 (rdev->family == CHIP_SUMO) || 2262 (rdev->family == CHIP_SUMO2)) 2263 tmp = RREG32(FUS_MC_ARB_RAMCFG); 2264 else 2265 tmp = RREG32(MC_ARB_RAMCFG); 2266 if (tmp & CHANSIZE_OVERRIDE) { 2267 chansize = 16; 2268 } else if (tmp & CHANSIZE_MASK) { 2269 chansize = 64; 2270 } else { 2271 chansize = 32; 2272 } 2273 tmp = RREG32(MC_SHARED_CHMAP); 2274 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 2275 case 0: 2276 default: 2277 numchan = 1; 2278 break; 2279 case 1: 2280 numchan = 2; 2281 break; 2282 case 2: 2283 numchan = 4; 2284 break; 2285 case 3: 2286 numchan = 8; 2287 break; 2288 } 2289 rdev->mc.vram_width = numchan * chansize; 2290 /* Could aper size report 0 ? */ 2291 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 2292 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 2293 /* Setup GPU memory space */ 2294 if ((rdev->family == CHIP_PALM) || 2295 (rdev->family == CHIP_SUMO) || 2296 (rdev->family == CHIP_SUMO2)) { 2297 /* size in bytes on fusion */ 2298 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 2299 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 2300 } else { 2301 /* size in MB on evergreen/cayman/tn */ 2302 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2303 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2304 } 2305 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2306 r700_vram_gtt_location(rdev, &rdev->mc); 2307 radeon_update_bandwidth_info(rdev); 2308 2309 return 0; 2310 } 2311 2312 bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2313 { 2314 u32 srbm_status; 2315 u32 grbm_status; 2316 u32 grbm_status_se0, grbm_status_se1; 2317 2318 srbm_status = RREG32(SRBM_STATUS); 2319 grbm_status = RREG32(GRBM_STATUS); 2320 grbm_status_se0 = RREG32(GRBM_STATUS_SE0); 2321 grbm_status_se1 = RREG32(GRBM_STATUS_SE1); 2322 if (!(grbm_status & GUI_ACTIVE)) { 2323 radeon_ring_lockup_update(ring); 2324 return false; 2325 } 2326 /* force CP activities */ 2327 radeon_ring_force_activity(rdev, ring); 2328 return radeon_ring_test_lockup(rdev, ring); 2329 } 2330 2331 static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev) 2332 { 2333 u32 grbm_reset = 0; 2334 2335 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2336 return; 2337 2338 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", 2339 RREG32(GRBM_STATUS)); 2340 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", 2341 RREG32(GRBM_STATUS_SE0)); 2342 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", 2343 RREG32(GRBM_STATUS_SE1)); 2344 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", 2345 RREG32(SRBM_STATUS)); 2346 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 2347 RREG32(CP_STALLED_STAT1)); 2348 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 2349 RREG32(CP_STALLED_STAT2)); 2350 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 2351 RREG32(CP_BUSY_STAT)); 2352 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 2353 RREG32(CP_STAT)); 2354 2355 /* Disable CP parsing/prefetching */ 2356 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 2357 2358 /* reset all the gfx blocks */ 2359 grbm_reset = (SOFT_RESET_CP | 2360 SOFT_RESET_CB | 2361 SOFT_RESET_DB | 2362 SOFT_RESET_PA | 2363 SOFT_RESET_SC | 2364 SOFT_RESET_SPI | 2365 SOFT_RESET_SH | 2366 SOFT_RESET_SX | 2367 SOFT_RESET_TC | 2368 SOFT_RESET_TA | 2369 SOFT_RESET_VC | 2370 SOFT_RESET_VGT); 2371 2372 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); 2373 WREG32(GRBM_SOFT_RESET, grbm_reset); 2374 (void)RREG32(GRBM_SOFT_RESET); 2375 DRM_UDELAY(50); 2376 WREG32(GRBM_SOFT_RESET, 0); 2377 (void)RREG32(GRBM_SOFT_RESET); 2378 2379 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", 2380 RREG32(GRBM_STATUS)); 2381 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", 2382 RREG32(GRBM_STATUS_SE0)); 2383 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", 2384 RREG32(GRBM_STATUS_SE1)); 2385 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", 2386 RREG32(SRBM_STATUS)); 2387 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 2388 RREG32(CP_STALLED_STAT1)); 2389 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 2390 RREG32(CP_STALLED_STAT2)); 2391 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 2392 RREG32(CP_BUSY_STAT)); 2393 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 2394 RREG32(CP_STAT)); 2395 } 2396 2397 static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev) 2398 { 2399 u32 tmp; 2400 2401 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 2402 return; 2403 2404 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 2405 RREG32(DMA_STATUS_REG)); 2406 2407 /* Disable DMA */ 2408 tmp = RREG32(DMA_RB_CNTL); 2409 tmp &= ~DMA_RB_ENABLE; 2410 WREG32(DMA_RB_CNTL, tmp); 2411 2412 /* Reset dma */ 2413 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); 2414 RREG32(SRBM_SOFT_RESET); 2415 DRM_UDELAY(50); 2416 WREG32(SRBM_SOFT_RESET, 0); 2417 2418 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 2419 RREG32(DMA_STATUS_REG)); 2420 } 2421 2422 static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 2423 { 2424 struct evergreen_mc_save save; 2425 2426 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2427 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 2428 2429 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 2430 reset_mask &= ~RADEON_RESET_DMA; 2431 2432 if (reset_mask == 0) 2433 return 0; 2434 2435 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 2436 2437 evergreen_mc_stop(rdev, &save); 2438 if (evergreen_mc_wait_for_idle(rdev)) { 2439 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 2440 } 2441 2442 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) 2443 evergreen_gpu_soft_reset_gfx(rdev); 2444 2445 if (reset_mask & RADEON_RESET_DMA) 2446 evergreen_gpu_soft_reset_dma(rdev); 2447 2448 /* Wait a little for things to settle down */ 2449 DRM_UDELAY(50); 2450 2451 evergreen_mc_resume(rdev, &save); 2452 return 0; 2453 } 2454 2455 int evergreen_asic_reset(struct radeon_device *rdev) 2456 { 2457 return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX | 2458 RADEON_RESET_COMPUTE | 2459 RADEON_RESET_DMA)); 2460 } 2461 2462 /* Interrupts */ 2463 2464 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) 2465 { 2466 if (crtc >= rdev->num_crtc) 2467 return 0; 2468 else 2469 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 2470 } 2471 2472 void evergreen_disable_interrupt_state(struct radeon_device *rdev) 2473 { 2474 u32 tmp; 2475 2476 if (rdev->family >= CHIP_CAYMAN) { 2477 cayman_cp_int_cntl_setup(rdev, 0, 2478 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2479 cayman_cp_int_cntl_setup(rdev, 1, 0); 2480 cayman_cp_int_cntl_setup(rdev, 2, 0); 2481 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE; 2482 WREG32(CAYMAN_DMA1_CNTL, tmp); 2483 } else 2484 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2485 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 2486 WREG32(DMA_CNTL, tmp); 2487 WREG32(GRBM_INT_CNTL, 0); 2488 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2489 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2490 if (rdev->num_crtc >= 4) { 2491 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 2492 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 2493 } 2494 if (rdev->num_crtc >= 6) { 2495 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 2496 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 2497 } 2498 2499 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2500 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2501 if (rdev->num_crtc >= 4) { 2502 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 2503 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 2504 } 2505 if (rdev->num_crtc >= 6) { 2506 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 2507 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 2508 } 2509 2510 /* only one DAC on DCE6 */ 2511 if (!ASIC_IS_DCE6(rdev)) 2512 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 2513 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 2514 2515 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2516 WREG32(DC_HPD1_INT_CONTROL, tmp); 2517 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2518 WREG32(DC_HPD2_INT_CONTROL, tmp); 2519 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2520 WREG32(DC_HPD3_INT_CONTROL, tmp); 2521 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2522 WREG32(DC_HPD4_INT_CONTROL, tmp); 2523 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2524 WREG32(DC_HPD5_INT_CONTROL, tmp); 2525 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2526 WREG32(DC_HPD6_INT_CONTROL, tmp); 2527 2528 } 2529 2530 int evergreen_irq_set(struct radeon_device *rdev) 2531 { 2532 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 2533 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 2534 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 2535 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 2536 u32 grbm_int_cntl = 0; 2537 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 2538 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; 2539 u32 dma_cntl, dma_cntl1 = 0; 2540 2541 if (!rdev->irq.installed) { 2542 dev_warn(rdev->dev, "Can't enable IRQ/MSI because no handler is installed\n"); 2543 return -EINVAL; 2544 } 2545 /* don't enable anything if the ih is disabled */ 2546 if (!rdev->ih.enabled) { 2547 r600_disable_interrupts(rdev); 2548 /* force the active interrupt state to all disabled */ 2549 evergreen_disable_interrupt_state(rdev); 2550 return 0; 2551 } 2552 2553 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2554 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 2555 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 2556 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 2557 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 2558 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 2559 2560 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2561 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2562 afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2563 afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2564 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2565 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2566 2567 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 2568 2569 if (rdev->family >= CHIP_CAYMAN) { 2570 /* enable CP interrupts on all rings */ 2571 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 2572 DRM_DEBUG("evergreen_irq_set: sw int gfx\n"); 2573 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 2574 } 2575 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { 2576 DRM_DEBUG("evergreen_irq_set: sw int cp1\n"); 2577 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE; 2578 } 2579 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { 2580 DRM_DEBUG("evergreen_irq_set: sw int cp2\n"); 2581 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; 2582 } 2583 } else { 2584 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 2585 DRM_DEBUG("evergreen_irq_set: sw int gfx\n"); 2586 cp_int_cntl |= RB_INT_ENABLE; 2587 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 2588 } 2589 } 2590 2591 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { 2592 DRM_DEBUG("r600_irq_set: sw int dma\n"); 2593 dma_cntl |= TRAP_ENABLE; 2594 } 2595 2596 if (rdev->family >= CHIP_CAYMAN) { 2597 dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE; 2598 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { 2599 DRM_DEBUG("r600_irq_set: sw int dma1\n"); 2600 dma_cntl1 |= TRAP_ENABLE; 2601 } 2602 } 2603 2604 if (rdev->irq.crtc_vblank_int[0] || 2605 atomic_read(&rdev->irq.pflip[0])) { 2606 DRM_DEBUG("evergreen_irq_set: vblank 0\n"); 2607 crtc1 |= VBLANK_INT_MASK; 2608 } 2609 if (rdev->irq.crtc_vblank_int[1] || 2610 atomic_read(&rdev->irq.pflip[1])) { 2611 DRM_DEBUG("evergreen_irq_set: vblank 1\n"); 2612 crtc2 |= VBLANK_INT_MASK; 2613 } 2614 if (rdev->irq.crtc_vblank_int[2] || 2615 atomic_read(&rdev->irq.pflip[2])) { 2616 DRM_DEBUG("evergreen_irq_set: vblank 2\n"); 2617 crtc3 |= VBLANK_INT_MASK; 2618 } 2619 if (rdev->irq.crtc_vblank_int[3] || 2620 atomic_read(&rdev->irq.pflip[3])) { 2621 DRM_DEBUG("evergreen_irq_set: vblank 3\n"); 2622 crtc4 |= VBLANK_INT_MASK; 2623 } 2624 if (rdev->irq.crtc_vblank_int[4] || 2625 atomic_read(&rdev->irq.pflip[4])) { 2626 DRM_DEBUG("evergreen_irq_set: vblank 4\n"); 2627 crtc5 |= VBLANK_INT_MASK; 2628 } 2629 if (rdev->irq.crtc_vblank_int[5] || 2630 atomic_read(&rdev->irq.pflip[5])) { 2631 DRM_DEBUG("evergreen_irq_set: vblank 5\n"); 2632 crtc6 |= VBLANK_INT_MASK; 2633 } 2634 if (rdev->irq.hpd[0]) { 2635 DRM_DEBUG("evergreen_irq_set: hpd 1\n"); 2636 hpd1 |= DC_HPDx_INT_EN; 2637 } 2638 if (rdev->irq.hpd[1]) { 2639 DRM_DEBUG("evergreen_irq_set: hpd 2\n"); 2640 hpd2 |= DC_HPDx_INT_EN; 2641 } 2642 if (rdev->irq.hpd[2]) { 2643 DRM_DEBUG("evergreen_irq_set: hpd 3\n"); 2644 hpd3 |= DC_HPDx_INT_EN; 2645 } 2646 if (rdev->irq.hpd[3]) { 2647 DRM_DEBUG("evergreen_irq_set: hpd 4\n"); 2648 hpd4 |= DC_HPDx_INT_EN; 2649 } 2650 if (rdev->irq.hpd[4]) { 2651 DRM_DEBUG("evergreen_irq_set: hpd 5\n"); 2652 hpd5 |= DC_HPDx_INT_EN; 2653 } 2654 if (rdev->irq.hpd[5]) { 2655 DRM_DEBUG("evergreen_irq_set: hpd 6\n"); 2656 hpd6 |= DC_HPDx_INT_EN; 2657 } 2658 if (rdev->irq.afmt[0]) { 2659 DRM_DEBUG("evergreen_irq_set: hdmi 0\n"); 2660 afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2661 } 2662 if (rdev->irq.afmt[1]) { 2663 DRM_DEBUG("evergreen_irq_set: hdmi 1\n"); 2664 afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2665 } 2666 if (rdev->irq.afmt[2]) { 2667 DRM_DEBUG("evergreen_irq_set: hdmi 2\n"); 2668 afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2669 } 2670 if (rdev->irq.afmt[3]) { 2671 DRM_DEBUG("evergreen_irq_set: hdmi 3\n"); 2672 afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2673 } 2674 if (rdev->irq.afmt[4]) { 2675 DRM_DEBUG("evergreen_irq_set: hdmi 4\n"); 2676 afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2677 } 2678 if (rdev->irq.afmt[5]) { 2679 DRM_DEBUG("evergreen_irq_set: hdmi 5\n"); 2680 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2681 } 2682 2683 if (rdev->family >= CHIP_CAYMAN) { 2684 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl); 2685 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1); 2686 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2); 2687 } else 2688 WREG32(CP_INT_CNTL, cp_int_cntl); 2689 2690 WREG32(DMA_CNTL, dma_cntl); 2691 2692 if (rdev->family >= CHIP_CAYMAN) 2693 WREG32(CAYMAN_DMA1_CNTL, dma_cntl1); 2694 2695 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 2696 2697 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 2698 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 2699 if (rdev->num_crtc >= 4) { 2700 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 2701 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); 2702 } 2703 if (rdev->num_crtc >= 6) { 2704 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); 2705 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 2706 } 2707 2708 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 2709 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 2710 if (rdev->num_crtc >= 4) { 2711 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 2712 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 2713 } 2714 if (rdev->num_crtc >= 6) { 2715 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 2716 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 2717 } 2718 2719 WREG32(DC_HPD1_INT_CONTROL, hpd1); 2720 WREG32(DC_HPD2_INT_CONTROL, hpd2); 2721 WREG32(DC_HPD3_INT_CONTROL, hpd3); 2722 WREG32(DC_HPD4_INT_CONTROL, hpd4); 2723 WREG32(DC_HPD5_INT_CONTROL, hpd5); 2724 WREG32(DC_HPD6_INT_CONTROL, hpd6); 2725 2726 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1); 2727 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2); 2728 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3); 2729 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4); 2730 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); 2731 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); 2732 2733 return 0; 2734 } 2735 2736 static void evergreen_irq_ack(struct radeon_device *rdev) 2737 { 2738 u32 tmp; 2739 2740 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); 2741 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 2742 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); 2743 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); 2744 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); 2745 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 2746 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); 2747 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); 2748 if (rdev->num_crtc >= 4) { 2749 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); 2750 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); 2751 } 2752 if (rdev->num_crtc >= 6) { 2753 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); 2754 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); 2755 } 2756 2757 rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); 2758 rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); 2759 rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); 2760 rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); 2761 rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); 2762 rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); 2763 2764 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) 2765 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2766 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) 2767 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2768 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) 2769 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); 2770 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) 2771 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); 2772 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) 2773 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); 2774 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) 2775 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); 2776 2777 if (rdev->num_crtc >= 4) { 2778 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) 2779 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2780 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) 2781 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2782 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) 2783 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); 2784 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) 2785 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); 2786 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) 2787 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); 2788 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) 2789 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); 2790 } 2791 2792 if (rdev->num_crtc >= 6) { 2793 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) 2794 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2795 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) 2796 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); 2797 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) 2798 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); 2799 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) 2800 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); 2801 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) 2802 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); 2803 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) 2804 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); 2805 } 2806 2807 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 2808 tmp = RREG32(DC_HPD1_INT_CONTROL); 2809 tmp |= DC_HPDx_INT_ACK; 2810 WREG32(DC_HPD1_INT_CONTROL, tmp); 2811 } 2812 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 2813 tmp = RREG32(DC_HPD2_INT_CONTROL); 2814 tmp |= DC_HPDx_INT_ACK; 2815 WREG32(DC_HPD2_INT_CONTROL, tmp); 2816 } 2817 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 2818 tmp = RREG32(DC_HPD3_INT_CONTROL); 2819 tmp |= DC_HPDx_INT_ACK; 2820 WREG32(DC_HPD3_INT_CONTROL, tmp); 2821 } 2822 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 2823 tmp = RREG32(DC_HPD4_INT_CONTROL); 2824 tmp |= DC_HPDx_INT_ACK; 2825 WREG32(DC_HPD4_INT_CONTROL, tmp); 2826 } 2827 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 2828 tmp = RREG32(DC_HPD5_INT_CONTROL); 2829 tmp |= DC_HPDx_INT_ACK; 2830 WREG32(DC_HPD5_INT_CONTROL, tmp); 2831 } 2832 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 2833 tmp = RREG32(DC_HPD5_INT_CONTROL); 2834 tmp |= DC_HPDx_INT_ACK; 2835 WREG32(DC_HPD6_INT_CONTROL, tmp); 2836 } 2837 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { 2838 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); 2839 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 2840 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp); 2841 } 2842 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { 2843 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 2844 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 2845 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp); 2846 } 2847 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { 2848 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); 2849 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 2850 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp); 2851 } 2852 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { 2853 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); 2854 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 2855 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp); 2856 } 2857 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { 2858 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); 2859 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 2860 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp); 2861 } 2862 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { 2863 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 2864 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 2865 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp); 2866 } 2867 } 2868 2869 static void evergreen_irq_disable(struct radeon_device *rdev) 2870 { 2871 r600_disable_interrupts(rdev); 2872 /* Wait and acknowledge irq */ 2873 DRM_MDELAY(1); 2874 evergreen_irq_ack(rdev); 2875 evergreen_disable_interrupt_state(rdev); 2876 } 2877 2878 void evergreen_irq_suspend(struct radeon_device *rdev) 2879 { 2880 evergreen_irq_disable(rdev); 2881 r600_rlc_stop(rdev); 2882 } 2883 2884 static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) 2885 { 2886 u32 wptr, tmp; 2887 2888 if (rdev->wb.enabled) 2889 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 2890 else 2891 wptr = RREG32(IH_RB_WPTR); 2892 2893 if (wptr & RB_OVERFLOW) { 2894 /* When a ring buffer overflow happen start parsing interrupt 2895 * from the last not overwritten vector (wptr + 16). Hopefully 2896 * this should allow us to catchup. 2897 */ 2898 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 2899 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); 2900 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 2901 tmp = RREG32(IH_RB_CNTL); 2902 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2903 WREG32(IH_RB_CNTL, tmp); 2904 } 2905 return (wptr & rdev->ih.ptr_mask); 2906 } 2907 2908 irqreturn_t evergreen_irq_process(struct radeon_device *rdev) 2909 { 2910 u32 wptr; 2911 u32 rptr; 2912 u32 src_id, src_data; 2913 u32 ring_index; 2914 bool queue_hotplug = false; 2915 bool queue_hdmi = false; 2916 2917 if (!rdev->ih.enabled || rdev->shutdown) 2918 return IRQ_NONE; 2919 2920 wptr = evergreen_get_ih_wptr(rdev); 2921 2922 restart_ih: 2923 /* is somebody else already processing irqs? */ 2924 if (atomic_xchg(&rdev->ih.lock, 1)) 2925 return IRQ_NONE; 2926 2927 rptr = rdev->ih.rptr; 2928 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2929 2930 /* Order reading of wptr vs. reading of IH ring data */ 2931 cpu_lfence(); 2932 2933 /* display interrupts */ 2934 evergreen_irq_ack(rdev); 2935 2936 while (rptr != wptr) { 2937 /* wptr/rptr are in bytes! */ 2938 ring_index = rptr / 4; 2939 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; 2940 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; 2941 2942 switch (src_id) { 2943 case 1: /* D1 vblank/vline */ 2944 switch (src_data) { 2945 case 0: /* D1 vblank */ 2946 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { 2947 if (rdev->irq.crtc_vblank_int[0]) { 2948 drm_handle_vblank(rdev->ddev, 0); 2949 rdev->pm.vblank_sync = true; 2950 DRM_WAKEUP(&rdev->irq.vblank_queue); 2951 } 2952 if (atomic_read(&rdev->irq.pflip[0])) 2953 radeon_crtc_handle_flip(rdev, 0); 2954 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2955 DRM_DEBUG("IH: D1 vblank\n"); 2956 } 2957 break; 2958 case 1: /* D1 vline */ 2959 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { 2960 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; 2961 DRM_DEBUG("IH: D1 vline\n"); 2962 } 2963 break; 2964 default: 2965 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2966 break; 2967 } 2968 break; 2969 case 2: /* D2 vblank/vline */ 2970 switch (src_data) { 2971 case 0: /* D2 vblank */ 2972 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 2973 if (rdev->irq.crtc_vblank_int[1]) { 2974 drm_handle_vblank(rdev->ddev, 1); 2975 rdev->pm.vblank_sync = true; 2976 DRM_WAKEUP(&rdev->irq.vblank_queue); 2977 } 2978 if (atomic_read(&rdev->irq.pflip[1])) 2979 radeon_crtc_handle_flip(rdev, 1); 2980 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 2981 DRM_DEBUG("IH: D2 vblank\n"); 2982 } 2983 break; 2984 case 1: /* D2 vline */ 2985 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 2986 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 2987 DRM_DEBUG("IH: D2 vline\n"); 2988 } 2989 break; 2990 default: 2991 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 2992 break; 2993 } 2994 break; 2995 case 3: /* D3 vblank/vline */ 2996 switch (src_data) { 2997 case 0: /* D3 vblank */ 2998 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 2999 if (rdev->irq.crtc_vblank_int[2]) { 3000 drm_handle_vblank(rdev->ddev, 2); 3001 rdev->pm.vblank_sync = true; 3002 DRM_WAKEUP(&rdev->irq.vblank_queue); 3003 } 3004 if (atomic_read(&rdev->irq.pflip[2])) 3005 radeon_crtc_handle_flip(rdev, 2); 3006 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 3007 DRM_DEBUG("IH: D3 vblank\n"); 3008 } 3009 break; 3010 case 1: /* D3 vline */ 3011 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 3012 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 3013 DRM_DEBUG("IH: D3 vline\n"); 3014 } 3015 break; 3016 default: 3017 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3018 break; 3019 } 3020 break; 3021 case 4: /* D4 vblank/vline */ 3022 switch (src_data) { 3023 case 0: /* D4 vblank */ 3024 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 3025 if (rdev->irq.crtc_vblank_int[3]) { 3026 drm_handle_vblank(rdev->ddev, 3); 3027 rdev->pm.vblank_sync = true; 3028 DRM_WAKEUP(&rdev->irq.vblank_queue); 3029 } 3030 if (atomic_read(&rdev->irq.pflip[3])) 3031 radeon_crtc_handle_flip(rdev, 3); 3032 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 3033 DRM_DEBUG("IH: D4 vblank\n"); 3034 } 3035 break; 3036 case 1: /* D4 vline */ 3037 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 3038 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 3039 DRM_DEBUG("IH: D4 vline\n"); 3040 } 3041 break; 3042 default: 3043 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3044 break; 3045 } 3046 break; 3047 case 5: /* D5 vblank/vline */ 3048 switch (src_data) { 3049 case 0: /* D5 vblank */ 3050 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 3051 if (rdev->irq.crtc_vblank_int[4]) { 3052 drm_handle_vblank(rdev->ddev, 4); 3053 rdev->pm.vblank_sync = true; 3054 DRM_WAKEUP(&rdev->irq.vblank_queue); 3055 } 3056 if (atomic_read(&rdev->irq.pflip[4])) 3057 radeon_crtc_handle_flip(rdev, 4); 3058 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 3059 DRM_DEBUG("IH: D5 vblank\n"); 3060 } 3061 break; 3062 case 1: /* D5 vline */ 3063 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 3064 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 3065 DRM_DEBUG("IH: D5 vline\n"); 3066 } 3067 break; 3068 default: 3069 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3070 break; 3071 } 3072 break; 3073 case 6: /* D6 vblank/vline */ 3074 switch (src_data) { 3075 case 0: /* D6 vblank */ 3076 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 3077 if (rdev->irq.crtc_vblank_int[5]) { 3078 drm_handle_vblank(rdev->ddev, 5); 3079 rdev->pm.vblank_sync = true; 3080 DRM_WAKEUP(&rdev->irq.vblank_queue); 3081 } 3082 if (atomic_read(&rdev->irq.pflip[5])) 3083 radeon_crtc_handle_flip(rdev, 5); 3084 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 3085 DRM_DEBUG("IH: D6 vblank\n"); 3086 } 3087 break; 3088 case 1: /* D6 vline */ 3089 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 3090 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 3091 DRM_DEBUG("IH: D6 vline\n"); 3092 } 3093 break; 3094 default: 3095 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3096 break; 3097 } 3098 break; 3099 case 42: /* HPD hotplug */ 3100 switch (src_data) { 3101 case 0: 3102 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 3103 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; 3104 queue_hotplug = true; 3105 DRM_DEBUG("IH: HPD1\n"); 3106 } 3107 break; 3108 case 1: 3109 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 3110 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; 3111 queue_hotplug = true; 3112 DRM_DEBUG("IH: HPD2\n"); 3113 } 3114 break; 3115 case 2: 3116 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 3117 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 3118 queue_hotplug = true; 3119 DRM_DEBUG("IH: HPD3\n"); 3120 } 3121 break; 3122 case 3: 3123 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 3124 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 3125 queue_hotplug = true; 3126 DRM_DEBUG("IH: HPD4\n"); 3127 } 3128 break; 3129 case 4: 3130 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 3131 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 3132 queue_hotplug = true; 3133 DRM_DEBUG("IH: HPD5\n"); 3134 } 3135 break; 3136 case 5: 3137 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 3138 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 3139 queue_hotplug = true; 3140 DRM_DEBUG("IH: HPD6\n"); 3141 } 3142 break; 3143 default: 3144 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3145 break; 3146 } 3147 break; 3148 case 44: /* hdmi */ 3149 switch (src_data) { 3150 case 0: 3151 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { 3152 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG; 3153 queue_hdmi = true; 3154 DRM_DEBUG("IH: HDMI0\n"); 3155 } 3156 break; 3157 case 1: 3158 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { 3159 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG; 3160 queue_hdmi = true; 3161 DRM_DEBUG("IH: HDMI1\n"); 3162 } 3163 break; 3164 case 2: 3165 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { 3166 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG; 3167 queue_hdmi = true; 3168 DRM_DEBUG("IH: HDMI2\n"); 3169 } 3170 break; 3171 case 3: 3172 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { 3173 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG; 3174 queue_hdmi = true; 3175 DRM_DEBUG("IH: HDMI3\n"); 3176 } 3177 break; 3178 case 4: 3179 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { 3180 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG; 3181 queue_hdmi = true; 3182 DRM_DEBUG("IH: HDMI4\n"); 3183 } 3184 break; 3185 case 5: 3186 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { 3187 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG; 3188 queue_hdmi = true; 3189 DRM_DEBUG("IH: HDMI5\n"); 3190 } 3191 break; 3192 default: 3193 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 3194 break; 3195 } 3196 break; 3197 case 146: 3198 case 147: 3199 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 3200 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 3201 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 3202 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 3203 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 3204 /* reset addr and status */ 3205 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 3206 break; 3207 case 176: /* CP_INT in ring buffer */ 3208 case 177: /* CP_INT in IB1 */ 3209 case 178: /* CP_INT in IB2 */ 3210 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); 3211 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3212 break; 3213 case 181: /* CP EOP event */ 3214 DRM_DEBUG("IH: CP EOP\n"); 3215 if (rdev->family >= CHIP_CAYMAN) { 3216 switch (src_data) { 3217 case 0: 3218 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3219 break; 3220 case 1: 3221 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX); 3222 break; 3223 case 2: 3224 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX); 3225 break; 3226 } 3227 } else 3228 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3229 break; 3230 case 224: /* DMA trap event */ 3231 DRM_DEBUG("IH: DMA trap\n"); 3232 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 3233 break; 3234 case 233: /* GUI IDLE */ 3235 DRM_DEBUG("IH: GUI idle\n"); 3236 break; 3237 case 244: /* DMA trap event */ 3238 if (rdev->family >= CHIP_CAYMAN) { 3239 DRM_DEBUG("IH: DMA1 trap\n"); 3240 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); 3241 } 3242 break; 3243 default: 3244 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3245 break; 3246 } 3247 3248 /* wptr/rptr are in bytes! */ 3249 rptr += 16; 3250 rptr &= rdev->ih.ptr_mask; 3251 } 3252 if (queue_hotplug) 3253 taskqueue_enqueue(rdev->tq, &rdev->hotplug_work); 3254 if (queue_hdmi) 3255 taskqueue_enqueue(rdev->tq, &rdev->audio_work); 3256 rdev->ih.rptr = rptr; 3257 WREG32(IH_RB_RPTR, rdev->ih.rptr); 3258 atomic_set(&rdev->ih.lock, 0); 3259 3260 /* make sure wptr hasn't changed while processing */ 3261 wptr = evergreen_get_ih_wptr(rdev); 3262 if (wptr != rptr) 3263 goto restart_ih; 3264 3265 return IRQ_HANDLED; 3266 } 3267 3268 /** 3269 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring 3270 * 3271 * @rdev: radeon_device pointer 3272 * @fence: radeon fence object 3273 * 3274 * Add a DMA fence packet to the ring to write 3275 * the fence seq number and DMA trap packet to generate 3276 * an interrupt if needed (evergreen-SI). 3277 */ 3278 void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, 3279 struct radeon_fence *fence) 3280 { 3281 struct radeon_ring *ring = &rdev->ring[fence->ring]; 3282 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 3283 /* write the fence */ 3284 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); 3285 radeon_ring_write(ring, addr & 0xfffffffc); 3286 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); 3287 radeon_ring_write(ring, fence->seq); 3288 /* generate an interrupt */ 3289 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); 3290 /* flush HDP */ 3291 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 3292 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); 3293 radeon_ring_write(ring, 1); 3294 } 3295 3296 /** 3297 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine 3298 * 3299 * @rdev: radeon_device pointer 3300 * @ib: IB object to schedule 3301 * 3302 * Schedule an IB in the DMA ring (evergreen). 3303 */ 3304 void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, 3305 struct radeon_ib *ib) 3306 { 3307 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3308 3309 if (rdev->wb.enabled) { 3310 u32 next_rptr = ring->wptr + 4; 3311 while ((next_rptr & 7) != 5) 3312 next_rptr++; 3313 next_rptr += 3; 3314 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 3315 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3316 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); 3317 radeon_ring_write(ring, next_rptr); 3318 } 3319 3320 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. 3321 * Pad as necessary with NOPs. 3322 */ 3323 while ((ring->wptr & 7) != 5) 3324 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 3325 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); 3326 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); 3327 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); 3328 3329 } 3330 3331 /** 3332 * evergreen_copy_dma - copy pages using the DMA engine 3333 * 3334 * @rdev: radeon_device pointer 3335 * @src_offset: src GPU address 3336 * @dst_offset: dst GPU address 3337 * @num_gpu_pages: number of GPU pages to xfer 3338 * @fence: radeon fence object 3339 * 3340 * Copy GPU paging using the DMA engine (evergreen-cayman). 3341 * Used by the radeon ttm implementation to move pages if 3342 * registered as the asic copy callback. 3343 */ 3344 int evergreen_copy_dma(struct radeon_device *rdev, 3345 uint64_t src_offset, uint64_t dst_offset, 3346 unsigned num_gpu_pages, 3347 struct radeon_fence **fence) 3348 { 3349 struct radeon_semaphore *sem = NULL; 3350 int ring_index = rdev->asic->copy.dma_ring_index; 3351 struct radeon_ring *ring = &rdev->ring[ring_index]; 3352 u32 size_in_dw, cur_size_in_dw; 3353 int i, num_loops; 3354 int r = 0; 3355 3356 r = radeon_semaphore_create(rdev, &sem); 3357 if (r) { 3358 DRM_ERROR("radeon: moving bo (%d).\n", r); 3359 return r; 3360 } 3361 3362 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 3363 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); 3364 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); 3365 if (r) { 3366 DRM_ERROR("radeon: moving bo (%d).\n", r); 3367 radeon_semaphore_free(rdev, &sem, NULL); 3368 return r; 3369 } 3370 3371 if (radeon_fence_need_sync(*fence, ring->idx)) { 3372 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 3373 ring->idx); 3374 radeon_fence_note_sync(*fence, ring->idx); 3375 } else { 3376 radeon_semaphore_free(rdev, &sem, NULL); 3377 } 3378 3379 for (i = 0; i < num_loops; i++) { 3380 cur_size_in_dw = size_in_dw; 3381 if (cur_size_in_dw > 0xFFFFF) 3382 cur_size_in_dw = 0xFFFFF; 3383 size_in_dw -= cur_size_in_dw; 3384 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); 3385 radeon_ring_write(ring, dst_offset & 0xfffffffc); 3386 radeon_ring_write(ring, src_offset & 0xfffffffc); 3387 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); 3388 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); 3389 src_offset += cur_size_in_dw * 4; 3390 dst_offset += cur_size_in_dw * 4; 3391 } 3392 3393 r = radeon_fence_emit(rdev, fence, ring->idx); 3394 if (r) { 3395 radeon_ring_unlock_undo(rdev, ring); 3396 return r; 3397 } 3398 3399 radeon_ring_unlock_commit(rdev, ring); 3400 radeon_semaphore_free(rdev, &sem, *fence); 3401 3402 return r; 3403 } 3404 3405 static int evergreen_startup(struct radeon_device *rdev) 3406 { 3407 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3408 int r; 3409 3410 /* enable pcie gen2 link */ 3411 evergreen_pcie_gen2_enable(rdev); 3412 3413 if (ASIC_IS_DCE5(rdev)) { 3414 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { 3415 r = ni_init_microcode(rdev); 3416 if (r) { 3417 DRM_ERROR("Failed to load firmware!\n"); 3418 return r; 3419 } 3420 } 3421 r = ni_mc_load_microcode(rdev); 3422 if (r) { 3423 DRM_ERROR("Failed to load MC firmware!\n"); 3424 return r; 3425 } 3426 } else { 3427 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 3428 r = r600_init_microcode(rdev); 3429 if (r) { 3430 DRM_ERROR("Failed to load firmware!\n"); 3431 return r; 3432 } 3433 } 3434 } 3435 3436 r = r600_vram_scratch_init(rdev); 3437 if (r) 3438 return r; 3439 3440 evergreen_mc_program(rdev); 3441 if (rdev->flags & RADEON_IS_AGP) { 3442 evergreen_agp_enable(rdev); 3443 } else { 3444 r = evergreen_pcie_gart_enable(rdev); 3445 if (r) 3446 return r; 3447 } 3448 evergreen_gpu_init(rdev); 3449 3450 r = evergreen_blit_init(rdev); 3451 if (r) { 3452 r600_blit_fini(rdev); 3453 rdev->asic->copy.copy = NULL; 3454 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 3455 } 3456 3457 /* allocate wb buffer */ 3458 r = radeon_wb_init(rdev); 3459 if (r) 3460 return r; 3461 3462 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 3463 if (r) { 3464 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 3465 return r; 3466 } 3467 3468 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); 3469 if (r) { 3470 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 3471 return r; 3472 } 3473 3474 /* Enable IRQ */ 3475 r = r600_irq_init(rdev); 3476 if (r) { 3477 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3478 radeon_irq_kms_fini(rdev); 3479 return r; 3480 } 3481 evergreen_irq_set(rdev); 3482 3483 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 3484 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 3485 0, 0xfffff, RADEON_CP_PACKET2); 3486 if (r) 3487 return r; 3488 3489 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 3490 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 3491 DMA_RB_RPTR, DMA_RB_WPTR, 3492 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 3493 if (r) 3494 return r; 3495 3496 r = evergreen_cp_load_microcode(rdev); 3497 if (r) 3498 return r; 3499 r = evergreen_cp_resume(rdev); 3500 if (r) 3501 return r; 3502 r = r600_dma_resume(rdev); 3503 if (r) 3504 return r; 3505 3506 r = radeon_ib_pool_init(rdev); 3507 if (r) { 3508 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 3509 return r; 3510 } 3511 3512 r = r600_audio_init(rdev); 3513 if (r) { 3514 DRM_ERROR("radeon: audio init failed\n"); 3515 return r; 3516 } 3517 3518 return 0; 3519 } 3520 3521 int evergreen_resume(struct radeon_device *rdev) 3522 { 3523 int r; 3524 3525 /* reset the asic, the gfx blocks are often in a bad state 3526 * after the driver is unloaded or after a resume 3527 */ 3528 if (radeon_asic_reset(rdev)) 3529 dev_warn(rdev->dev, "GPU reset failed !\n"); 3530 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, 3531 * posting will perform necessary task to bring back GPU into good 3532 * shape. 3533 */ 3534 /* post card */ 3535 atom_asic_init(rdev->mode_info.atom_context); 3536 3537 rdev->accel_working = true; 3538 r = evergreen_startup(rdev); 3539 if (r) { 3540 DRM_ERROR("evergreen startup failed on resume\n"); 3541 rdev->accel_working = false; 3542 return r; 3543 } 3544 3545 return r; 3546 3547 } 3548 3549 int evergreen_suspend(struct radeon_device *rdev) 3550 { 3551 r600_audio_fini(rdev); 3552 r700_cp_stop(rdev); 3553 r600_dma_stop(rdev); 3554 evergreen_irq_suspend(rdev); 3555 radeon_wb_disable(rdev); 3556 evergreen_pcie_gart_disable(rdev); 3557 3558 return 0; 3559 } 3560 3561 /* Plan is to move initialization in that function and use 3562 * helper function so that radeon_device_init pretty much 3563 * do nothing more than calling asic specific function. This 3564 * should also allow to remove a bunch of callback function 3565 * like vram_info. 3566 */ 3567 int evergreen_init(struct radeon_device *rdev) 3568 { 3569 int r; 3570 3571 /* Read BIOS */ 3572 if (!radeon_get_bios(rdev)) { 3573 if (ASIC_IS_AVIVO(rdev)) 3574 return -EINVAL; 3575 } 3576 /* Must be an ATOMBIOS */ 3577 if (!rdev->is_atom_bios) { 3578 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n"); 3579 return -EINVAL; 3580 } 3581 r = radeon_atombios_init(rdev); 3582 if (r) 3583 return r; 3584 /* reset the asic, the gfx blocks are often in a bad state 3585 * after the driver is unloaded or after a resume 3586 */ 3587 if (radeon_asic_reset(rdev)) 3588 dev_warn(rdev->dev, "GPU reset failed !\n"); 3589 /* Post card if necessary */ 3590 if (!radeon_card_posted(rdev)) { 3591 if (!rdev->bios) { 3592 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3593 return -EINVAL; 3594 } 3595 DRM_INFO("GPU not posted. posting now...\n"); 3596 atom_asic_init(rdev->mode_info.atom_context); 3597 } 3598 /* Initialize scratch registers */ 3599 r600_scratch_init(rdev); 3600 /* Initialize surface registers */ 3601 radeon_surface_init(rdev); 3602 /* Initialize clocks */ 3603 radeon_get_clock_info(rdev->ddev); 3604 /* Fence driver */ 3605 r = radeon_fence_driver_init(rdev); 3606 if (r) 3607 return r; 3608 /* initialize AGP */ 3609 if (rdev->flags & RADEON_IS_AGP) { 3610 r = radeon_agp_init(rdev); 3611 if (r) 3612 radeon_agp_disable(rdev); 3613 } 3614 /* initialize memory controller */ 3615 r = evergreen_mc_init(rdev); 3616 if (r) 3617 return r; 3618 /* Memory manager */ 3619 r = radeon_bo_init(rdev); 3620 if (r) 3621 return r; 3622 3623 r = radeon_irq_kms_init(rdev); 3624 if (r) 3625 return r; 3626 3627 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3628 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3629 3630 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; 3631 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); 3632 3633 rdev->ih.ring_obj = NULL; 3634 r600_ih_ring_init(rdev, 64 * 1024); 3635 3636 r = r600_pcie_gart_init(rdev); 3637 if (r) 3638 return r; 3639 3640 rdev->accel_working = true; 3641 r = evergreen_startup(rdev); 3642 if (r) { 3643 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3644 r700_cp_fini(rdev); 3645 r600_dma_fini(rdev); 3646 r600_irq_fini(rdev); 3647 radeon_wb_fini(rdev); 3648 radeon_ib_pool_fini(rdev); 3649 radeon_irq_kms_fini(rdev); 3650 evergreen_pcie_gart_fini(rdev); 3651 rdev->accel_working = false; 3652 } 3653 3654 /* Don't start up if the MC ucode is missing on BTC parts. 3655 * The default clocks and voltages before the MC ucode 3656 * is loaded are not suffient for advanced operations. 3657 */ 3658 if (ASIC_IS_DCE5(rdev)) { 3659 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { 3660 DRM_ERROR("radeon: MC ucode required for NI+.\n"); 3661 return -EINVAL; 3662 } 3663 } 3664 3665 return 0; 3666 } 3667 3668 void evergreen_fini(struct radeon_device *rdev) 3669 { 3670 r600_audio_fini(rdev); 3671 r600_blit_fini(rdev); 3672 r700_cp_fini(rdev); 3673 r600_dma_fini(rdev); 3674 r600_irq_fini(rdev); 3675 radeon_wb_fini(rdev); 3676 radeon_ib_pool_fini(rdev); 3677 radeon_irq_kms_fini(rdev); 3678 evergreen_pcie_gart_fini(rdev); 3679 r600_vram_scratch_fini(rdev); 3680 radeon_gem_fini(rdev); 3681 radeon_fence_driver_fini(rdev); 3682 radeon_agp_fini(rdev); 3683 radeon_bo_fini(rdev); 3684 radeon_atombios_fini(rdev); 3685 if (ASIC_IS_DCE5(rdev)) 3686 ni_fini_microcode(rdev); 3687 else 3688 r600_fini_microcode(rdev); 3689 drm_free(rdev->bios, M_DRM); 3690 rdev->bios = NULL; 3691 } 3692 3693 void evergreen_pcie_gen2_enable(struct radeon_device *rdev) 3694 { 3695 u32 link_width_cntl, speed_cntl, mask; 3696 int ret; 3697 3698 if (radeon_pcie_gen2 == 0) 3699 return; 3700 3701 if (rdev->flags & RADEON_IS_IGP) 3702 return; 3703 3704 if (!(rdev->flags & RADEON_IS_PCIE)) 3705 return; 3706 3707 /* x2 cards have a special sequence */ 3708 if (ASIC_IS_X2(rdev)) 3709 return; 3710 3711 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 3712 if (ret != 0) 3713 return; 3714 3715 if (!(mask & DRM_PCIE_SPEED_50)) 3716 return; 3717 3718 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3719 if (speed_cntl & LC_CURRENT_DATA_RATE) { 3720 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 3721 return; 3722 } 3723 3724 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 3725 3726 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || 3727 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 3728 3729 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 3730 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 3731 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 3732 3733 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3734 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 3735 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3736 3737 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3738 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; 3739 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3740 3741 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3742 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; 3743 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3744 3745 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 3746 speed_cntl |= LC_GEN2_EN_STRAP; 3747 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 3748 3749 } else { 3750 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 3751 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 3752 if (1) 3753 link_width_cntl |= LC_UPCONFIGURE_DIS; 3754 else 3755 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 3756 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 3757 } 3758 } 3759