/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include <linux/firmware.h>
#include <linux/module.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100	"radeonkmsfw_R100_cp"
#define FIRMWARE_R200	"radeonkmsfw_R200_cp"
#define FIRMWARE_R300	"radeonkmsfw_R300_cp"
#define FIRMWARE_R420	"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690	"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600	"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520	"radeonkmsfw_R520_cp"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and others in some cases.
 */

static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0) {
		if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
			return true;
		else
			return false;
	} else {
		if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
			return true;
		else
			return false;
	}
}

static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 vline1, vline2;

	if (crtc == 0) {
		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	} else {
		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	}
	if (vline1 != vline2)
		return true;
	else
		return false;
}
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
			return;
	} else {
		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
			return;
	}

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to flip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
}

/**
 * r100_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Check if the last pageflip is still pending (r1xx-r4xx).
 * Returns the current update pending status.
 */
bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		  RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}
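/*
 * Illustrative sketch (not part of the driver): a caller that programs a
 * flip with the two helpers above and then polls for completion could do
 *
 *	r100_page_flip(rdev, crtc_id, new_crtc_base, false);
 *	while (r100_page_flip_pending(rdev, crtc_id))
 *		udelay(1);
 *
 * In practice the radeon core drives these through the asic callback
 * table rather than calling them directly.
 */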
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
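/*
 * Example (illustrative, not from the driver source): with three power
 * states and a single active display, a DYNPM_ACTION_DOWNCLOCK request
 * while in state 2 simply selects state 1 (current index minus one).
 * The search loop in the DOWNCLOCK/UPCLOCK cases above is only taken when
 * more than one crtc is active, so that states flagged
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY are skipped.
 */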
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}

/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}

/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* The hw seems to cache only one entry, so we should discard this
	 * entry; otherwise, if the first GPU GART read hits it, the read
	 * could end up at the wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		WARN(1, "R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
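/*
 * Illustrative note (not part of the driver source): the PCI GART table is
 * a flat array of 32-bit little-endian page addresses, one per GPU page,
 * which is why r100_pci_gart_init() above sets
 * table_size = num_gpu_pages * 4.  For example, a 64 MB GTT with 4 KB
 * pages needs 16384 entries, i.e. a 64 KB table in system RAM.  The entry
 * itself is written by r100_pci_gart_set_page() below.
 */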
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	return addr;
}

void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
			    uint64_t entry)
{
	u32 *gtt = rdev->gart.ptr;
	gtt[i] = cpu_to_le32(lower_32_bits(entry));
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);

	/* read back to post the write */
	RREG32(RADEON_GEN_INT_CNTL);

	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

irqreturn_t r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_vblank(rdev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_vblank(rdev, 1);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/**
 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 * @rdev: radeon device structure
 * @ring: ring buffer struct for emitting packets
 */
static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	r100_ring_hdp_flush(rdev, ring);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

bool r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	BUG();
	return false;
}
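/*
 * Worked example for the blit below (illustrative): stride_bytes starts at
 * RADEON_GPU_PAGE_SIZE (4096) and is already under the 16k limit, so
 * pitch = 4096 / 64 = 64 and stride_pixels = 4096 / 4 = 1024.  Each loop
 * iteration moves at most 8191 pages, and the ring request of
 * 64 + 10 * num_loops dwords leaves room for the blit packets plus the
 * trailing flush and fence.
 */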
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    unsigned num_gpu_pages,
				    struct reservation_object *resv)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	struct radeon_fence *fence;
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return ERR_PTR(-EINVAL);
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages are in Y direction - height;
		 * page width in X direction - width */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring, false);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG_KMS("\n");

	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->datasize % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

u32 r100_gfx_get_rptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(RADEON_CP_RB_RPTR);

	return rptr;
}

u32 r100_gfx_get_wptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	return RREG32(RADEON_CP_RB_WPTR);
}

void r100_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}

/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
{
	release_firmware(rdev->me_fw);
	rdev->me_fw = NULL;
}
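/*
 * Illustrative note (not part of the driver source): the CP microcode
 * image is a sequence of big-endian 32-bit words that is streamed into the
 * ME RAM as DATAH/DATAL pairs, which is why the loop below advances two
 * words per iteration and why r100_cp_init_microcode() above rejects any
 * firmware whose size is not a multiple of 8 bytes.
 */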
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->datasize / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	ring->align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
	else {
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);
	}

	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev->bsddev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	ring->ready = true;
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}
}

/*
 * CS functions
 */
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx,
			    unsigned reg)
{
	int r;
	u32 tile_flags = 0;
	u32 tmp;
	struct radeon_bo_list *reloc;
	u32 value;

	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		radeon_cs_dump_packet(p, pkt);
		return r;
	}

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				radeon_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}

		tmp |= tile_flags;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
	} else
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
	return 0;
}

int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
{
	unsigned c, i;
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	int r = 0;
	volatile uint32_t *ib;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
	if (c > 16) {
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		radeon_cs_dump_packet(p, pkt);
		return -EINVAL;
	}
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx + 2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;
	}
	if (c & 1) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
	}
	return r;
}

int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the wait until */
	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = R100_CP_PACKET0_GET_REG(header);
	crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
	if (!crtc) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}
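/*
 * Example (illustrative): for the function above, a vertex format with
 * W0, FPCOLOR and ST0 set yields
 * 2 (XY) + 1 (W0) + 3 (FPCOLOR) + 2 (ST0) = 8 dwords per vertex.
 */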
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	/* FIXME: only allow PACKET3 blit? easier to check for out of
	 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_CNTL:
	{
		uint32_t temp = idx_value >> 4;
		for (i = 0; i < track->num_texture; i++)
			track->textures[i].enabled = !!(temp & (1 << i));
		track->tex_dirty = true;
	}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		track->tex_dirty = true;
		break;
	default:
		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
{
	unsigned idx;
	u32 value;
	idx = pkt->idx + 1;
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
		return -EINVAL;
	}
	return 0;
}
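/*
 * Illustrative note (not part of the driver source): the dword checked
 * above is taken from the INDX_BUFFER packet body and, judging by the
 * "(need %u have %lu)" message, holds the offset of the last byte
 * addressed, hence the value + 1 comparison against the buffer object
 * size.
 */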
radeon_cs_parser *p, 1921 struct radeon_cs_packet *pkt) 1922 { 1923 struct radeon_bo_list *reloc; 1924 struct r100_cs_track *track; 1925 unsigned idx; 1926 volatile uint32_t *ib; 1927 int r; 1928 1929 ib = p->ib.ptr; 1930 idx = pkt->idx + 1; 1931 track = (struct r100_cs_track *)p->track; 1932 switch (pkt->opcode) { 1933 case PACKET3_3D_LOAD_VBPNTR: 1934 r = r100_packet3_load_vbpntr(p, pkt, idx); 1935 if (r) 1936 return r; 1937 break; 1938 case PACKET3_INDX_BUFFER: 1939 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1940 if (r) { 1941 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1942 radeon_cs_dump_packet(p, pkt); 1943 return r; 1944 } 1945 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); 1946 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1947 if (r) { 1948 return r; 1949 } 1950 break; 1951 case 0x23: 1952 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1953 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1954 if (r) { 1955 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1956 radeon_cs_dump_packet(p, pkt); 1957 return r; 1958 } 1959 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); 1960 track->num_arrays = 1; 1961 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 1962 1963 track->arrays[0].robj = reloc->robj; 1964 track->arrays[0].esize = track->vtx_size; 1965 1966 track->max_indx = radeon_get_ib_value(p, idx+1); 1967 1968 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 1969 track->immd_dwords = pkt->count - 1; 1970 r = r100_cs_track_check(p->rdev, track); 1971 if (r) 1972 return r; 1973 break; 1974 case PACKET3_3D_DRAW_IMMD: 1975 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1976 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1977 return -EINVAL; 1978 } 1979 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 1980 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1981 track->immd_dwords = pkt->count - 1; 1982 r = r100_cs_track_check(p->rdev, track); 1983 if (r) 1984 return r; 1985 break; 1986 /* triggers drawing using in-packet vertex data */ 1987 case PACKET3_3D_DRAW_IMMD_2: 1988 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1989 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1990 return -EINVAL; 1991 } 1992 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1993 track->immd_dwords = pkt->count; 1994 r = r100_cs_track_check(p->rdev, track); 1995 if (r) 1996 return r; 1997 break; 1998 /* triggers drawing using in-packet vertex data */ 1999 case PACKET3_3D_DRAW_VBUF_2: 2000 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2001 r = r100_cs_track_check(p->rdev, track); 2002 if (r) 2003 return r; 2004 break; 2005 /* triggers drawing of vertex buffers setup elsewhere */ 2006 case PACKET3_3D_DRAW_INDX_2: 2007 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2008 r = r100_cs_track_check(p->rdev, track); 2009 if (r) 2010 return r; 2011 break; 2012 /* triggers drawing using indices to vertex buffer */ 2013 case PACKET3_3D_DRAW_VBUF: 2014 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2015 r = r100_cs_track_check(p->rdev, track); 2016 if (r) 2017 return r; 2018 break; 2019 /* triggers drawing of vertex buffers setup elsewhere */ 2020 case PACKET3_3D_DRAW_INDX: 2021 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2022 r = r100_cs_track_check(p->rdev, track); 2023 if (r) 2024 return r; 2025 break; 2026 /* triggers drawing using indices to vertex buffer */ 2027 case PACKET3_3D_CLEAR_HIZ: 2028 case PACKET3_3D_CLEAR_ZMASK: 2029 if (p->rdev->hyperz_filp != 
p->filp) 2030 return -EINVAL; 2031 break; 2032 case PACKET3_NOP: 2033 break; 2034 default: 2035 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2036 return -EINVAL; 2037 } 2038 return 0; 2039 } 2040 2041 int r100_cs_parse(struct radeon_cs_parser *p) 2042 { 2043 struct radeon_cs_packet pkt; 2044 struct r100_cs_track *track; 2045 int r; 2046 2047 track = kzalloc(sizeof(*track), GFP_KERNEL); 2048 if (!track) 2049 return -ENOMEM; 2050 r100_cs_track_clear(p->rdev, track); 2051 p->track = track; 2052 do { 2053 r = radeon_cs_packet_parse(p, &pkt, p->idx); 2054 if (r) { 2055 return r; 2056 } 2057 p->idx += pkt.count + 2; 2058 switch (pkt.type) { 2059 case RADEON_PACKET_TYPE0: 2060 if (p->rdev->family >= CHIP_R200) 2061 r = r100_cs_parse_packet0(p, &pkt, 2062 p->rdev->config.r100.reg_safe_bm, 2063 p->rdev->config.r100.reg_safe_bm_size, 2064 &r200_packet0_check); 2065 else 2066 r = r100_cs_parse_packet0(p, &pkt, 2067 p->rdev->config.r100.reg_safe_bm, 2068 p->rdev->config.r100.reg_safe_bm_size, 2069 &r100_packet0_check); 2070 break; 2071 case RADEON_PACKET_TYPE2: 2072 break; 2073 case RADEON_PACKET_TYPE3: 2074 r = r100_packet3_check(p, &pkt); 2075 break; 2076 default: 2077 DRM_ERROR("Unknown packet type %d !\n", 2078 pkt.type); 2079 return -EINVAL; 2080 } 2081 if (r) 2082 return r; 2083 } while (p->idx < p->chunk_ib->length_dw); 2084 return 0; 2085 } 2086 2087 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2088 { 2089 DRM_ERROR("pitch %d\n", t->pitch); 2090 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2091 DRM_ERROR("width %d\n", t->width); 2092 DRM_ERROR("width_11 %d\n", t->width_11); 2093 DRM_ERROR("height %d\n", t->height); 2094 DRM_ERROR("height_11 %d\n", t->height_11); 2095 DRM_ERROR("num levels %d\n", t->num_levels); 2096 DRM_ERROR("depth %d\n", t->txdepth); 2097 DRM_ERROR("bpp %d\n", t->cpp); 2098 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2099 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2100 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2101 DRM_ERROR("compress format %d\n", t->compress_format); 2102 } 2103 2104 static int r100_track_compress_size(int compress_format, int w, int h) 2105 { 2106 int block_width, block_height, block_bytes; 2107 int wblocks, hblocks; 2108 int min_wblocks; 2109 int sz; 2110 2111 block_width = 4; 2112 block_height = 4; 2113 2114 switch (compress_format) { 2115 case R100_TRACK_COMP_DXT1: 2116 block_bytes = 8; 2117 min_wblocks = 4; 2118 break; 2119 default: 2120 case R100_TRACK_COMP_DXT35: 2121 block_bytes = 16; 2122 min_wblocks = 2; 2123 break; 2124 } 2125 2126 hblocks = (h + block_height - 1) / block_height; 2127 wblocks = (w + block_width - 1) / block_width; 2128 if (wblocks < min_wblocks) 2129 wblocks = min_wblocks; 2130 sz = wblocks * hblocks * block_bytes; 2131 return sz; 2132 } 2133 2134 static int r100_cs_track_cube(struct radeon_device *rdev, 2135 struct r100_cs_track *track, unsigned idx) 2136 { 2137 unsigned face, w, h; 2138 struct radeon_bo *cube_robj; 2139 unsigned long size; 2140 unsigned compress_format = track->textures[idx].compress_format; 2141 2142 for (face = 0; face < 5; face++) { 2143 cube_robj = track->textures[idx].cube_info[face].robj; 2144 w = track->textures[idx].cube_info[face].width; 2145 h = track->textures[idx].cube_info[face].height; 2146 2147 if (compress_format) { 2148 size = r100_track_compress_size(compress_format, w, h); 2149 } else 2150 size = w * h; 2151 size *= track->textures[idx].cpp; 2152 2153 size += track->textures[idx].cube_info[face].offset; 
2154 2155 if (size > radeon_bo_size(cube_robj)) { 2156 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2157 size, radeon_bo_size(cube_robj)); 2158 r100_cs_track_texture_print(&track->textures[idx]); 2159 return -1; 2160 } 2161 } 2162 return 0; 2163 } 2164 2165 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2166 struct r100_cs_track *track) 2167 { 2168 struct radeon_bo *robj; 2169 unsigned long size; 2170 unsigned u, i, w, h, d; 2171 int ret; 2172 2173 for (u = 0; u < track->num_texture; u++) { 2174 if (!track->textures[u].enabled) 2175 continue; 2176 if (track->textures[u].lookup_disable) 2177 continue; 2178 robj = track->textures[u].robj; 2179 if (robj == NULL) { 2180 DRM_ERROR("No texture bound to unit %u\n", u); 2181 return -EINVAL; 2182 } 2183 size = 0; 2184 for (i = 0; i <= track->textures[u].num_levels; i++) { 2185 if (track->textures[u].use_pitch) { 2186 if (rdev->family < CHIP_R300) 2187 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2188 else 2189 w = track->textures[u].pitch / (1 << i); 2190 } else { 2191 w = track->textures[u].width; 2192 if (rdev->family >= CHIP_RV515) 2193 w |= track->textures[u].width_11; 2194 w = w / (1 << i); 2195 if (track->textures[u].roundup_w) 2196 w = roundup_pow_of_two(w); 2197 } 2198 h = track->textures[u].height; 2199 if (rdev->family >= CHIP_RV515) 2200 h |= track->textures[u].height_11; 2201 h = h / (1 << i); 2202 if (track->textures[u].roundup_h) 2203 h = roundup_pow_of_two(h); 2204 if (track->textures[u].tex_coord_type == 1) { 2205 d = (1 << track->textures[u].txdepth) / (1 << i); 2206 if (!d) 2207 d = 1; 2208 } else { 2209 d = 1; 2210 } 2211 if (track->textures[u].compress_format) { 2212 2213 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2214 /* compressed textures are block based */ 2215 } else 2216 size += w * h * d; 2217 } 2218 size *= track->textures[u].cpp; 2219 2220 switch (track->textures[u].tex_coord_type) { 2221 case 0: 2222 case 1: 2223 break; 2224 case 2: 2225 if (track->separate_cube) { 2226 ret = r100_cs_track_cube(rdev, track, u); 2227 if (ret) 2228 return ret; 2229 } else 2230 size *= 6; 2231 break; 2232 default: 2233 DRM_ERROR("Invalid texture coordinate type %u for unit " 2234 "%u\n", track->textures[u].tex_coord_type, u); 2235 return -EINVAL; 2236 } 2237 if (size > radeon_bo_size(robj)) { 2238 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2239 "%lu\n", u, size, radeon_bo_size(robj)); 2240 r100_cs_track_texture_print(&track->textures[u]); 2241 return -EINVAL; 2242 } 2243 } 2244 return 0; 2245 } 2246 2247 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2248 { 2249 unsigned i; 2250 unsigned long size; 2251 unsigned prim_walk; 2252 unsigned nverts; 2253 unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; 2254 2255 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2256 !track->blend_read_enable) 2257 num_cb = 0; 2258 2259 for (i = 0; i < num_cb; i++) { 2260 if (track->cb[i].robj == NULL) { 2261 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2262 return -EINVAL; 2263 } 2264 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2265 size += track->cb[i].offset; 2266 if (size > radeon_bo_size(track->cb[i].robj)) { 2267 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2268 "(need %lu have %lu) !\n", i, size, 2269 radeon_bo_size(track->cb[i].robj)); 2270 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2271 i, track->cb[i].pitch, track->cb[i].cpp, 2272 track->cb[i].offset, track->maxy); 2273 return -EINVAL; 2274 } 2275 } 2276 track->cb_dirty = false; 2277 2278 if (track->zb_dirty && track->z_enabled) { 2279 if (track->zb.robj == NULL) { 2280 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2281 return -EINVAL; 2282 } 2283 size = track->zb.pitch * track->zb.cpp * track->maxy; 2284 size += track->zb.offset; 2285 if (size > radeon_bo_size(track->zb.robj)) { 2286 DRM_ERROR("[drm] Buffer too small for z buffer " 2287 "(need %lu have %lu) !\n", size, 2288 radeon_bo_size(track->zb.robj)); 2289 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2290 track->zb.pitch, track->zb.cpp, 2291 track->zb.offset, track->maxy); 2292 return -EINVAL; 2293 } 2294 } 2295 track->zb_dirty = false; 2296 2297 if (track->aa_dirty && track->aaresolve) { 2298 if (track->aa.robj == NULL) { 2299 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2300 return -EINVAL; 2301 } 2302 /* I believe the format comes from colorbuffer0. */ 2303 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2304 size += track->aa.offset; 2305 if (size > radeon_bo_size(track->aa.robj)) { 2306 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2307 "(need %lu have %lu) !\n", i, size, 2308 radeon_bo_size(track->aa.robj)); 2309 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2310 i, track->aa.pitch, track->cb[0].cpp, 2311 track->aa.offset, track->maxy); 2312 return -EINVAL; 2313 } 2314 } 2315 track->aa_dirty = false; 2316 2317 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2318 if (track->vap_vf_cntl & (1 << 14)) { 2319 nverts = track->vap_alt_nverts; 2320 } else { 2321 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2322 } 2323 switch (prim_walk) { 2324 case 1: 2325 for (i = 0; i < track->num_arrays; i++) { 2326 size = track->arrays[i].esize * track->max_indx * 4; 2327 if (track->arrays[i].robj == NULL) { 2328 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2329 "bound\n", prim_walk, i); 2330 return -EINVAL; 2331 } 2332 if (size > radeon_bo_size(track->arrays[i].robj)) { 2333 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2334 "need %lu dwords have %lu dwords\n", 2335 prim_walk, i, size >> 2, 2336 radeon_bo_size(track->arrays[i].robj) 2337 >> 2); 2338 DRM_ERROR("Max indices %u\n", track->max_indx); 2339 return -EINVAL; 2340 } 2341 } 2342 break; 2343 case 2: 2344 for (i = 0; i < track->num_arrays; i++) { 2345 size = track->arrays[i].esize * (nverts - 1) * 4; 2346 if (track->arrays[i].robj == NULL) { 2347 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2348 "bound\n", prim_walk, i); 2349 return -EINVAL; 2350 } 2351 if (size > radeon_bo_size(track->arrays[i].robj)) { 2352 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2353 "need %lu dwords have %lu dwords\n", 2354 prim_walk, i, size >> 2, 2355 radeon_bo_size(track->arrays[i].robj) 2356 >> 2); 2357 return -EINVAL; 2358 } 2359 } 
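/* for PRIM_WALK 2 the arrays are fetched linearly, so nverts - 1 should be the highest element read; the indexed case 1 above bounds by max_indx instead */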
2360 break; 2361 case 3: 2362 size = track->vtx_size * nverts; 2363 if (size != track->immd_dwords) { 2364 DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n", 2365 track->immd_dwords, size); 2366 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2367 nverts, track->vtx_size); 2368 return -EINVAL; 2369 } 2370 break; 2371 default: 2372 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2373 prim_walk); 2374 return -EINVAL; 2375 } 2376 2377 if (track->tex_dirty) { 2378 track->tex_dirty = false; 2379 return r100_cs_track_texture_check(rdev, track); 2380 } 2381 return 0; 2382 } 2383 2384 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 2385 { 2386 unsigned i, face; 2387 2388 track->cb_dirty = true; 2389 track->zb_dirty = true; 2390 track->tex_dirty = true; 2391 track->aa_dirty = true; 2392 2393 if (rdev->family < CHIP_R300) { 2394 track->num_cb = 1; 2395 if (rdev->family <= CHIP_RS200) 2396 track->num_texture = 3; 2397 else 2398 track->num_texture = 6; 2399 track->maxy = 2048; 2400 track->separate_cube = 1; 2401 } else { 2402 track->num_cb = 4; 2403 track->num_texture = 16; 2404 track->maxy = 4096; 2405 track->separate_cube = 0; 2406 track->aaresolve = false; 2407 track->aa.robj = NULL; 2408 } 2409 2410 for (i = 0; i < track->num_cb; i++) { 2411 track->cb[i].robj = NULL; 2412 track->cb[i].pitch = 8192; 2413 track->cb[i].cpp = 16; 2414 track->cb[i].offset = 0; 2415 } 2416 track->z_enabled = true; 2417 track->zb.robj = NULL; 2418 track->zb.pitch = 8192; 2419 track->zb.cpp = 4; 2420 track->zb.offset = 0; 2421 track->vtx_size = 0x7F; 2422 track->immd_dwords = 0xFFFFFFFFUL; 2423 track->num_arrays = 11; 2424 track->max_indx = 0x00FFFFFFUL; 2425 for (i = 0; i < track->num_arrays; i++) { 2426 track->arrays[i].robj = NULL; 2427 track->arrays[i].esize = 0x7F; 2428 } 2429 for (i = 0; i < track->num_texture; i++) { 2430 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 2431 track->textures[i].pitch = 16536; 2432 track->textures[i].width = 16536; 2433 track->textures[i].height = 16536; 2434 track->textures[i].width_11 = 1 << 11; 2435 track->textures[i].height_11 = 1 << 11; 2436 track->textures[i].num_levels = 12; 2437 if (rdev->family <= CHIP_RS200) { 2438 track->textures[i].tex_coord_type = 0; 2439 track->textures[i].txdepth = 0; 2440 } else { 2441 track->textures[i].txdepth = 16; 2442 track->textures[i].tex_coord_type = 1; 2443 } 2444 track->textures[i].cpp = 64; 2445 track->textures[i].robj = NULL; 2446 /* CS IB emission code makes sure texture units are disabled */ 2447 track->textures[i].enabled = false; 2448 track->textures[i].lookup_disable = false; 2449 track->textures[i].roundup_w = true; 2450 track->textures[i].roundup_h = true; 2451 if (track->separate_cube) 2452 for (face = 0; face < 5; face++) { 2453 track->textures[i].cube_info[face].robj = NULL; 2454 track->textures[i].cube_info[face].width = 16536; 2455 track->textures[i].cube_info[face].height = 16536; 2456 track->textures[i].cube_info[face].offset = 0; 2457 } 2458 } 2459 } 2460 2461 /* 2462 * Global GPU functions 2463 */ 2464 static void r100_errata(struct radeon_device *rdev) 2465 { 2466 rdev->pll_errata = 0; 2467 2468 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { 2469 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; 2470 } 2471 2472 if (rdev->family == CHIP_RV100 || 2473 rdev->family == CHIP_RS100 || 2474 rdev->family == CHIP_RS200) { 2475 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; 2476 } 2477 } 2478 2479 static int r100_rbbm_fifo_wait_for_entry(struct
radeon_device *rdev, unsigned n) 2480 { 2481 unsigned i; 2482 uint32_t tmp; 2483 2484 for (i = 0; i < rdev->usec_timeout; i++) { 2485 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2486 if (tmp >= n) { 2487 return 0; 2488 } 2489 DRM_UDELAY(1); 2490 } 2491 return -1; 2492 } 2493 2494 int r100_gui_wait_for_idle(struct radeon_device *rdev) 2495 { 2496 unsigned i; 2497 uint32_t tmp; 2498 2499 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2500 pr_warn("radeon: wait for empty RBBM fifo failed! Bad things might happen.\n"); 2501 } 2502 for (i = 0; i < rdev->usec_timeout; i++) { 2503 tmp = RREG32(RADEON_RBBM_STATUS); 2504 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2505 return 0; 2506 } 2507 DRM_UDELAY(1); 2508 } 2509 return -1; 2510 } 2511 2512 int r100_mc_wait_for_idle(struct radeon_device *rdev) 2513 { 2514 unsigned i; 2515 uint32_t tmp; 2516 2517 for (i = 0; i < rdev->usec_timeout; i++) { 2518 /* read MC_STATUS */ 2519 tmp = RREG32(RADEON_MC_STATUS); 2520 if (tmp & RADEON_MC_IDLE) { 2521 return 0; 2522 } 2523 DRM_UDELAY(1); 2524 } 2525 return -1; 2526 } 2527 2528 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2529 { 2530 u32 rbbm_status; 2531 2532 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2533 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2534 radeon_ring_lockup_update(rdev, ring); 2535 return false; 2536 } 2537 return radeon_ring_test_lockup(rdev, ring); 2538 } 2539 2540 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2541 void r100_enable_bm(struct radeon_device *rdev) 2542 { 2543 uint32_t tmp; 2544 /* Enable bus mastering */ 2545 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2546 WREG32(RADEON_BUS_CNTL, tmp); 2547 } 2548 2549 void r100_bm_disable(struct radeon_device *rdev) 2550 { 2551 u32 tmp; 2552 2553 /* disable bus mastering */ 2554 tmp = RREG32(R_000030_BUS_CNTL); 2555 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2556 mdelay(1); 2557 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2558 mdelay(1); 2559 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2560 tmp = RREG32(RADEON_BUS_CNTL); 2561 mdelay(1); 2562 pci_disable_busmaster(rdev->dev->bsddev); 2563 mdelay(1); 2564 } 2565 2566 int r100_asic_reset(struct radeon_device *rdev, bool hard) 2567 { 2568 struct r100_mc_save save; 2569 u32 status, tmp; 2570 int ret = 0; 2571 2572 status = RREG32(R_000E40_RBBM_STATUS); 2573 if (!G_000E40_GUI_ACTIVE(status)) { 2574 return 0; 2575 } 2576 r100_mc_stop(rdev, &save); 2577 status = RREG32(R_000E40_RBBM_STATUS); 2578 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2579 /* stop CP */ 2580 WREG32(RADEON_CP_CSQ_CNTL, 0); 2581 tmp = RREG32(RADEON_CP_RB_CNTL); 2582 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2583 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2584 WREG32(RADEON_CP_RB_WPTR, 0); 2585 WREG32(RADEON_CP_RB_CNTL, tmp); 2586 /* save PCI state */ 2587 pci_save_state(device_get_parent(rdev->dev->bsddev)); 2588 /* disable bus mastering */ 2589 r100_bm_disable(rdev); 2590 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2591 S_0000F0_SOFT_RESET_RE(1) | 2592 S_0000F0_SOFT_RESET_PP(1) | 2593 S_0000F0_SOFT_RESET_RB(1)); 2594 RREG32(R_0000F0_RBBM_SOFT_RESET); 2595 mdelay(500); 2596 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2597 mdelay(1); 2598 status = RREG32(R_000E40_RBBM_STATUS); 2599 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2600 /* reset CP */ 2601 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); 2602 
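/* read back, presumably as a posting read, so the reset is flushed before the delay below */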
RREG32(R_0000F0_RBBM_SOFT_RESET); 2603 mdelay(500); 2604 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2605 mdelay(1); 2606 status = RREG32(R_000E40_RBBM_STATUS); 2607 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2608 /* restore PCI & busmastering */ 2609 pci_restore_state(device_get_parent(rdev->dev->bsddev)); 2610 r100_enable_bm(rdev); 2611 /* Check if GPU is idle */ 2612 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2613 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2614 dev_err(rdev->dev, "failed to reset GPU\n"); 2615 ret = -1; 2616 } else 2617 dev_info(rdev->dev, "GPU reset succeeded\n"); 2618 r100_mc_resume(rdev, &save); 2619 return ret; 2620 } 2621 2622 void r100_set_common_regs(struct radeon_device *rdev) 2623 { 2624 struct drm_device *dev = rdev->ddev; 2625 bool force_dac2 = false; 2626 u32 tmp; 2627 2628 /* set these so they don't interfere with anything */ 2629 WREG32(RADEON_OV0_SCALE_CNTL, 0); 2630 WREG32(RADEON_SUBPIC_CNTL, 0); 2631 WREG32(RADEON_VIPH_CONTROL, 0); 2632 WREG32(RADEON_I2C_CNTL_1, 0); 2633 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 2634 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 2635 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 2636 2637 /* always set up dac2 on rn50 and some rv100 as lots 2638 * of servers seem to wire it up to a VGA port but 2639 * don't report it in the bios connector 2640 * table. 2641 */ 2642 switch (dev->pdev->device) { 2643 /* RN50 */ 2644 case 0x515e: 2645 case 0x5969: 2646 force_dac2 = true; 2647 break; 2648 /* RV100 */ 2649 case 0x5159: 2650 case 0x515a: 2651 /* DELL triple head servers */ 2652 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && 2653 ((dev->pdev->subsystem_device == 0x016c) || 2654 (dev->pdev->subsystem_device == 0x016d) || 2655 (dev->pdev->subsystem_device == 0x016e) || 2656 (dev->pdev->subsystem_device == 0x016f) || 2657 (dev->pdev->subsystem_device == 0x0170) || 2658 (dev->pdev->subsystem_device == 0x017d) || 2659 (dev->pdev->subsystem_device == 0x017e) || 2660 (dev->pdev->subsystem_device == 0x0183) || 2661 (dev->pdev->subsystem_device == 0x018a) || 2662 (dev->pdev->subsystem_device == 0x019a))) 2663 force_dac2 = true; 2664 break; 2665 } 2666 2667 if (force_dac2) { 2668 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 2669 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 2670 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 2671 2672 /* For CRT on DAC2, don't turn it on if BIOS didn't 2673 enable it, even if it's detected.
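When it is forced on here, the TV DAC below is programmed for a standard PS2 (VGA) CRT load.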
2674 */ 2675 2676 /* force it to crtc0 */ 2677 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 2678 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 2679 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 2680 2681 /* set up the TV DAC */ 2682 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 2683 RADEON_TV_DAC_STD_MASK | 2684 RADEON_TV_DAC_RDACPD | 2685 RADEON_TV_DAC_GDACPD | 2686 RADEON_TV_DAC_BDACPD | 2687 RADEON_TV_DAC_BGADJ_MASK | 2688 RADEON_TV_DAC_DACADJ_MASK); 2689 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 2690 RADEON_TV_DAC_NHOLD | 2691 RADEON_TV_DAC_STD_PS2 | 2692 (0x58 << 16)); 2693 2694 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 2695 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 2696 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 2697 } 2698 2699 /* switch PM block to ACPI mode */ 2700 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); 2701 tmp &= ~RADEON_PM_MODE_SEL; 2702 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); 2703 2704 } 2705 2706 /* 2707 * VRAM info 2708 */ 2709 static void r100_vram_get_type(struct radeon_device *rdev) 2710 { 2711 uint32_t tmp; 2712 2713 rdev->mc.vram_is_ddr = false; 2714 if (rdev->flags & RADEON_IS_IGP) 2715 rdev->mc.vram_is_ddr = true; 2716 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 2717 rdev->mc.vram_is_ddr = true; 2718 if ((rdev->family == CHIP_RV100) || 2719 (rdev->family == CHIP_RS100) || 2720 (rdev->family == CHIP_RS200)) { 2721 tmp = RREG32(RADEON_MEM_CNTL); 2722 if (tmp & RV100_HALF_MODE) { 2723 rdev->mc.vram_width = 32; 2724 } else { 2725 rdev->mc.vram_width = 64; 2726 } 2727 if (rdev->flags & RADEON_SINGLE_CRTC) { 2728 rdev->mc.vram_width /= 4; 2729 rdev->mc.vram_is_ddr = true; 2730 } 2731 } else if (rdev->family <= CHIP_RV280) { 2732 tmp = RREG32(RADEON_MEM_CNTL); 2733 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 2734 rdev->mc.vram_width = 128; 2735 } else { 2736 rdev->mc.vram_width = 64; 2737 } 2738 } else { 2739 /* newer IGPs */ 2740 rdev->mc.vram_width = 128; 2741 } 2742 } 2743 2744 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 2745 { 2746 u32 aper_size; 2747 u8 byte; 2748 2749 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2750 2751 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 2752 * that is, have the 2nd generation multifunction PCI interface 2753 */ 2754 if (rdev->family == CHIP_RV280 || 2755 rdev->family >= CHIP_RV350) { 2756 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 2757 ~RADEON_HDP_APER_CNTL); 2758 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 2759 return aper_size * 2; 2760 } 2761 2762 /* Older cards have all sorts of funny issues to deal with. First 2763 * check if it's a multifunction card by reading the PCI config 2764 * header type... Limit those to one aperture size 2765 */ 2766 pci_read_config_byte(rdev->pdev, 0xe, &byte); 2767 if (byte & 0x80) { 2768 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 2769 DRM_INFO("Limiting VRAM to one aperture\n"); 2770 return aper_size; 2771 } 2772 2773 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 2774 has set it up. We don't write this as it's broken on some ASICs but 2775 we expect the BIOS to have done the right thing (might be too optimistic...
2776 */ 2777 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 2778 return aper_size * 2; 2779 return aper_size; 2780 } 2781 2782 void r100_vram_init_sizes(struct radeon_device *rdev) 2783 { 2784 u64 config_aper_size; 2785 2786 /* work out accessible VRAM */ 2787 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 2788 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 2789 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); 2790 /* FIXME we don't use the second aperture yet when we could use it */ 2791 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2792 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2793 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2794 if (rdev->flags & RADEON_IS_IGP) { 2795 uint32_t tom; 2796 /* read NB_TOM to get the amount of ram stolen for the GPU */ 2797 tom = RREG32(RADEON_NB_TOM); 2798 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 2799 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2800 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2801 } else { 2802 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 2803 /* Some production boards of m6 will report 0 2804 * if it's 8 MB 2805 */ 2806 if (rdev->mc.real_vram_size == 0) { 2807 rdev->mc.real_vram_size = 8192 * 1024; 2808 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2809 } 2810 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 2811 * Novell bug 204882 + along with lots of ubuntu ones 2812 */ 2813 if (rdev->mc.aper_size > config_aper_size) 2814 config_aper_size = rdev->mc.aper_size; 2815 2816 if (config_aper_size > rdev->mc.real_vram_size) 2817 rdev->mc.mc_vram_size = config_aper_size; 2818 else 2819 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2820 } 2821 } 2822 2823 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2824 { 2825 uint32_t temp; 2826 2827 temp = RREG32(RADEON_CONFIG_CNTL); 2828 if (state == false) { 2829 temp &= ~RADEON_CFG_VGA_RAM_EN; 2830 temp |= RADEON_CFG_VGA_IO_DIS; 2831 } else { 2832 temp &= ~RADEON_CFG_VGA_IO_DIS; 2833 } 2834 WREG32(RADEON_CONFIG_CNTL, temp); 2835 } 2836 2837 static void r100_mc_init(struct radeon_device *rdev) 2838 { 2839 u64 base; 2840 2841 r100_vram_get_type(rdev); 2842 r100_vram_init_sizes(rdev); 2843 base = rdev->mc.aper_base; 2844 if (rdev->flags & RADEON_IS_IGP) 2845 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2846 radeon_vram_location(rdev, &rdev->mc, base); 2847 rdev->mc.gtt_base_align = 0; 2848 if (!(rdev->flags & RADEON_IS_AGP)) 2849 radeon_gtt_location(rdev, &rdev->mc); 2850 radeon_update_bandwidth_info(rdev); 2851 } 2852 2853 2854 /* 2855 * Indirect register accessors 2856 */ 2857 void r100_pll_errata_after_index(struct radeon_device *rdev) 2858 { 2859 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { 2860 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2861 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2862 } 2863 } 2864 2865 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2866 { 2867 /* This workaround is necessary on RV100, RS100 and RS200 chips 2868 * or the chip could hang on a subsequent access 2869 */ 2870 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2871 mdelay(5); 2872 } 2873 2874 /* This function is required to work around a hardware bug in some (all?) 2875 * revisions of the R300. This workaround should be called after every 2876 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2877 may not be correct.
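The save/mask/read/restore sequence on CLOCK_CNTL_INDEX below appears to flush the stale state out of the index register.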
2878 */ 2879 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2880 uint32_t save, tmp; 2881 2882 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2883 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2884 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2885 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2886 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2887 } 2888 } 2889 2890 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2891 { 2892 unsigned long flags; 2893 uint32_t data; 2894 2895 spin_lock_irqsave(&rdev->pll_idx_lock, flags); 2896 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2897 r100_pll_errata_after_index(rdev); 2898 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2899 r100_pll_errata_after_data(rdev); 2900 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); 2901 return data; 2902 } 2903 2904 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2905 { 2906 unsigned long flags; 2907 2908 spin_lock_irqsave(&rdev->pll_idx_lock, flags); 2909 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2910 r100_pll_errata_after_index(rdev); 2911 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2912 r100_pll_errata_after_data(rdev); 2913 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); 2914 } 2915 2916 static void r100_set_safe_registers(struct radeon_device *rdev) 2917 { 2918 if (ASIC_IS_RN50(rdev)) { 2919 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2920 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); 2921 } else if (rdev->family < CHIP_R200) { 2922 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2923 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 2924 } else { 2925 r200_set_safe_registers(rdev); 2926 } 2927 } 2928 2929 /* 2930 * Debugfs info 2931 */ 2932 #if defined(CONFIG_DEBUG_FS) 2933 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2934 { 2935 struct drm_info_node *node = (struct drm_info_node *) m->private; 2936 struct drm_device *dev = node->minor->dev; 2937 struct radeon_device *rdev = dev->dev_private; 2938 uint32_t reg, value; 2939 unsigned i; 2940 2941 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2942 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2943 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2944 for (i = 0; i < 64; i++) { 2945 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 2946 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 2947 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 2948 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 2949 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 2950 } 2951 return 0; 2952 } 2953 2954 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 2955 { 2956 struct drm_info_node *node = (struct drm_info_node *) m->private; 2957 struct drm_device *dev = node->minor->dev; 2958 struct radeon_device *rdev = dev->dev_private; 2959 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2960 uint32_t rdp, wdp; 2961 unsigned count, i, j; 2962 2963 radeon_ring_free_size(rdev, ring); 2964 rdp = RREG32(RADEON_CP_RB_RPTR); 2965 wdp = RREG32(RADEON_CP_RB_WPTR); 2966 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 2967 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2968 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2969 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2970 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2971 seq_printf(m, "%u dwords in ring\n", count); 2972 if (ring->ready) { 2973 for (j = 0; j <= count; j++) { 2974 i = (rdp + j) & ring->ptr_mask; 2975 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 
2976 } 2977 } 2978 return 0; 2979 } 2980 2981 2982 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 2983 { 2984 struct drm_info_node *node = (struct drm_info_node *) m->private; 2985 struct drm_device *dev = node->minor->dev; 2986 struct radeon_device *rdev = dev->dev_private; 2987 uint32_t csq_stat, csq2_stat, tmp; 2988 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 2989 unsigned i; 2990 2991 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2992 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 2993 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 2994 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 2995 r_rptr = (csq_stat >> 0) & 0x3ff; 2996 r_wptr = (csq_stat >> 10) & 0x3ff; 2997 ib1_rptr = (csq_stat >> 20) & 0x3ff; 2998 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 2999 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 3000 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 3001 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 3002 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 3003 seq_printf(m, "Ring rptr %u\n", r_rptr); 3004 seq_printf(m, "Ring wptr %u\n", r_wptr); 3005 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 3006 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 3007 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 3008 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 3009 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 3010 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 3011 seq_printf(m, "Ring fifo:\n"); 3012 for (i = 0; i < 256; i++) { 3013 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3014 tmp = RREG32(RADEON_CP_CSQ_DATA); 3015 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 3016 } 3017 seq_printf(m, "Indirect1 fifo:\n"); 3018 for (i = 256; i <= 512; i++) { 3019 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3020 tmp = RREG32(RADEON_CP_CSQ_DATA); 3021 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3022 } 3023 seq_printf(m, "Indirect2 fifo:\n"); 3024 for (i = 640; i < ib1_wptr; i++) { 3025 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3026 tmp = RREG32(RADEON_CP_CSQ_DATA); 3027 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3028 } 3029 return 0; 3030 } 3031 3032 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3033 { 3034 struct drm_info_node *node = (struct drm_info_node *) m->private; 3035 struct drm_device *dev = node->minor->dev; 3036 struct radeon_device *rdev = dev->dev_private; 3037 uint32_t tmp; 3038 3039 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3040 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3041 tmp = RREG32(RADEON_MC_FB_LOCATION); 3042 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3043 tmp = RREG32(RADEON_BUS_CNTL); 3044 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3045 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3046 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3047 tmp = RREG32(RADEON_AGP_BASE); 3048 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3049 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3050 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3051 tmp = RREG32(0x01D0); 3052 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3053 tmp = RREG32(RADEON_AIC_LO_ADDR); 3054 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3055 tmp = RREG32(RADEON_AIC_HI_ADDR); 3056 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3057 tmp = RREG32(0x01E4); 3058 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3059 return 0; 3060 } 3061 3062 static struct drm_info_list r100_debugfs_rbbm_list[] = { 3063 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3064 }; 3065 3066 static struct drm_info_list r100_debugfs_cp_list[] = { 3067 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3068 
{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3069 }; 3070 3071 static struct drm_info_list r100_debugfs_mc_info_list[] = { 3072 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3073 }; 3074 #endif 3075 3076 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3077 { 3078 #if defined(CONFIG_DEBUG_FS) 3079 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 3080 #else 3081 return 0; 3082 #endif 3083 } 3084 3085 int r100_debugfs_cp_init(struct radeon_device *rdev) 3086 { 3087 #if defined(CONFIG_DEBUG_FS) 3088 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 3089 #else 3090 return 0; 3091 #endif 3092 } 3093 3094 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 3095 { 3096 #if defined(CONFIG_DEBUG_FS) 3097 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 3098 #else 3099 return 0; 3100 #endif 3101 } 3102 3103 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 3104 uint32_t tiling_flags, uint32_t pitch, 3105 uint32_t offset, uint32_t obj_size) 3106 { 3107 int surf_index = reg * 16; 3108 int flags = 0; 3109 3110 if (rdev->family <= CHIP_RS200) { 3111 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3112 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3113 flags |= RADEON_SURF_TILE_COLOR_BOTH; 3114 if (tiling_flags & RADEON_TILING_MACRO) 3115 flags |= RADEON_SURF_TILE_COLOR_MACRO; 3116 /* setting pitch to 0 disables tiling */ 3117 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3118 == 0) 3119 pitch = 0; 3120 } else if (rdev->family <= CHIP_RV280) { 3121 if (tiling_flags & (RADEON_TILING_MACRO)) 3122 flags |= R200_SURF_TILE_COLOR_MACRO; 3123 if (tiling_flags & RADEON_TILING_MICRO) 3124 flags |= R200_SURF_TILE_COLOR_MICRO; 3125 } else { 3126 if (tiling_flags & RADEON_TILING_MACRO) 3127 flags |= R300_SURF_TILE_MACRO; 3128 if (tiling_flags & RADEON_TILING_MICRO) 3129 flags |= R300_SURF_TILE_MICRO; 3130 } 3131 3132 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 3133 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 3134 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 3135 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 3136 3137 /* r100/r200 divide by 16 */ 3138 if (rdev->family < CHIP_R300) 3139 flags |= pitch / 16; 3140 else 3141 flags |= pitch / 8; 3142 3143 3144 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3145 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3146 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3147 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3148 return 0; 3149 } 3150 3151 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3152 { 3153 int surf_index = reg * 16; 3154 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3155 } 3156 3157 void r100_bandwidth_update(struct radeon_device *rdev) 3158 { 3159 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3160 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3161 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; 3162 fixed20_12 crit_point_ff = {0}; 3163 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3164 fixed20_12 memtcas_ff[8] = { 3165 dfixed_init(1), 3166 dfixed_init(2), 3167 dfixed_init(3), 3168 dfixed_init(0), 3169 dfixed_init_half(1), 3170 dfixed_init_half(2), 3171 dfixed_init(0), 3172 }; 3173 fixed20_12 memtcas_rs480_ff[8] = { 3174 dfixed_init(0), 3175 dfixed_init(1), 3176 dfixed_init(2), 3177 dfixed_init(3), 3178 dfixed_init(0), 3179 dfixed_init_half(1), 3180 dfixed_init_half(2), 
3181 dfixed_init_half(3), 3182 }; 3183 fixed20_12 memtcas2_ff[8] = { 3184 dfixed_init(0), 3185 dfixed_init(1), 3186 dfixed_init(2), 3187 dfixed_init(3), 3188 dfixed_init(4), 3189 dfixed_init(5), 3190 dfixed_init(6), 3191 dfixed_init(7), 3192 }; 3193 fixed20_12 memtrbs[8] = { 3194 dfixed_init(1), 3195 dfixed_init_half(1), 3196 dfixed_init(2), 3197 dfixed_init_half(2), 3198 dfixed_init(3), 3199 dfixed_init_half(3), 3200 dfixed_init(4), 3201 dfixed_init_half(4) 3202 }; 3203 fixed20_12 memtrbs_r4xx[8] = { 3204 dfixed_init(4), 3205 dfixed_init(5), 3206 dfixed_init(6), 3207 dfixed_init(7), 3208 dfixed_init(8), 3209 dfixed_init(9), 3210 dfixed_init(10), 3211 dfixed_init(11) 3212 }; 3213 fixed20_12 min_mem_eff; 3214 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3215 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3216 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0}, 3217 disp_drain_rate2, read_return_rate; 3218 fixed20_12 time_disp1_drop_priority; 3219 int c; 3220 int cur_size = 16; /* in octawords */ 3221 int critical_point = 0, critical_point2; 3222 /* uint32_t read_return_rate, time_disp1_drop_priority; */ 3223 int stop_req, max_stop_req; 3224 struct drm_display_mode *mode1 = NULL; 3225 struct drm_display_mode *mode2 = NULL; 3226 uint32_t pixel_bytes1 = 0; 3227 uint32_t pixel_bytes2 = 0; 3228 3229 /* Guess line buffer size to be 8192 pixels */ 3230 u32 lb_size = 8192; 3231 3232 if (!rdev->mode_info.mode_config_initialized) 3233 return; 3234 3235 radeon_update_display_priority(rdev); 3236 3237 if (rdev->mode_info.crtcs[0]->base.enabled) { 3238 const struct drm_framebuffer *fb = 3239 rdev->mode_info.crtcs[0]->base.primary->fb; 3240 3241 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 3242 pixel_bytes1 = fb->format->cpp[0]; 3243 } 3244 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3245 if (rdev->mode_info.crtcs[1]->base.enabled) { 3246 const struct drm_framebuffer *fb = 3247 rdev->mode_info.crtcs[1]->base.primary->fb; 3248 3249 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 3250 pixel_bytes2 = fb->format->cpp[0]; 3251 } 3252 } 3253 3254 min_mem_eff.full = dfixed_const_8(0); 3255 /* get modes */ 3256 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 3257 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 3258 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); 3259 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); 3260 /* check crtc enables */ 3261 if (mode2) 3262 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); 3263 if (mode1) 3264 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); 3265 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); 3266 } 3267 3268 /* 3269 * determine if there is enough bandwidth for the current mode 3270 */ 3271 sclk_ff = rdev->pm.sclk; 3272 mclk_ff = rdev->pm.mclk; 3273 3274 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ?
2 : 1); 3275 temp_ff.full = dfixed_const(temp); 3276 mem_bw.full = dfixed_mul(mclk_ff, temp_ff); 3277 3278 pix_clk.full = 0; 3279 pix_clk2.full = 0; 3280 peak_disp_bw.full = 0; 3281 if (mode1) { 3282 temp_ff.full = dfixed_const(1000); 3283 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ 3284 pix_clk.full = dfixed_div(pix_clk, temp_ff); 3285 temp_ff.full = dfixed_const(pixel_bytes1); 3286 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); 3287 } 3288 if (mode2) { 3289 temp_ff.full = dfixed_const(1000); 3290 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ 3291 pix_clk2.full = dfixed_div(pix_clk2, temp_ff); 3292 temp_ff.full = dfixed_const(pixel_bytes2); 3293 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); 3294 } 3295 3296 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); 3297 if (peak_disp_bw.full >= mem_bw.full) { 3298 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 3299 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 3300 } 3301 3302 /* Get values from the EXT_MEM_CNTL register...converting its contents. */ 3303 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3304 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3305 mem_trcd = ((temp >> 2) & 0x3) + 1; 3306 mem_trp = ((temp & 0x3)) + 1; 3307 mem_tras = ((temp & 0x70) >> 4) + 1; 3308 } else if (rdev->family == CHIP_R300 || 3309 rdev->family == CHIP_R350) { /* r300, r350 */ 3310 mem_trcd = (temp & 0x7) + 1; 3311 mem_trp = ((temp >> 8) & 0x7) + 1; 3312 mem_tras = ((temp >> 11) & 0xf) + 4; 3313 } else if (rdev->family == CHIP_RV350 || 3314 rdev->family == CHIP_RV380) { 3315 /* rv3x0 */ 3316 mem_trcd = (temp & 0x7) + 3; 3317 mem_trp = ((temp >> 8) & 0x7) + 3; 3318 mem_tras = ((temp >> 11) & 0xf) + 6; 3319 } else if (rdev->family == CHIP_R420 || 3320 rdev->family == CHIP_R423 || 3321 rdev->family == CHIP_RV410) { 3322 /* r4xx */ 3323 mem_trcd = (temp & 0xf) + 3; 3324 if (mem_trcd > 15) 3325 mem_trcd = 15; 3326 mem_trp = ((temp >> 8) & 0xf) + 3; 3327 if (mem_trp > 15) 3328 mem_trp = 15; 3329 mem_tras = ((temp >> 12) & 0x1f) + 6; 3330 if (mem_tras > 31) 3331 mem_tras = 31; 3332 } else { /* RV200, R200 */ 3333 mem_trcd = (temp & 0x7) + 1; 3334 mem_trp = ((temp >> 8) & 0x7) + 1; 3335 mem_tras = ((temp >> 12) & 0xf) + 4; 3336 } 3337 /* convert to FF */ 3338 trcd_ff.full = dfixed_const(mem_trcd); 3339 trp_ff.full = dfixed_const(mem_trp); 3340 tras_ff.full = dfixed_const(mem_tras); 3341 3342 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */ 3343 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3344 data = (temp & (7 << 20)) >> 20; 3345 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3346 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3347 tcas_ff = memtcas_rs480_ff[data]; 3348 else 3349 tcas_ff = memtcas_ff[data]; 3350 } else 3351 tcas_ff = memtcas2_ff[data]; 3352 3353 if (rdev->family == CHIP_RS400 || 3354 rdev->family == CHIP_RS480) { 3355 /* extra cas latency stored in bits 23-25, 0-4 clocks */ 3356 data = (temp >> 23) & 0x7; 3357 if (data < 5) 3358 tcas_ff.full += dfixed_const(data); 3359 } 3360 3361 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3362 /* on the R300, Tcas is included in Trbs.
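The RBS position read back from the MC registers below is therefore looked up in the memtrbs tables and folded into tcas_ff rather than kept as a separate term.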
3363 */ 3364 temp = RREG32(RADEON_MEM_CNTL); 3365 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3366 if (data == 1) { 3367 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3368 temp = RREG32(R300_MC_IND_INDEX); 3369 temp &= ~R300_MC_IND_ADDR_MASK; 3370 temp |= R300_MC_READ_CNTL_CD_mcind; 3371 WREG32(R300_MC_IND_INDEX, temp); 3372 temp = RREG32(R300_MC_IND_DATA); 3373 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3374 } else { 3375 temp = RREG32(R300_MC_READ_CNTL_AB); 3376 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3377 } 3378 } else { 3379 temp = RREG32(R300_MC_READ_CNTL_AB); 3380 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3381 } 3382 if (rdev->family == CHIP_RV410 || 3383 rdev->family == CHIP_R420 || 3384 rdev->family == CHIP_R423) 3385 trbs_ff = memtrbs_r4xx[data]; 3386 else 3387 trbs_ff = memtrbs[data]; 3388 tcas_ff.full += trbs_ff.full; 3389 } 3390 3391 sclk_eff_ff.full = sclk_ff.full; 3392 3393 if (rdev->flags & RADEON_IS_AGP) { 3394 fixed20_12 agpmode_ff; 3395 agpmode_ff.full = dfixed_const(radeon_agpmode); 3396 temp_ff.full = dfixed_const_666(16); 3397 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3398 } 3399 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 3400 3401 if (ASIC_IS_R300(rdev)) { 3402 sclk_delay_ff.full = dfixed_const(250); 3403 } else { 3404 if ((rdev->family == CHIP_RV100) || 3405 rdev->flags & RADEON_IS_IGP) { 3406 if (rdev->mc.vram_is_ddr) 3407 sclk_delay_ff.full = dfixed_const(41); 3408 else 3409 sclk_delay_ff.full = dfixed_const(33); 3410 } else { 3411 if (rdev->mc.vram_width == 128) 3412 sclk_delay_ff.full = dfixed_const(57); 3413 else 3414 sclk_delay_ff.full = dfixed_const(41); 3415 } 3416 } 3417 3418 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3419 3420 if (rdev->mc.vram_is_ddr) { 3421 if (rdev->mc.vram_width == 32) { 3422 k1.full = dfixed_const(40); 3423 c = 3; 3424 } else { 3425 k1.full = dfixed_const(20); 3426 c = 1; 3427 } 3428 } else { 3429 k1.full = dfixed_const(40); 3430 c = 3; 3431 } 3432 3433 temp_ff.full = dfixed_const(2); 3434 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3435 temp_ff.full = dfixed_const(c); 3436 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3437 temp_ff.full = dfixed_const(4); 3438 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3439 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3440 mc_latency_mclk.full += k1.full; 3441 3442 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3443 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3444 3445 /* 3446 HW cursor time assuming worst case of full size colour cursor. 3447 */ 3448 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3449 temp_ff.full += trcd_ff.full; 3450 if (temp_ff.full < tras_ff.full) 3451 temp_ff.full = tras_ff.full; 3452 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3453 3454 temp_ff.full = dfixed_const(cur_size); 3455 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3456 /* 3457 Find the total latency for the display data. 
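This should come out to roughly max(mc_latency_mclk, mc_latency_sclk) once the fixed 8-sclk overhead and the worst-case cursor latency have been added to both terms.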
3458 */ 3459 disp_latency_overhead.full = dfixed_const(8); 3460 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3461 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3462 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3463 3464 if (mc_latency_mclk.full > mc_latency_sclk.full) 3465 disp_latency.full = mc_latency_mclk.full; 3466 else 3467 disp_latency.full = mc_latency_sclk.full; 3468 3469 /* setup Max GRPH_STOP_REQ default value */ 3470 if (ASIC_IS_RV100(rdev)) 3471 max_stop_req = 0x5c; 3472 else 3473 max_stop_req = 0x7c; 3474 3475 if (mode1) { 3476 /* CRTC1 3477 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3478 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3479 */ 3480 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3481 3482 if (stop_req > max_stop_req) 3483 stop_req = max_stop_req; 3484 3485 /* 3486 Find the drain rate of the display buffer. 3487 */ 3488 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3489 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3490 3491 /* 3492 Find the critical point of the display buffer. 3493 */ 3494 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3495 crit_point_ff.full += dfixed_const_half(0); 3496 3497 critical_point = dfixed_trunc(crit_point_ff); 3498 3499 if (rdev->disp_priority == 2) { 3500 critical_point = 0; 3501 } 3502 3503 /* 3504 The critical point should never be above max_stop_req-4. Setting 3505 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 3506 */ 3507 if (max_stop_req - critical_point < 4) 3508 critical_point = 0; 3509 3510 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3511 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 3512 critical_point = 0x10; 3513 } 3514 3515 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3516 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3517 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3518 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3519 if ((rdev->family == CHIP_R350) && 3520 (stop_req > 0x15)) { 3521 stop_req -= 0x10; 3522 } 3523 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3524 temp |= RADEON_GRPH_BUFFER_SIZE; 3525 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3526 RADEON_GRPH_CRITICAL_AT_SOF | 3527 RADEON_GRPH_STOP_CNTL); 3528 /* 3529 Write the result into the register. 3530 */ 3531 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3532 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3533 3534 #if 0 3535 if ((rdev->family == CHIP_RS400) || 3536 (rdev->family == CHIP_RS480)) { 3537 /* attempt to program RS400 disp regs correctly ??? 
*/ 3538 temp = RREG32(RS400_DISP1_REG_CNTL); 3539 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3540 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3541 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3542 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3543 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3544 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3545 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3546 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3547 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3548 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3549 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3550 } 3551 #endif 3552 3553 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", 3554 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3555 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3556 } 3557 3558 if (mode2) { 3559 u32 grph2_cntl; 3560 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3561 3562 if (stop_req > max_stop_req) 3563 stop_req = max_stop_req; 3564 3565 /* 3566 Find the drain rate of the display buffer. 3567 */ 3568 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3569 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3570 3571 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3572 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3573 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3574 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3575 if ((rdev->family == CHIP_R350) && 3576 (stop_req > 0x15)) { 3577 stop_req -= 0x10; 3578 } 3579 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3580 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3581 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3582 RADEON_GRPH_CRITICAL_AT_SOF | 3583 RADEON_GRPH_STOP_CNTL); 3584 3585 if ((rdev->family == CHIP_RS100) || 3586 (rdev->family == CHIP_RS200)) 3587 critical_point2 = 0; 3588 else { 3589 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3590 temp_ff.full = dfixed_const(temp); 3591 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3592 if (sclk_ff.full < temp_ff.full) 3593 temp_ff.full = sclk_ff.full; 3594 3595 read_return_rate.full = temp_ff.full; 3596 3597 if (mode1) { 3598 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3599 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3600 } else { 3601 time_disp1_drop_priority.full = 0; 3602 } 3603 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3604 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3605 crit_point_ff.full += dfixed_const_half(0); 3606 3607 critical_point2 = dfixed_trunc(crit_point_ff); 3608 3609 if (rdev->disp_priority == 2) { 3610 critical_point2 = 0; 3611 } 3612 3613 if (max_stop_req - critical_point2 < 4) 3614 critical_point2 = 0; 3615 3616 } 3617 3618 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3619 /* some R300 cards have problem with this set to 0 */ 3620 critical_point2 = 0x10; 3621 } 3622 3623 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3624 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3625 3626 if ((rdev->family == CHIP_RS400) || 3627 (rdev->family == CHIP_RS480)) { 3628 #if 0 3629 /* attempt to program RS400 disp2 regs correctly ??? 
*/ 3630 temp = RREG32(RS400_DISP2_REQ_CNTL1); 3631 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | 3632 RS400_DISP2_STOP_REQ_LEVEL_MASK); 3633 WREG32(RS400_DISP2_REQ_CNTL1, (temp | 3634 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3635 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3636 temp = RREG32(RS400_DISP2_REQ_CNTL2); 3637 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | 3638 RS400_DISP2_CRITICAL_POINT_STOP_MASK); 3639 WREG32(RS400_DISP2_REQ_CNTL2, (temp | 3640 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | 3641 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); 3642 #endif 3643 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); 3644 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); 3645 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); 3646 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); 3647 } 3648 3649 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", 3650 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 3651 } 3652 3653 /* Save number of lines the linebuffer leads before the scanout */ 3654 if (mode1) 3655 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); 3656 3657 if (mode2) 3658 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); 3659 } 3660 3661 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 3662 { 3663 uint32_t scratch; 3664 uint32_t tmp = 0; 3665 unsigned i; 3666 int r; 3667 3668 r = radeon_scratch_get(rdev, &scratch); 3669 if (r) { 3670 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 3671 return r; 3672 } 3673 WREG32(scratch, 0xCAFEDEAD); 3674 r = radeon_ring_lock(rdev, ring, 2); 3675 if (r) { 3676 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3677 radeon_scratch_free(rdev, scratch); 3678 return r; 3679 } 3680 radeon_ring_write(ring, PACKET0(scratch, 0)); 3681 radeon_ring_write(ring, 0xDEADBEEF); 3682 radeon_ring_unlock_commit(rdev, ring, false); 3683 for (i = 0; i < rdev->usec_timeout; i++) { 3684 tmp = RREG32(scratch); 3685 if (tmp == 0xDEADBEEF) { 3686 break; 3687 } 3688 DRM_UDELAY(1); 3689 } 3690 if (i < rdev->usec_timeout) { 3691 DRM_INFO("ring test succeeded in %d usecs\n", i); 3692 } else { 3693 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", 3694 scratch, tmp); 3695 r = -EINVAL; 3696 } 3697 radeon_scratch_free(rdev, scratch); 3698 return r; 3699 } 3700 3701 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3702 { 3703 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3704 3705 if (ring->rptr_save_reg) { 3706 u32 next_rptr = ring->wptr + 2 + 3; 3707 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); 3708 radeon_ring_write(ring, next_rptr); 3709 } 3710 3711 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); 3712 radeon_ring_write(ring, ib->gpu_addr); 3713 radeon_ring_write(ring, ib->length_dw); 3714 } 3715 3716 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3717 { 3718 struct radeon_ib ib; 3719 uint32_t scratch; 3720 uint32_t tmp = 0; 3721 unsigned i; 3722 int r; 3723 3724 r = radeon_scratch_get(rdev, &scratch); 3725 if (r) { 3726 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3727 return r; 3728 } 3729 WREG32(scratch, 0xCAFEDEAD); 3730 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); 3731 if (r) { 3732 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3733 goto free_scratch; 3734 } 3735 ib.ptr[0] = PACKET0(scratch, 0); 3736 ib.ptr[1] = 0xDEADBEEF; 3737 ib.ptr[2] = PACKET2(0); 3738 ib.ptr[3] 
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET0(scratch, 0);
	ib.ptr[1] = 0xDEADBEEF;
	ib.ptr[2] = PACKET2(0);
	ib.ptr[3] = PACKET2(0);
	ib.ptr[4] = PACKET2(0);
	ib.ptr[5] = PACKET2(0);
	ib.ptr[6] = PACKET2(0);
	ib.ptr[7] = PACKET2(0);
	ib.length_dw = 8;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
		goto free_ib;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP. We shouldn't need to do this, but better
	 * safe than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
					S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
			S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
						S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
			S_0003F8_CRTC2_DISPLAY_DIS(1) |
			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC))
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC))
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
}
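/*
 * Sketch (disabled, illustrative only) of how the r100_mc_stop()/
 * r100_mc_resume() pair above is meant to be used: callers bracket any
 * memory-controller reprogramming with the two calls so the CP and the
 * CRTCs stop issuing memory requests while the apertures move.
 * r100_mc_program() below is the real caller of this pattern.
 */
#if 0
	struct r100_mc_save save;

	r100_mc_stop(rdev, &save);	/* quiesce CP + display requests */
	/* ... reprogram MC_FB_LOCATION / MC_AGP_LOCATION here ... */
	r100_mc_resume(rdev, &save);	/* restore saved CRTC state */
#endif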
void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}

static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
				upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program MC; should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
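/*
 * Sketch (disabled, illustrative only) of the PLL read-modify-write
 * idiom used by r100_clock_startup() above: the clock-forcing bits are
 * OR'ed into SCLK_CNTL without disturbing the other fields, so any
 * gating policy set up by radeon_legacy_set_clock_gating() survives.
 */
#if 0
	u32 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1);	/* keep the CP clock running */
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
#endif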
int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r)
		rdev->accel_working = false;
	return r;
}

int r100_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However doing our init sequence with the CP and
 * WB stuff set up causes GPU hangs on the RN50 at least. So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp)
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp)
		WREG32(RADEON_CP_RB_CNTL, 0);
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp)
		WREG32(RADEON_SCRATCH_UMSK, 0);
}
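/*
 * Sketch (disabled, illustrative only) of where the sanity restore
 * above is meant to sit: early in init, before the CP or writeback are
 * configured, so a kexec'd kernel never inherits a live command queue.
 * r100_init() below follows this ordering.
 */
#if 0
	r100_vga_render_disable(rdev);	/* stop VGA rendering first */
	r100_restore_sanity(rdev);	/* quiesce leftover CP/WB state */
	/* ... only then bring the CP back up via r100_startup() ... */
#endif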
int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Sanity-check some registers to avoid hangs, e.g. after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disabling VGA needs to use the VGA request interface */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t ret;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
	return ret;
}

void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size) {
		return ioread32(rdev->rio_mem + reg);
	} else {
		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
		return ioread32(rdev->rio_mem + RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size) {
		iowrite32(v, rdev->rio_mem + reg);
	} else {
		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
		iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
	}
}
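/*
 * Sketch (disabled, illustrative only) of the index/data indirection
 * shared by the four accessors above: registers beyond the directly
 * mapped window are reached by writing the register offset to
 * RADEON_MM_INDEX and then transferring the value through
 * RADEON_MM_DATA. The MMIO path serializes the two-step access with
 * mmio_idx_lock; the I/O-port path mirrors the same protocol through
 * rio_mem.
 */
#if 0
	/* Open-coded indirect MMIO read; assumes the caller already
	 * holds rdev->mmio_idx_lock, as r100_mm_rreg_slow() does. */
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	val = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
#endif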