/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include <linux/firmware.h>
#include <linux/module.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeonkmsfw_R100_cp"
#define FIRMWARE_R200		"radeonkmsfw_R200_cp"
#define FIRMWARE_R300		"radeonkmsfw_R300_cp"
#define FIRMWARE_R420		"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690		"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600		"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520		"radeonkmsfw_R520_cp"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 * and others in some cases.
 */

static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0) {
		if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
			return true;
		else
			return false;
	} else {
		if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
			return true;
		else
			return false;
	}
}

static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 vline1, vline2;

	if (crtc == 0) {
		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	} else {
		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	}
	if (vline1 != vline2)
		return true;
	else
		return false;
}
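
/*
 * Both helpers above exist only for the vblank wait below: if the current
 * vline read twice in a row does not change, the CRTC timing generator is
 * taken to be stopped (e.g. a blanked display), and the polling loops in
 * r100_wait_for_vblank() bail out instead of spinning forever.
 */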

/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
			return;
	} else {
		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
			return;
	}

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
}

/**
 * r100_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Check if the last pageflip is still pending (r1xx-r4xx).
 * Returns the current update pending status.
 */
bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}
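
/*
 * Note on the two page-flip helpers above: the new scanout address is
 * written with OFFSET_LOCK held, and GUI_TRIG_OFFSET stays set until the
 * CRTC has latched the new base, which is why the same bit doubles as the
 * update_pending status reported by r100_page_flip_pending().
 */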

/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for not defined action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
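
/*
 * The up/downclock scans above skip power states flagged
 * SINGLE_DISPLAY_ONLY whenever more than one CRTC is active, so a
 * multi-head setup never selects a state that cannot drive all displays;
 * the downclock path additionally steps past a state flagged NO_DISPLAY
 * while CRTCs are still active.
 */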

/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
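
/*
 * With no per-profile table to draw on here, the mapping above is
 * hardwired: the low/mid profiles use power state 0, while the default
 * and the high/multi-head profiles use the default power state; the clock
 * mode index is always 0 because these power states carry a single clock
 * mode (see r100_pm_get_dynpm_state() above).
 */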

/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}
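
/*
 * r100_pm_prepare()/r100_pm_finish() below bracket the actual reclock:
 * display requests are blocked (DISP_REQ_EN_B) on every enabled CRTC
 * before the clocks change and unblocked again afterwards, presumably so
 * the CRTCs do not fetch from memory while the clocks are unstable.
 */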

/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) are idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}
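
/*
 * Note on the polarity handling above: the interrupt polarity is always
 * programmed to the opposite of the currently sensed state, so the next
 * hotplug interrupt fires when the connector state actually changes.
 */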

/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* It seems hw only caches one entry, so we should discard this
	 * entry; otherwise, if the first GPU GART read hits this entry, it
	 * could end up at the wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		WARN(1, "R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
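
/*
 * The PCI (AIC) GART used here is a flat table of little-endian 32-bit
 * DMA addresses, one per page, with no per-entry flags; that is why
 * r100_pci_gart_get_page_entry() below simply passes the address through
 * and r100_pci_gart_set_page() only stores the low 32 bits.
 */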

uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	return addr;
}

void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
			    uint64_t entry)
{
	u32 *gtt = rdev->gart.ptr;

	gtt[i] = cpu_to_le32(lower_32_bits(entry));
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);

	/* read back to post the write */
	RREG32(RADEON_GEN_INT_CNTL);

	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

irqreturn_t r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_vblank(rdev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_vblank(rdev, 1);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/**
 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 * @rdev: radeon device structure
 * @ring: ring buffer struct for emitting packets
 */
static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	r100_ring_hdp_flush(rdev, ring);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

bool r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	BUG();
	return false;
}
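
/*
 * r100_copy_blit() below implements buffer moves with the 2D engine: the
 * pages are split into chunks of at most 8191 and emitted as BITBLT_MULTI
 * blits, followed by a 2D destination cache flush, an engine idle wait and
 * a fence so the move can be waited on.
 */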

struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    unsigned num_gpu_pages,
				    struct reservation_object *resv)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	struct radeon_fence *fence;
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return ERR_PTR(-EINVAL);
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring, false);
}
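
/*
 * All of the pre-R600 families handled by this code share a handful of CP
 * microcode images, so r100_cp_init_microcode() below only needs to map
 * the chip family onto one of the R100/R200/R300/R420/RS690/RS600/R520
 * firmware blobs declared at the top of this file.
 */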

/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG_KMS("\n");

	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->datasize % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

u32 r100_gfx_get_rptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(RADEON_CP_RB_RPTR);

	return rptr;
}

u32 r100_gfx_get_wptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 wptr;

	wptr = RREG32(RADEON_CP_RB_WPTR);

	return wptr;
}

void r100_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}

/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
{
	release_firmware(rdev->me_fw);
	rdev->me_fw = NULL;
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->datasize / 4;
		fw_data = (const __be32 *)rdev->me_fw->data;
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	ring->align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer,
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
	else {
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);
	}

	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev->bsddev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	ring->ready = true;
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

/*
 * CS functions
 */
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx,
			    unsigned reg)
{
	int r;
	u32 tile_flags = 0;
	u32 tmp;
	struct radeon_bo_list *reloc;
	u32 value;

	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		radeon_cs_dump_packet(p, pkt);
		return r;
	}

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				radeon_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}

		tmp |= tile_flags;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
	} else
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
	return 0;
}

int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
{
	unsigned c, i;
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	int r = 0;
	volatile uint32_t *ib;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
	if (c > 16) {
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		radeon_cs_dump_packet(p, pkt);
		return -EINVAL;
	}
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;
	}
	if (c & 1) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
	}
	return r;
}
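
/*
 * r100_cs_parse_packet0() below checks a PACKET0 register write against the
 * generated safe-register bitmap (r100_reg_safe.h / rn50_reg_safe.h): a set
 * bit means the register needs the per-register check callback (relocation
 * patching or rejection), a clear bit means the write is passed through
 * untouched.
 */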

int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the wait until */
	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = R100_CP_PACKET0_GET_REG(header);
	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
	if (!crtc) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;

	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	/* FIXME: only allow PACKET3 blit? easier to check for out of
	 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		track->tex_dirty = true;
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
{
	unsigned idx;
	u32 value;

	idx = pkt->idx + 1;
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
		return -EINVAL;
	}
	return 0;
}
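
/*
 * r100_packet3_check() below validates every PACKET3 coming from userspace:
 * relocations are patched into the IB, and for the draw packets the CS
 * tracker state is re-checked before the command is allowed to trigger
 * rendering.
 */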
r100_packet3_check(struct radeon_cs_parser *p, 1927 struct radeon_cs_packet *pkt) 1928 { 1929 struct radeon_bo_list *reloc; 1930 struct r100_cs_track *track; 1931 unsigned idx; 1932 volatile uint32_t *ib; 1933 int r; 1934 1935 ib = p->ib.ptr; 1936 idx = pkt->idx + 1; 1937 track = (struct r100_cs_track *)p->track; 1938 switch (pkt->opcode) { 1939 case PACKET3_3D_LOAD_VBPNTR: 1940 r = r100_packet3_load_vbpntr(p, pkt, idx); 1941 if (r) 1942 return r; 1943 break; 1944 case PACKET3_INDX_BUFFER: 1945 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1946 if (r) { 1947 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1948 radeon_cs_dump_packet(p, pkt); 1949 return r; 1950 } 1951 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); 1952 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1953 if (r) { 1954 return r; 1955 } 1956 break; 1957 case 0x23: 1958 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1959 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1960 if (r) { 1961 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1962 radeon_cs_dump_packet(p, pkt); 1963 return r; 1964 } 1965 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); 1966 track->num_arrays = 1; 1967 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 1968 1969 track->arrays[0].robj = reloc->robj; 1970 track->arrays[0].esize = track->vtx_size; 1971 1972 track->max_indx = radeon_get_ib_value(p, idx+1); 1973 1974 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 1975 track->immd_dwords = pkt->count - 1; 1976 r = r100_cs_track_check(p->rdev, track); 1977 if (r) 1978 return r; 1979 break; 1980 case PACKET3_3D_DRAW_IMMD: 1981 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1982 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1983 return -EINVAL; 1984 } 1985 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 1986 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1987 track->immd_dwords = pkt->count - 1; 1988 r = r100_cs_track_check(p->rdev, track); 1989 if (r) 1990 return r; 1991 break; 1992 /* triggers drawing using in-packet vertex data */ 1993 case PACKET3_3D_DRAW_IMMD_2: 1994 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1995 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1996 return -EINVAL; 1997 } 1998 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1999 track->immd_dwords = pkt->count; 2000 r = r100_cs_track_check(p->rdev, track); 2001 if (r) 2002 return r; 2003 break; 2004 /* triggers drawing using in-packet vertex data */ 2005 case PACKET3_3D_DRAW_VBUF_2: 2006 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2007 r = r100_cs_track_check(p->rdev, track); 2008 if (r) 2009 return r; 2010 break; 2011 /* triggers drawing of vertex buffers setup elsewhere */ 2012 case PACKET3_3D_DRAW_INDX_2: 2013 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2014 r = r100_cs_track_check(p->rdev, track); 2015 if (r) 2016 return r; 2017 break; 2018 /* triggers drawing using indices to vertex buffer */ 2019 case PACKET3_3D_DRAW_VBUF: 2020 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2021 r = r100_cs_track_check(p->rdev, track); 2022 if (r) 2023 return r; 2024 break; 2025 /* triggers drawing of vertex buffers setup elsewhere */ 2026 case PACKET3_3D_DRAW_INDX: 2027 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2028 r = r100_cs_track_check(p->rdev, track); 2029 if (r) 2030 return r; 2031 break; 2032 /* triggers drawing using indices to vertex buffer */ 2033 case PACKET3_3D_CLEAR_HIZ: 2034 case PACKET3_3D_CLEAR_ZMASK: 2035 if 
(p->rdev->hyperz_filp != p->filp) 2036 return -EINVAL; 2037 break; 2038 case PACKET3_NOP: 2039 break; 2040 default: 2041 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2042 return -EINVAL; 2043 } 2044 return 0; 2045 } 2046 2047 int r100_cs_parse(struct radeon_cs_parser *p) 2048 { 2049 struct radeon_cs_packet pkt; 2050 struct r100_cs_track *track; 2051 int r; 2052 2053 track = kzalloc(sizeof(*track), GFP_KERNEL); 2054 if (!track) 2055 return -ENOMEM; 2056 r100_cs_track_clear(p->rdev, track); 2057 p->track = track; 2058 do { 2059 r = radeon_cs_packet_parse(p, &pkt, p->idx); 2060 if (r) { 2061 return r; 2062 } 2063 p->idx += pkt.count + 2; 2064 switch (pkt.type) { 2065 case RADEON_PACKET_TYPE0: 2066 if (p->rdev->family >= CHIP_R200) 2067 r = r100_cs_parse_packet0(p, &pkt, 2068 p->rdev->config.r100.reg_safe_bm, 2069 p->rdev->config.r100.reg_safe_bm_size, 2070 &r200_packet0_check); 2071 else 2072 r = r100_cs_parse_packet0(p, &pkt, 2073 p->rdev->config.r100.reg_safe_bm, 2074 p->rdev->config.r100.reg_safe_bm_size, 2075 &r100_packet0_check); 2076 break; 2077 case RADEON_PACKET_TYPE2: 2078 break; 2079 case RADEON_PACKET_TYPE3: 2080 r = r100_packet3_check(p, &pkt); 2081 break; 2082 default: 2083 DRM_ERROR("Unknown packet type %d !\n", 2084 pkt.type); 2085 return -EINVAL; 2086 } 2087 if (r) 2088 return r; 2089 } while (p->idx < p->chunk_ib->length_dw); 2090 return 0; 2091 } 2092 2093 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2094 { 2095 DRM_ERROR("pitch %d\n", t->pitch); 2096 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2097 DRM_ERROR("width %d\n", t->width); 2098 DRM_ERROR("width_11 %d\n", t->width_11); 2099 DRM_ERROR("height %d\n", t->height); 2100 DRM_ERROR("height_11 %d\n", t->height_11); 2101 DRM_ERROR("num levels %d\n", t->num_levels); 2102 DRM_ERROR("depth %d\n", t->txdepth); 2103 DRM_ERROR("bpp %d\n", t->cpp); 2104 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2105 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2106 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2107 DRM_ERROR("compress format %d\n", t->compress_format); 2108 } 2109 2110 static int r100_track_compress_size(int compress_format, int w, int h) 2111 { 2112 int block_width, block_height, block_bytes; 2113 int wblocks, hblocks; 2114 int min_wblocks; 2115 int sz; 2116 2117 block_width = 4; 2118 block_height = 4; 2119 2120 switch (compress_format) { 2121 case R100_TRACK_COMP_DXT1: 2122 block_bytes = 8; 2123 min_wblocks = 4; 2124 break; 2125 default: 2126 case R100_TRACK_COMP_DXT35: 2127 block_bytes = 16; 2128 min_wblocks = 2; 2129 break; 2130 } 2131 2132 hblocks = (h + block_height - 1) / block_height; 2133 wblocks = (w + block_width - 1) / block_width; 2134 if (wblocks < min_wblocks) 2135 wblocks = min_wblocks; 2136 sz = wblocks * hblocks * block_bytes; 2137 return sz; 2138 } 2139 2140 static int r100_cs_track_cube(struct radeon_device *rdev, 2141 struct r100_cs_track *track, unsigned idx) 2142 { 2143 unsigned face, w, h; 2144 struct radeon_bo *cube_robj; 2145 unsigned long size; 2146 unsigned compress_format = track->textures[idx].compress_format; 2147 2148 for (face = 0; face < 5; face++) { 2149 cube_robj = track->textures[idx].cube_info[face].robj; 2150 w = track->textures[idx].cube_info[face].width; 2151 h = track->textures[idx].cube_info[face].height; 2152 2153 if (compress_format) { 2154 size = r100_track_compress_size(compress_format, w, h); 2155 } else 2156 size = w * h; 2157 size *= track->textures[idx].cpp; 2158 2159 size += 
track->textures[idx].cube_info[face].offset; 2160 2161 if (size > radeon_bo_size(cube_robj)) { 2162 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2163 size, radeon_bo_size(cube_robj)); 2164 r100_cs_track_texture_print(&track->textures[idx]); 2165 return -1; 2166 } 2167 } 2168 return 0; 2169 } 2170 2171 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2172 struct r100_cs_track *track) 2173 { 2174 struct radeon_bo *robj; 2175 unsigned long size; 2176 unsigned u, i, w, h, d; 2177 int ret; 2178 2179 for (u = 0; u < track->num_texture; u++) { 2180 if (!track->textures[u].enabled) 2181 continue; 2182 if (track->textures[u].lookup_disable) 2183 continue; 2184 robj = track->textures[u].robj; 2185 if (robj == NULL) { 2186 DRM_ERROR("No texture bound to unit %u\n", u); 2187 return -EINVAL; 2188 } 2189 size = 0; 2190 for (i = 0; i <= track->textures[u].num_levels; i++) { 2191 if (track->textures[u].use_pitch) { 2192 if (rdev->family < CHIP_R300) 2193 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2194 else 2195 w = track->textures[u].pitch / (1 << i); 2196 } else { 2197 w = track->textures[u].width; 2198 if (rdev->family >= CHIP_RV515) 2199 w |= track->textures[u].width_11; 2200 w = w / (1 << i); 2201 if (track->textures[u].roundup_w) 2202 w = roundup_pow_of_two(w); 2203 } 2204 h = track->textures[u].height; 2205 if (rdev->family >= CHIP_RV515) 2206 h |= track->textures[u].height_11; 2207 h = h / (1 << i); 2208 if (track->textures[u].roundup_h) 2209 h = roundup_pow_of_two(h); 2210 if (track->textures[u].tex_coord_type == 1) { 2211 d = (1 << track->textures[u].txdepth) / (1 << i); 2212 if (!d) 2213 d = 1; 2214 } else { 2215 d = 1; 2216 } 2217 if (track->textures[u].compress_format) { 2218 2219 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2220 /* compressed textures are block based */ 2221 } else 2222 size += w * h * d; 2223 } 2224 size *= track->textures[u].cpp; 2225 2226 switch (track->textures[u].tex_coord_type) { 2227 case 0: 2228 case 1: 2229 break; 2230 case 2: 2231 if (track->separate_cube) { 2232 ret = r100_cs_track_cube(rdev, track, u); 2233 if (ret) 2234 return ret; 2235 } else 2236 size *= 6; 2237 break; 2238 default: 2239 DRM_ERROR("Invalid texture coordinate type %u for unit " 2240 "%u\n", track->textures[u].tex_coord_type, u); 2241 return -EINVAL; 2242 } 2243 if (size > radeon_bo_size(robj)) { 2244 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2245 "%lu\n", u, size, radeon_bo_size(robj)); 2246 r100_cs_track_texture_print(&track->textures[u]); 2247 return -EINVAL; 2248 } 2249 } 2250 return 0; 2251 } 2252 2253 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2254 { 2255 unsigned i; 2256 unsigned long size; 2257 unsigned prim_walk; 2258 unsigned nverts; 2259 unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; 2260 2261 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2262 !track->blend_read_enable) 2263 num_cb = 0; 2264 2265 for (i = 0; i < num_cb; i++) { 2266 if (track->cb[i].robj == NULL) { 2267 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2268 return -EINVAL; 2269 } 2270 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2271 size += track->cb[i].offset; 2272 if (size > radeon_bo_size(track->cb[i].robj)) { 2273 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2274 "(need %lu have %lu) !\n", i, size, 2275 radeon_bo_size(track->cb[i].robj)); 2276 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2277 i, track->cb[i].pitch, track->cb[i].cpp, 2278 track->cb[i].offset, track->maxy); 2279 return -EINVAL; 2280 } 2281 } 2282 track->cb_dirty = false; 2283 2284 if (track->zb_dirty && track->z_enabled) { 2285 if (track->zb.robj == NULL) { 2286 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2287 return -EINVAL; 2288 } 2289 size = track->zb.pitch * track->zb.cpp * track->maxy; 2290 size += track->zb.offset; 2291 if (size > radeon_bo_size(track->zb.robj)) { 2292 DRM_ERROR("[drm] Buffer too small for z buffer " 2293 "(need %lu have %lu) !\n", size, 2294 radeon_bo_size(track->zb.robj)); 2295 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2296 track->zb.pitch, track->zb.cpp, 2297 track->zb.offset, track->maxy); 2298 return -EINVAL; 2299 } 2300 } 2301 track->zb_dirty = false; 2302 2303 if (track->aa_dirty && track->aaresolve) { 2304 if (track->aa.robj == NULL) { 2305 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2306 return -EINVAL; 2307 } 2308 /* I believe the format comes from colorbuffer0. */ 2309 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2310 size += track->aa.offset; 2311 if (size > radeon_bo_size(track->aa.robj)) { 2312 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2313 "(need %lu have %lu) !\n", i, size, 2314 radeon_bo_size(track->aa.robj)); 2315 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2316 i, track->aa.pitch, track->cb[0].cpp, 2317 track->aa.offset, track->maxy); 2318 return -EINVAL; 2319 } 2320 } 2321 track->aa_dirty = false; 2322 2323 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2324 if (track->vap_vf_cntl & (1 << 14)) { 2325 nverts = track->vap_alt_nverts; 2326 } else { 2327 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2328 } 2329 switch (prim_walk) { 2330 case 1: 2331 for (i = 0; i < track->num_arrays; i++) { 2332 size = track->arrays[i].esize * track->max_indx * 4; 2333 if (track->arrays[i].robj == NULL) { 2334 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2335 "bound\n", prim_walk, i); 2336 return -EINVAL; 2337 } 2338 if (size > radeon_bo_size(track->arrays[i].robj)) { 2339 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2340 "need %lu dwords have %lu dwords\n", 2341 prim_walk, i, size >> 2, 2342 radeon_bo_size(track->arrays[i].robj) 2343 >> 2); 2344 DRM_ERROR("Max indices %u\n", track->max_indx); 2345 return -EINVAL; 2346 } 2347 } 2348 break; 2349 case 2: 2350 for (i = 0; i < track->num_arrays; i++) { 2351 size = track->arrays[i].esize * (nverts - 1) * 4; 2352 if (track->arrays[i].robj == NULL) { 2353 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2354 "bound\n", prim_walk, i); 2355 return -EINVAL; 2356 } 2357 if (size > radeon_bo_size(track->arrays[i].robj)) { 2358 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2359 "need %lu dwords have %lu dwords\n", 2360 prim_walk, i, size >> 2, 2361 radeon_bo_size(track->arrays[i].robj) 2362 >> 2); 2363 return -EINVAL; 2364 } 2365 } 
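/* PRIM_WALK == 2 walks the vertex arrays sequentially, so the check above
 * sizes each bound array as (nverts - 1) elements of esize dwords (the
 * "* 4" converts dwords to bytes) before comparing against the BO size.
 */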
2366 break; 2367 case 3: 2368 size = track->vtx_size * nverts; 2369 if (size != track->immd_dwords) { 2370 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", 2371 track->immd_dwords, size); 2372 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2373 nverts, track->vtx_size); 2374 return -EINVAL; 2375 } 2376 break; 2377 default: 2378 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2379 prim_walk); 2380 return -EINVAL; 2381 } 2382 2383 if (track->tex_dirty) { 2384 track->tex_dirty = false; 2385 return r100_cs_track_texture_check(rdev, track); 2386 } 2387 return 0; 2388 } 2389 2390 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 2391 { 2392 unsigned i, face; 2393 2394 track->cb_dirty = true; 2395 track->zb_dirty = true; 2396 track->tex_dirty = true; 2397 track->aa_dirty = true; 2398 2399 if (rdev->family < CHIP_R300) { 2400 track->num_cb = 1; 2401 if (rdev->family <= CHIP_RS200) 2402 track->num_texture = 3; 2403 else 2404 track->num_texture = 6; 2405 track->maxy = 2048; 2406 track->separate_cube = 1; 2407 } else { 2408 track->num_cb = 4; 2409 track->num_texture = 16; 2410 track->maxy = 4096; 2411 track->separate_cube = 0; 2412 track->aaresolve = false; 2413 track->aa.robj = NULL; 2414 } 2415 2416 for (i = 0; i < track->num_cb; i++) { 2417 track->cb[i].robj = NULL; 2418 track->cb[i].pitch = 8192; 2419 track->cb[i].cpp = 16; 2420 track->cb[i].offset = 0; 2421 } 2422 track->z_enabled = true; 2423 track->zb.robj = NULL; 2424 track->zb.pitch = 8192; 2425 track->zb.cpp = 4; 2426 track->zb.offset = 0; 2427 track->vtx_size = 0x7F; 2428 track->immd_dwords = 0xFFFFFFFFUL; 2429 track->num_arrays = 11; 2430 track->max_indx = 0x00FFFFFFUL; 2431 for (i = 0; i < track->num_arrays; i++) { 2432 track->arrays[i].robj = NULL; 2433 track->arrays[i].esize = 0x7F; 2434 } 2435 for (i = 0; i < track->num_texture; i++) { 2436 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 2437 track->textures[i].pitch = 16536; 2438 track->textures[i].width = 16536; 2439 track->textures[i].height = 16536; 2440 track->textures[i].width_11 = 1 << 11; 2441 track->textures[i].height_11 = 1 << 11; 2442 track->textures[i].num_levels = 12; 2443 if (rdev->family <= CHIP_RS200) { 2444 track->textures[i].tex_coord_type = 0; 2445 track->textures[i].txdepth = 0; 2446 } else { 2447 track->textures[i].txdepth = 16; 2448 track->textures[i].tex_coord_type = 1; 2449 } 2450 track->textures[i].cpp = 64; 2451 track->textures[i].robj = NULL; 2452 /* CS IB emission code makes sure texture unit are disabled */ 2453 track->textures[i].enabled = false; 2454 track->textures[i].lookup_disable = false; 2455 track->textures[i].roundup_w = true; 2456 track->textures[i].roundup_h = true; 2457 if (track->separate_cube) 2458 for (face = 0; face < 5; face++) { 2459 track->textures[i].cube_info[face].robj = NULL; 2460 track->textures[i].cube_info[face].width = 16536; 2461 track->textures[i].cube_info[face].height = 16536; 2462 track->textures[i].cube_info[face].offset = 0; 2463 } 2464 } 2465 } 2466 2467 /* 2468 * Global GPU functions 2469 */ 2470 static void r100_errata(struct radeon_device *rdev) 2471 { 2472 rdev->pll_errata = 0; 2473 2474 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { 2475 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; 2476 } 2477 2478 if (rdev->family == CHIP_RV100 || 2479 rdev->family == CHIP_RS100 || 2480 rdev->family == CHIP_RS200) { 2481 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; 2482 } 2483 } 2484 2485 static int r100_rbbm_fifo_wait_for_entry(struct 
radeon_device *rdev, unsigned n) 2486 { 2487 unsigned i; 2488 uint32_t tmp; 2489 2490 for (i = 0; i < rdev->usec_timeout; i++) { 2491 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2492 if (tmp >= n) { 2493 return 0; 2494 } 2495 DRM_UDELAY(1); 2496 } 2497 return -1; 2498 } 2499 2500 int r100_gui_wait_for_idle(struct radeon_device *rdev) 2501 { 2502 unsigned i; 2503 uint32_t tmp; 2504 2505 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2506 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" 2507 " Bad things might happen.\n"); 2508 } 2509 for (i = 0; i < rdev->usec_timeout; i++) { 2510 tmp = RREG32(RADEON_RBBM_STATUS); 2511 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2512 return 0; 2513 } 2514 DRM_UDELAY(1); 2515 } 2516 return -1; 2517 } 2518 2519 int r100_mc_wait_for_idle(struct radeon_device *rdev) 2520 { 2521 unsigned i; 2522 uint32_t tmp; 2523 2524 for (i = 0; i < rdev->usec_timeout; i++) { 2525 /* read MC_STATUS */ 2526 tmp = RREG32(RADEON_MC_STATUS); 2527 if (tmp & RADEON_MC_IDLE) { 2528 return 0; 2529 } 2530 DRM_UDELAY(1); 2531 } 2532 return -1; 2533 } 2534 2535 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2536 { 2537 u32 rbbm_status; 2538 2539 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2540 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2541 radeon_ring_lockup_update(rdev, ring); 2542 return false; 2543 } 2544 return radeon_ring_test_lockup(rdev, ring); 2545 } 2546 2547 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2548 void r100_enable_bm(struct radeon_device *rdev) 2549 { 2550 uint32_t tmp; 2551 /* Enable bus mastering */ 2552 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2553 WREG32(RADEON_BUS_CNTL, tmp); 2554 } 2555 2556 void r100_bm_disable(struct radeon_device *rdev) 2557 { 2558 u32 tmp; 2559 2560 /* disable bus mastering */ 2561 tmp = RREG32(R_000030_BUS_CNTL); 2562 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2563 mdelay(1); 2564 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2565 mdelay(1); 2566 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2567 tmp = RREG32(RADEON_BUS_CNTL); 2568 mdelay(1); 2569 pci_disable_busmaster(rdev->dev->bsddev); 2570 mdelay(1); 2571 } 2572 2573 int r100_asic_reset(struct radeon_device *rdev, bool hard) 2574 { 2575 struct r100_mc_save save; 2576 u32 status, tmp; 2577 int ret = 0; 2578 2579 status = RREG32(R_000E40_RBBM_STATUS); 2580 if (!G_000E40_GUI_ACTIVE(status)) { 2581 return 0; 2582 } 2583 r100_mc_stop(rdev, &save); 2584 status = RREG32(R_000E40_RBBM_STATUS); 2585 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2586 /* stop CP */ 2587 WREG32(RADEON_CP_CSQ_CNTL, 0); 2588 tmp = RREG32(RADEON_CP_RB_CNTL); 2589 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2590 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2591 WREG32(RADEON_CP_RB_WPTR, 0); 2592 WREG32(RADEON_CP_RB_CNTL, tmp); 2593 /* save PCI state */ 2594 pci_save_state(device_get_parent(rdev->dev->bsddev)); 2595 /* disable bus mastering */ 2596 r100_bm_disable(rdev); 2597 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2598 S_0000F0_SOFT_RESET_RE(1) | 2599 S_0000F0_SOFT_RESET_PP(1) | 2600 S_0000F0_SOFT_RESET_RB(1)); 2601 RREG32(R_0000F0_RBBM_SOFT_RESET); 2602 mdelay(500); 2603 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2604 mdelay(1); 2605 status = RREG32(R_000E40_RBBM_STATUS); 2606 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2607 /* reset CP */ 2608 WREG32(R_0000F0_RBBM_SOFT_RESET, 
S_0000F0_SOFT_RESET_CP(1)); 2609 RREG32(R_0000F0_RBBM_SOFT_RESET); 2610 mdelay(500); 2611 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2612 mdelay(1); 2613 status = RREG32(R_000E40_RBBM_STATUS); 2614 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2615 /* restore PCI & busmastering */ 2616 pci_restore_state(device_get_parent(rdev->dev->bsddev)); 2617 r100_enable_bm(rdev); 2618 /* Check if GPU is idle */ 2619 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2620 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2621 dev_err(rdev->dev, "failed to reset GPU\n"); 2622 ret = -1; 2623 } else 2624 dev_info(rdev->dev, "GPU reset succeeded\n"); 2625 r100_mc_resume(rdev, &save); 2626 return ret; 2627 } 2628 2629 void r100_set_common_regs(struct radeon_device *rdev) 2630 { 2631 struct drm_device *dev = rdev->ddev; 2632 bool force_dac2 = false; 2633 u32 tmp; 2634 2635 /* set these so they don't interfere with anything */ 2636 WREG32(RADEON_OV0_SCALE_CNTL, 0); 2637 WREG32(RADEON_SUBPIC_CNTL, 0); 2638 WREG32(RADEON_VIPH_CONTROL, 0); 2639 WREG32(RADEON_I2C_CNTL_1, 0); 2640 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 2641 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 2642 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 2643 2644 /* always set up dac2 on rn50 and some rv100 as lots 2645 * of servers seem to wire it up to a VGA port but 2646 * don't report it in the bios connector 2647 * table. 2648 */ 2649 switch (dev->pdev->device) { 2650 /* RN50 */ 2651 case 0x515e: 2652 case 0x5969: 2653 force_dac2 = true; 2654 break; 2655 /* RV100 */ 2656 case 0x5159: 2657 case 0x515a: 2658 /* DELL triple head servers */ 2659 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && 2660 ((dev->pdev->subsystem_device == 0x016c) || 2661 (dev->pdev->subsystem_device == 0x016d) || 2662 (dev->pdev->subsystem_device == 0x016e) || 2663 (dev->pdev->subsystem_device == 0x016f) || 2664 (dev->pdev->subsystem_device == 0x0170) || 2665 (dev->pdev->subsystem_device == 0x017d) || 2666 (dev->pdev->subsystem_device == 0x017e) || 2667 (dev->pdev->subsystem_device == 0x0183) || 2668 (dev->pdev->subsystem_device == 0x018a) || 2669 (dev->pdev->subsystem_device == 0x019a))) 2670 force_dac2 = true; 2671 break; 2672 } 2673 2674 if (force_dac2) { 2675 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 2676 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 2677 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 2678 2679 /* For CRT on DAC2, don't turn it on if BIOS didn't 2680 enable it, even if it's detected. 
2681 */ 2682 2683 /* force it to crtc0 */ 2684 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 2685 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 2686 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 2687 2688 /* set up the TV DAC */ 2689 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 2690 RADEON_TV_DAC_STD_MASK | 2691 RADEON_TV_DAC_RDACPD | 2692 RADEON_TV_DAC_GDACPD | 2693 RADEON_TV_DAC_BDACPD | 2694 RADEON_TV_DAC_BGADJ_MASK | 2695 RADEON_TV_DAC_DACADJ_MASK); 2696 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 2697 RADEON_TV_DAC_NHOLD | 2698 RADEON_TV_DAC_STD_PS2 | 2699 (0x58 << 16)); 2700 2701 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 2702 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 2703 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 2704 } 2705 2706 /* switch PM block to ACPI mode */ 2707 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); 2708 tmp &= ~RADEON_PM_MODE_SEL; 2709 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); 2710 2711 } 2712 2713 /* 2714 * VRAM info 2715 */ 2716 static void r100_vram_get_type(struct radeon_device *rdev) 2717 { 2718 uint32_t tmp; 2719 2720 rdev->mc.vram_is_ddr = false; 2721 if (rdev->flags & RADEON_IS_IGP) 2722 rdev->mc.vram_is_ddr = true; 2723 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 2724 rdev->mc.vram_is_ddr = true; 2725 if ((rdev->family == CHIP_RV100) || 2726 (rdev->family == CHIP_RS100) || 2727 (rdev->family == CHIP_RS200)) { 2728 tmp = RREG32(RADEON_MEM_CNTL); 2729 if (tmp & RV100_HALF_MODE) { 2730 rdev->mc.vram_width = 32; 2731 } else { 2732 rdev->mc.vram_width = 64; 2733 } 2734 if (rdev->flags & RADEON_SINGLE_CRTC) { 2735 rdev->mc.vram_width /= 4; 2736 rdev->mc.vram_is_ddr = true; 2737 } 2738 } else if (rdev->family <= CHIP_RV280) { 2739 tmp = RREG32(RADEON_MEM_CNTL); 2740 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 2741 rdev->mc.vram_width = 128; 2742 } else { 2743 rdev->mc.vram_width = 64; 2744 } 2745 } else { 2746 /* newer IGPs */ 2747 rdev->mc.vram_width = 128; 2748 } 2749 } 2750 2751 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 2752 { 2753 u32 aper_size; 2754 u8 byte; 2755 2756 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2757 2758 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 2759 * that is has the 2nd generation multifunction PCI interface 2760 */ 2761 if (rdev->family == CHIP_RV280 || 2762 rdev->family >= CHIP_RV350) { 2763 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 2764 ~RADEON_HDP_APER_CNTL); 2765 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 2766 return aper_size * 2; 2767 } 2768 2769 /* Older cards have all sorts of funny issues to deal with. First 2770 * check if it's a multifunction card by reading the PCI config 2771 * header type... Limit those to one aperture size 2772 */ 2773 byte = pci_read_config(rdev->dev->bsddev, 0xe, 1); 2774 if (byte & 0x80) { 2775 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 2776 DRM_INFO("Limiting VRAM to one aperture\n"); 2777 return aper_size; 2778 } 2779 2780 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 2781 * have set it up. We don't write this as it's broken on some ASICs but 2782 * we expect the BIOS to have done the right thing (might be too optimistic...) 
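 * If the BIOS already set HDP_APER_CNTL we assume both apertures are
 * usable and report twice CONFIG_APER_SIZE below; otherwise only a single
 * aperture is exposed.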
2783 */ 2784 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 2785 return aper_size * 2; 2786 return aper_size; 2787 } 2788 2789 void r100_vram_init_sizes(struct radeon_device *rdev) 2790 { 2791 u64 config_aper_size; 2792 2793 /* work out accessible VRAM */ 2794 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 2795 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 2796 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); 2797 /* FIXME we don't use the second aperture yet when we could use it */ 2798 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2799 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2800 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2801 if (rdev->flags & RADEON_IS_IGP) { 2802 uint32_t tom; 2803 /* read NB_TOM to get the amount of ram stolen for the GPU */ 2804 tom = RREG32(RADEON_NB_TOM); 2805 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 2806 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2807 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2808 } else { 2809 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 2810 /* Some production boards of m6 will report 0 2811 * if it's 8 MB 2812 */ 2813 if (rdev->mc.real_vram_size == 0) { 2814 rdev->mc.real_vram_size = 8192 * 1024; 2815 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2816 } 2817 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 2818 * Novell bug 204882 + along with lots of ubuntu ones 2819 */ 2820 if (rdev->mc.aper_size > config_aper_size) 2821 config_aper_size = rdev->mc.aper_size; 2822 2823 if (config_aper_size > rdev->mc.real_vram_size) 2824 rdev->mc.mc_vram_size = config_aper_size; 2825 else 2826 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2827 } 2828 } 2829 2830 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2831 { 2832 uint32_t temp; 2833 2834 temp = RREG32(RADEON_CONFIG_CNTL); 2835 if (state == false) { 2836 temp &= ~RADEON_CFG_VGA_RAM_EN; 2837 temp |= RADEON_CFG_VGA_IO_DIS; 2838 } else { 2839 temp &= ~RADEON_CFG_VGA_IO_DIS; 2840 } 2841 WREG32(RADEON_CONFIG_CNTL, temp); 2842 } 2843 2844 static void r100_mc_init(struct radeon_device *rdev) 2845 { 2846 u64 base; 2847 2848 r100_vram_get_type(rdev); 2849 r100_vram_init_sizes(rdev); 2850 base = rdev->mc.aper_base; 2851 if (rdev->flags & RADEON_IS_IGP) 2852 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2853 radeon_vram_location(rdev, &rdev->mc, base); 2854 rdev->mc.gtt_base_align = 0; 2855 if (!(rdev->flags & RADEON_IS_AGP)) 2856 radeon_gtt_location(rdev, &rdev->mc); 2857 radeon_update_bandwidth_info(rdev); 2858 } 2859 2860 2861 /* 2862 * Indirect registers accessor 2863 */ 2864 void r100_pll_errata_after_index(struct radeon_device *rdev) 2865 { 2866 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { 2867 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2868 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2869 } 2870 } 2871 2872 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2873 { 2874 /* This workarounds is necessary on RV100, RS100 and RS200 chips 2875 * or the chip could hang on a subsequent access 2876 */ 2877 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2878 mdelay(5); 2879 } 2880 2881 /* This function is required to workaround a hardware bug in some (all?) 2882 * revisions of the R300. This workaround should be called after every 2883 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2884 * may not be correct. 
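 * The workaround below selects a scratch PLL index with the write-enable
 * bit cleared, performs a throwaway read of CLOCK_CNTL_DATA, and then
 * restores the original CLOCK_CNTL_INDEX value.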
2885 */ 2886 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2887 uint32_t save, tmp; 2888 2889 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2890 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2891 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2892 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2893 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2894 } 2895 } 2896 2897 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2898 { 2899 uint32_t data; 2900 2901 spin_lock(&rdev->pll_idx_lock); 2902 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2903 r100_pll_errata_after_index(rdev); 2904 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2905 r100_pll_errata_after_data(rdev); 2906 spin_unlock(&rdev->pll_idx_lock); 2907 return data; 2908 } 2909 2910 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2911 { 2912 spin_lock(&rdev->pll_idx_lock); 2913 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2914 r100_pll_errata_after_index(rdev); 2915 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2916 r100_pll_errata_after_data(rdev); 2917 spin_unlock(&rdev->pll_idx_lock); 2918 } 2919 2920 static void r100_set_safe_registers(struct radeon_device *rdev) 2921 { 2922 if (ASIC_IS_RN50(rdev)) { 2923 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2924 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); 2925 } else if (rdev->family < CHIP_R200) { 2926 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2927 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 2928 } else { 2929 r200_set_safe_registers(rdev); 2930 } 2931 } 2932 2933 /* 2934 * Debugfs info 2935 */ 2936 #if defined(CONFIG_DEBUG_FS) 2937 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2938 { 2939 struct drm_info_node *node = (struct drm_info_node *) m->private; 2940 struct drm_device *dev = node->minor->dev; 2941 struct radeon_device *rdev = dev->dev_private; 2942 uint32_t reg, value; 2943 unsigned i; 2944 2945 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2946 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2947 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2948 for (i = 0; i < 64; i++) { 2949 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 2950 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 2951 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 2952 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 2953 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 2954 } 2955 return 0; 2956 } 2957 2958 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 2959 { 2960 struct drm_info_node *node = (struct drm_info_node *) m->private; 2961 struct drm_device *dev = node->minor->dev; 2962 struct radeon_device *rdev = dev->dev_private; 2963 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2964 uint32_t rdp, wdp; 2965 unsigned count, i, j; 2966 2967 radeon_ring_free_size(rdev, ring); 2968 rdp = RREG32(RADEON_CP_RB_RPTR); 2969 wdp = RREG32(RADEON_CP_RB_WPTR); 2970 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 2971 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2972 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2973 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2974 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2975 seq_printf(m, "%u dwords in ring\n", count); 2976 if (ring->ready) { 2977 for (j = 0; j <= count; j++) { 2978 i = (rdp + j) & ring->ptr_mask; 2979 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2980 } 2981 } 2982 return 0; 2983 } 2984 2985 2986 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 2987 
{ 2988 struct drm_info_node *node = (struct drm_info_node *) m->private; 2989 struct drm_device *dev = node->minor->dev; 2990 struct radeon_device *rdev = dev->dev_private; 2991 uint32_t csq_stat, csq2_stat, tmp; 2992 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 2993 unsigned i; 2994 2995 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2996 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 2997 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 2998 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 2999 r_rptr = (csq_stat >> 0) & 0x3ff; 3000 r_wptr = (csq_stat >> 10) & 0x3ff; 3001 ib1_rptr = (csq_stat >> 20) & 0x3ff; 3002 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 3003 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 3004 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 3005 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 3006 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 3007 seq_printf(m, "Ring rptr %u\n", r_rptr); 3008 seq_printf(m, "Ring wptr %u\n", r_wptr); 3009 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 3010 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 3011 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 3012 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 3013 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 3014 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 3015 seq_printf(m, "Ring fifo:\n"); 3016 for (i = 0; i < 256; i++) { 3017 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3018 tmp = RREG32(RADEON_CP_CSQ_DATA); 3019 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 3020 } 3021 seq_printf(m, "Indirect1 fifo:\n"); 3022 for (i = 256; i <= 512; i++) { 3023 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3024 tmp = RREG32(RADEON_CP_CSQ_DATA); 3025 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3026 } 3027 seq_printf(m, "Indirect2 fifo:\n"); 3028 for (i = 640; i < ib1_wptr; i++) { 3029 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3030 tmp = RREG32(RADEON_CP_CSQ_DATA); 3031 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3032 } 3033 return 0; 3034 } 3035 3036 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3037 { 3038 struct drm_info_node *node = (struct drm_info_node *) m->private; 3039 struct drm_device *dev = node->minor->dev; 3040 struct radeon_device *rdev = dev->dev_private; 3041 uint32_t tmp; 3042 3043 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3044 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3045 tmp = RREG32(RADEON_MC_FB_LOCATION); 3046 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3047 tmp = RREG32(RADEON_BUS_CNTL); 3048 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3049 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3050 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3051 tmp = RREG32(RADEON_AGP_BASE); 3052 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3053 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3054 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3055 tmp = RREG32(0x01D0); 3056 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3057 tmp = RREG32(RADEON_AIC_LO_ADDR); 3058 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3059 tmp = RREG32(RADEON_AIC_HI_ADDR); 3060 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3061 tmp = RREG32(0x01E4); 3062 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3063 return 0; 3064 } 3065 3066 static struct drm_info_list r100_debugfs_rbbm_list[] = { 3067 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3068 }; 3069 3070 static struct drm_info_list r100_debugfs_cp_list[] = { 3071 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3072 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3073 }; 3074 3075 static struct drm_info_list 
r100_debugfs_mc_info_list[] = { 3076 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3077 }; 3078 #endif 3079 3080 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3081 { 3082 #if defined(CONFIG_DEBUG_FS) 3083 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 3084 #else 3085 return 0; 3086 #endif 3087 } 3088 3089 int r100_debugfs_cp_init(struct radeon_device *rdev) 3090 { 3091 #if defined(CONFIG_DEBUG_FS) 3092 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 3093 #else 3094 return 0; 3095 #endif 3096 } 3097 3098 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 3099 { 3100 #if defined(CONFIG_DEBUG_FS) 3101 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 3102 #else 3103 return 0; 3104 #endif 3105 } 3106 3107 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 3108 uint32_t tiling_flags, uint32_t pitch, 3109 uint32_t offset, uint32_t obj_size) 3110 { 3111 int surf_index = reg * 16; 3112 int flags = 0; 3113 3114 if (rdev->family <= CHIP_RS200) { 3115 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3116 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3117 flags |= RADEON_SURF_TILE_COLOR_BOTH; 3118 if (tiling_flags & RADEON_TILING_MACRO) 3119 flags |= RADEON_SURF_TILE_COLOR_MACRO; 3120 /* setting pitch to 0 disables tiling */ 3121 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3122 == 0) 3123 pitch = 0; 3124 } else if (rdev->family <= CHIP_RV280) { 3125 if (tiling_flags & (RADEON_TILING_MACRO)) 3126 flags |= R200_SURF_TILE_COLOR_MACRO; 3127 if (tiling_flags & RADEON_TILING_MICRO) 3128 flags |= R200_SURF_TILE_COLOR_MICRO; 3129 } else { 3130 if (tiling_flags & RADEON_TILING_MACRO) 3131 flags |= R300_SURF_TILE_MACRO; 3132 if (tiling_flags & RADEON_TILING_MICRO) 3133 flags |= R300_SURF_TILE_MICRO; 3134 } 3135 3136 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 3137 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 3138 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 3139 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 3140 3141 /* r100/r200 divide by 16 */ 3142 if (rdev->family < CHIP_R300) 3143 flags |= pitch / 16; 3144 else 3145 flags |= pitch / 8; 3146 3147 3148 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3149 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3150 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3151 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3152 return 0; 3153 } 3154 3155 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3156 { 3157 int surf_index = reg * 16; 3158 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3159 } 3160 3161 void r100_bandwidth_update(struct radeon_device *rdev) 3162 { 3163 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3164 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3165 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; 3166 fixed20_12 crit_point_ff = {0}; 3167 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3168 fixed20_12 memtcas_ff[8] = { 3169 dfixed_init(1), 3170 dfixed_init(2), 3171 dfixed_init(3), 3172 dfixed_init(0), 3173 dfixed_init_half(1), 3174 dfixed_init_half(2), 3175 dfixed_init(0), 3176 }; 3177 fixed20_12 memtcas_rs480_ff[8] = { 3178 dfixed_init(0), 3179 dfixed_init(1), 3180 dfixed_init(2), 3181 dfixed_init(3), 3182 dfixed_init(0), 3183 dfixed_init_half(1), 3184 dfixed_init_half(2), 3185 dfixed_init_half(3), 3186 }; 3187 fixed20_12 memtcas2_ff[8] = { 3188 dfixed_init(0), 3189 
dfixed_init(1), 3190 dfixed_init(2), 3191 dfixed_init(3), 3192 dfixed_init(4), 3193 dfixed_init(5), 3194 dfixed_init(6), 3195 dfixed_init(7), 3196 }; 3197 fixed20_12 memtrbs[8] = { 3198 dfixed_init(1), 3199 dfixed_init_half(1), 3200 dfixed_init(2), 3201 dfixed_init_half(2), 3202 dfixed_init(3), 3203 dfixed_init_half(3), 3204 dfixed_init(4), 3205 dfixed_init_half(4) 3206 }; 3207 fixed20_12 memtrbs_r4xx[8] = { 3208 dfixed_init(4), 3209 dfixed_init(5), 3210 dfixed_init(6), 3211 dfixed_init(7), 3212 dfixed_init(8), 3213 dfixed_init(9), 3214 dfixed_init(10), 3215 dfixed_init(11) 3216 }; 3217 fixed20_12 min_mem_eff; 3218 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3219 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3220 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0}, 3221 disp_drain_rate2, read_return_rate; 3222 fixed20_12 time_disp1_drop_priority; 3223 int c; 3224 int cur_size = 16; /* in octawords */ 3225 int critical_point = 0, critical_point2; 3226 /* uint32_t read_return_rate, time_disp1_drop_priority; */ 3227 int stop_req, max_stop_req; 3228 struct drm_display_mode *mode1 = NULL; 3229 struct drm_display_mode *mode2 = NULL; 3230 uint32_t pixel_bytes1 = 0; 3231 uint32_t pixel_bytes2 = 0; 3232 3233 /* Guess line buffer size to be 8192 pixels */ 3234 u32 lb_size = 8192; 3235 3236 if (!rdev->mode_info.mode_config_initialized) 3237 return; 3238 3239 radeon_update_display_priority(rdev); 3240 3241 if (rdev->mode_info.crtcs[0]->base.enabled) { 3242 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 3243 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8; 3244 } 3245 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3246 if (rdev->mode_info.crtcs[1]->base.enabled) { 3247 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 3248 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8; 3249 } 3250 } 3251 3252 min_mem_eff.full = dfixed_const_8(0); 3253 /* get modes */ 3254 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 3255 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 3256 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); 3257 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); 3258 /* check crtc enables */ 3259 if (mode2) 3260 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); 3261 if (mode1) 3262 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); 3263 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); 3264 } 3265 3266 /* 3267 * determine is there is enough bw for current mode 3268 */ 3269 sclk_ff = rdev->pm.sclk; 3270 mclk_ff = rdev->pm.mclk; 3271 3272 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 
2 : 1); 3273 temp_ff.full = dfixed_const(temp); 3274 mem_bw.full = dfixed_mul(mclk_ff, temp_ff); 3275 3276 pix_clk.full = 0; 3277 pix_clk2.full = 0; 3278 peak_disp_bw.full = 0; 3279 if (mode1) { 3280 temp_ff.full = dfixed_const(1000); 3281 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ 3282 pix_clk.full = dfixed_div(pix_clk, temp_ff); 3283 temp_ff.full = dfixed_const(pixel_bytes1); 3284 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); 3285 } 3286 if (mode2) { 3287 temp_ff.full = dfixed_const(1000); 3288 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ 3289 pix_clk2.full = dfixed_div(pix_clk2, temp_ff); 3290 temp_ff.full = dfixed_const(pixel_bytes2); 3291 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); 3292 } 3293 3294 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); 3295 if (peak_disp_bw.full >= mem_bw.full) { 3296 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 3297 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 3298 } 3299 3300 /* Get values from the EXT_MEM_CNTL register...converting its contents. */ 3301 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3302 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3303 mem_trcd = ((temp >> 2) & 0x3) + 1; 3304 mem_trp = ((temp & 0x3)) + 1; 3305 mem_tras = ((temp & 0x70) >> 4) + 1; 3306 } else if (rdev->family == CHIP_R300 || 3307 rdev->family == CHIP_R350) { /* r300, r350 */ 3308 mem_trcd = (temp & 0x7) + 1; 3309 mem_trp = ((temp >> 8) & 0x7) + 1; 3310 mem_tras = ((temp >> 11) & 0xf) + 4; 3311 } else if (rdev->family == CHIP_RV350 || 3312 rdev->family <= CHIP_RV380) { 3313 /* rv3x0 */ 3314 mem_trcd = (temp & 0x7) + 3; 3315 mem_trp = ((temp >> 8) & 0x7) + 3; 3316 mem_tras = ((temp >> 11) & 0xf) + 6; 3317 } else if (rdev->family == CHIP_R420 || 3318 rdev->family == CHIP_R423 || 3319 rdev->family == CHIP_RV410) { 3320 /* r4xx */ 3321 mem_trcd = (temp & 0xf) + 3; 3322 if (mem_trcd > 15) 3323 mem_trcd = 15; 3324 mem_trp = ((temp >> 8) & 0xf) + 3; 3325 if (mem_trp > 15) 3326 mem_trp = 15; 3327 mem_tras = ((temp >> 12) & 0x1f) + 6; 3328 if (mem_tras > 31) 3329 mem_tras = 31; 3330 } else { /* RV200, R200 */ 3331 mem_trcd = (temp & 0x7) + 1; 3332 mem_trp = ((temp >> 8) & 0x7) + 1; 3333 mem_tras = ((temp >> 12) & 0xf) + 4; 3334 } 3335 /* convert to FF */ 3336 trcd_ff.full = dfixed_const(mem_trcd); 3337 trp_ff.full = dfixed_const(mem_trp); 3338 tras_ff.full = dfixed_const(mem_tras); 3339 3340 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 3341 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3342 data = (temp & (7 << 20)) >> 20; 3343 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3344 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3345 tcas_ff = memtcas_rs480_ff[data]; 3346 else 3347 tcas_ff = memtcas_ff[data]; 3348 } else 3349 tcas_ff = memtcas2_ff[data]; 3350 3351 if (rdev->family == CHIP_RS400 || 3352 rdev->family == CHIP_RS480) { 3353 /* extra cas latency stored in bits 23-25 0-4 clocks */ 3354 data = (temp >> 23) & 0x7; 3355 if (data < 5) 3356 tcas_ff.full += dfixed_const(data); 3357 } 3358 3359 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3360 /* on the R300, Tcas is included in Trbs. 
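 * Trbs itself is looked up from the RBS position reported by the memory
 * controller: MEM_CNTL gives the populated channels, and the position is
 * read either from MC_READ_CNTL_AB or, for CD-only configurations, through
 * the MC indirect index; the resulting value is added on top of Tcas.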
3361 */ 3362 temp = RREG32(RADEON_MEM_CNTL); 3363 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3364 if (data == 1) { 3365 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3366 temp = RREG32(R300_MC_IND_INDEX); 3367 temp &= ~R300_MC_IND_ADDR_MASK; 3368 temp |= R300_MC_READ_CNTL_CD_mcind; 3369 WREG32(R300_MC_IND_INDEX, temp); 3370 temp = RREG32(R300_MC_IND_DATA); 3371 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3372 } else { 3373 temp = RREG32(R300_MC_READ_CNTL_AB); 3374 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3375 } 3376 } else { 3377 temp = RREG32(R300_MC_READ_CNTL_AB); 3378 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3379 } 3380 if (rdev->family == CHIP_RV410 || 3381 rdev->family == CHIP_R420 || 3382 rdev->family == CHIP_R423) 3383 trbs_ff = memtrbs_r4xx[data]; 3384 else 3385 trbs_ff = memtrbs[data]; 3386 tcas_ff.full += trbs_ff.full; 3387 } 3388 3389 sclk_eff_ff.full = sclk_ff.full; 3390 3391 if (rdev->flags & RADEON_IS_AGP) { 3392 fixed20_12 agpmode_ff; 3393 agpmode_ff.full = dfixed_const(radeon_agpmode); 3394 temp_ff.full = dfixed_const_666(16); 3395 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3396 } 3397 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 3398 3399 if (ASIC_IS_R300(rdev)) { 3400 sclk_delay_ff.full = dfixed_const(250); 3401 } else { 3402 if ((rdev->family == CHIP_RV100) || 3403 rdev->flags & RADEON_IS_IGP) { 3404 if (rdev->mc.vram_is_ddr) 3405 sclk_delay_ff.full = dfixed_const(41); 3406 else 3407 sclk_delay_ff.full = dfixed_const(33); 3408 } else { 3409 if (rdev->mc.vram_width == 128) 3410 sclk_delay_ff.full = dfixed_const(57); 3411 else 3412 sclk_delay_ff.full = dfixed_const(41); 3413 } 3414 } 3415 3416 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3417 3418 if (rdev->mc.vram_is_ddr) { 3419 if (rdev->mc.vram_width == 32) { 3420 k1.full = dfixed_const(40); 3421 c = 3; 3422 } else { 3423 k1.full = dfixed_const(20); 3424 c = 1; 3425 } 3426 } else { 3427 k1.full = dfixed_const(40); 3428 c = 3; 3429 } 3430 3431 temp_ff.full = dfixed_const(2); 3432 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3433 temp_ff.full = dfixed_const(c); 3434 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3435 temp_ff.full = dfixed_const(4); 3436 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3437 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3438 mc_latency_mclk.full += k1.full; 3439 3440 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3441 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3442 3443 /* 3444 HW cursor time assuming worst case of full size colour cursor. 3445 */ 3446 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3447 temp_ff.full += trcd_ff.full; 3448 if (temp_ff.full < tras_ff.full) 3449 temp_ff.full = tras_ff.full; 3450 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3451 3452 temp_ff.full = dfixed_const(cur_size); 3453 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3454 /* 3455 Find the total latency for the display data. 
3456 */ 3457 disp_latency_overhead.full = dfixed_const(8); 3458 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3459 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3460 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3461 3462 if (mc_latency_mclk.full > mc_latency_sclk.full) 3463 disp_latency.full = mc_latency_mclk.full; 3464 else 3465 disp_latency.full = mc_latency_sclk.full; 3466 3467 /* setup Max GRPH_STOP_REQ default value */ 3468 if (ASIC_IS_RV100(rdev)) 3469 max_stop_req = 0x5c; 3470 else 3471 max_stop_req = 0x7c; 3472 3473 /* 3474 XXX: disp_drain_rate.full not initialized in (mode2) block 3475 Looks like a real bug. Try to report it upstream. 3476 */ 3477 #ifdef __DragonFly__ 3478 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3479 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3480 crit_point_ff.full += dfixed_const_half(0); 3481 #endif 3482 3483 if (mode1) { 3484 /* CRTC1 3485 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3486 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3487 */ 3488 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3489 3490 if (stop_req > max_stop_req) 3491 stop_req = max_stop_req; 3492 3493 /* 3494 Find the drain rate of the display buffer. 3495 */ 3496 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3497 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3498 3499 /* 3500 Find the critical point of the display buffer. 3501 */ 3502 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3503 crit_point_ff.full += dfixed_const_half(0); 3504 3505 critical_point = dfixed_trunc(crit_point_ff); 3506 3507 if (rdev->disp_priority == 2) { 3508 critical_point = 0; 3509 } 3510 3511 /* 3512 The critical point should never be above max_stop_req-4. Setting 3513 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 3514 */ 3515 if (max_stop_req - critical_point < 4) 3516 critical_point = 0; 3517 3518 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3519 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 3520 critical_point = 0x10; 3521 } 3522 3523 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3524 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3525 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3526 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3527 if ((rdev->family == CHIP_R350) && 3528 (stop_req > 0x15)) { 3529 stop_req -= 0x10; 3530 } 3531 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3532 temp |= RADEON_GRPH_BUFFER_SIZE; 3533 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3534 RADEON_GRPH_CRITICAL_AT_SOF | 3535 RADEON_GRPH_STOP_CNTL); 3536 /* 3537 Write the result into the register. 3538 */ 3539 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3540 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3541 3542 #if 0 3543 if ((rdev->family == CHIP_RS400) || 3544 (rdev->family == CHIP_RS480)) { 3545 /* attempt to program RS400 disp regs correctly ??? 
*/ 3546 temp = RREG32(RS400_DISP1_REG_CNTL); 3547 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3548 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3549 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3550 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3551 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3552 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3553 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3554 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3555 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3556 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3557 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3558 } 3559 #endif 3560 3561 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", 3562 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3563 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3564 } 3565 3566 if (mode2) { 3567 u32 grph2_cntl; 3568 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3569 3570 if (stop_req > max_stop_req) 3571 stop_req = max_stop_req; 3572 3573 /* 3574 Find the drain rate of the display buffer. 3575 */ 3576 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3577 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3578 3579 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3580 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3581 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3582 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3583 if ((rdev->family == CHIP_R350) && 3584 (stop_req > 0x15)) { 3585 stop_req -= 0x10; 3586 } 3587 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3588 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3589 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3590 RADEON_GRPH_CRITICAL_AT_SOF | 3591 RADEON_GRPH_STOP_CNTL); 3592 3593 if ((rdev->family == CHIP_RS100) || 3594 (rdev->family == CHIP_RS200)) 3595 critical_point2 = 0; 3596 else { 3597 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3598 temp_ff.full = dfixed_const(temp); 3599 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3600 if (sclk_ff.full < temp_ff.full) 3601 temp_ff.full = sclk_ff.full; 3602 3603 read_return_rate.full = temp_ff.full; 3604 3605 if (mode1) { 3606 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3607 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3608 } else { 3609 time_disp1_drop_priority.full = 0; 3610 } 3611 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3612 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3613 crit_point_ff.full += dfixed_const_half(0); 3614 3615 critical_point2 = dfixed_trunc(crit_point_ff); 3616 3617 if (rdev->disp_priority == 2) { 3618 critical_point2 = 0; 3619 } 3620 3621 if (max_stop_req - critical_point2 < 4) 3622 critical_point2 = 0; 3623 3624 } 3625 3626 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3627 /* some R300 cards have problem with this set to 0 */ 3628 critical_point2 = 0x10; 3629 } 3630 3631 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3632 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3633 3634 if ((rdev->family == CHIP_RS400) || 3635 (rdev->family == CHIP_RS480)) { 3636 #if 0 3637 /* attempt to program RS400 disp2 regs correctly ??? 
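 * Left disabled: fixed request/critical-point values are written to the
 * RS400 DISP2/DMIF registers right after the #endif instead.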
*/ 3638 temp = RREG32(RS400_DISP2_REQ_CNTL1); 3639 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | 3640 RS400_DISP2_STOP_REQ_LEVEL_MASK); 3641 WREG32(RS400_DISP2_REQ_CNTL1, (temp | 3642 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3643 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3644 temp = RREG32(RS400_DISP2_REQ_CNTL2); 3645 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | 3646 RS400_DISP2_CRITICAL_POINT_STOP_MASK); 3647 WREG32(RS400_DISP2_REQ_CNTL2, (temp | 3648 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | 3649 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); 3650 #endif 3651 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); 3652 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); 3653 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); 3654 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); 3655 } 3656 3657 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", 3658 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 3659 } 3660 3661 /* Save number of lines the linebuffer leads before the scanout */ 3662 if (mode1) 3663 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); 3664 3665 if (mode2) 3666 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); 3667 } 3668 3669 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 3670 { 3671 uint32_t scratch; 3672 uint32_t tmp = 0; 3673 unsigned i; 3674 int r; 3675 3676 r = radeon_scratch_get(rdev, &scratch); 3677 if (r) { 3678 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 3679 return r; 3680 } 3681 WREG32(scratch, 0xCAFEDEAD); 3682 r = radeon_ring_lock(rdev, ring, 2); 3683 if (r) { 3684 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3685 radeon_scratch_free(rdev, scratch); 3686 return r; 3687 } 3688 radeon_ring_write(ring, PACKET0(scratch, 0)); 3689 radeon_ring_write(ring, 0xDEADBEEF); 3690 radeon_ring_unlock_commit(rdev, ring, false); 3691 for (i = 0; i < rdev->usec_timeout; i++) { 3692 tmp = RREG32(scratch); 3693 if (tmp == 0xDEADBEEF) { 3694 break; 3695 } 3696 DRM_UDELAY(1); 3697 } 3698 if (i < rdev->usec_timeout) { 3699 DRM_INFO("ring test succeeded in %d usecs\n", i); 3700 } else { 3701 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", 3702 scratch, tmp); 3703 r = -EINVAL; 3704 } 3705 radeon_scratch_free(rdev, scratch); 3706 return r; 3707 } 3708 3709 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3710 { 3711 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3712 3713 if (ring->rptr_save_reg) { 3714 u32 next_rptr = ring->wptr + 2 + 3; 3715 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); 3716 radeon_ring_write(ring, next_rptr); 3717 } 3718 3719 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); 3720 radeon_ring_write(ring, ib->gpu_addr); 3721 radeon_ring_write(ring, ib->length_dw); 3722 } 3723 3724 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3725 { 3726 struct radeon_ib ib; 3727 uint32_t scratch; 3728 uint32_t tmp = 0; 3729 unsigned i; 3730 int r; 3731 3732 r = radeon_scratch_get(rdev, &scratch); 3733 if (r) { 3734 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3735 return r; 3736 } 3737 WREG32(scratch, 0xCAFEDEAD); 3738 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); 3739 if (r) { 3740 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3741 goto free_scratch; 3742 } 3743 ib.ptr[0] = PACKET0(scratch, 0); 3744 ib.ptr[1] = 0xDEADBEEF; 3745 ib.ptr[2] = PACKET2(0); 3746 ib.ptr[3] 
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET0(scratch, 0);
	ib.ptr[1] = 0xDEADBEEF;
	ib.ptr[2] = PACKET2(0);
	ib.ptr[3] = PACKET2(0);
	ib.ptr[4] = PACKET2(0);
	ib.ptr[5] = PACKET2(0);
	ib.ptr[6] = PACKET2(0);
	ib.ptr[7] = PACKET2(0);
	ib.length_dw = 8;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
		goto free_ib;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP. We shouldn't need to do that, but better be
	 * safe than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
		S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
		(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
		S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
			S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
			S_0003F8_CRTC2_DISPLAY_DIS(1) |
			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

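/**
 * r100_vga_render_disable - disable VGA rendering.
 *
 * @rdev: radeon_device pointer
 *
 * Clears the VGA_RAM_EN bit in GENMO_WT so the VGA block no longer
 * accesses the framebuffer (r1xx-r4xx).
 */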
void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}

static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
				upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for the MC to go idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program the MC; the address space is limited to 32 bits */
	WREG32(R_000148_MC_FB_LOCATION,
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

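/**
 * r100_resume - resume callback.
 *
 * @rdev: radeon_device pointer
 *
 * Resets the GPU, re-posts it through the combios tables and restarts
 * the acceleration engines after a suspend (r1xx-r4xx).
 * Returns 0 on success, negative error code on failure.
 */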
int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r100_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However, doing our init sequence with the CP and
 * WB stuff set up causes GPU hangs, on the RN50 at least. So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	}
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_RB_CNTL, 0);
	}
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp) {
		WREG32(RADEON_SCRATCH_UMSK, 0);
	}
}

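/**
 * r100_init - asic init callback.
 *
 * @rdev: radeon_device pointer
 *
 * One-time driver initialisation for r1xx-r4xx parts: BIOS parsing,
 * clock and memory controller setup, GART, fence and memory manager
 * initialisation, followed by r100_startup().
 * Returns 0 on success, negative error code on failure.
 */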
int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t ret;

	spin_lock(&rdev->mmio_idx_lock);
	bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
	ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
	spin_unlock(&rdev->mmio_idx_lock);
	return ret;
}

void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->mmio_idx_lock);
	bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
	bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
	spin_unlock(&rdev->mmio_idx_lock);
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size)
		return bus_read_4(rdev->rio_mem, reg);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size)
		bus_write_4(rdev->rio_mem, reg, v);
	else {
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
	}
}