1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/seq_file.h> 29 #include <drm/drmP.h> 30 #include <drm/radeon_drm.h> 31 #include "radeon_reg.h" 32 #include "radeon.h" 33 #include "radeon_asic.h" 34 #include "r100d.h" 35 #include "rs100d.h" 36 #include "rv200d.h" 37 #include "rv250d.h" 38 #include "atom.h" 39 40 #include <linux/firmware.h> 41 #include <linux/module.h> 42 43 #include "r100_reg_safe.h" 44 #include "rn50_reg_safe.h" 45 46 /* Firmware Names */ 47 #define FIRMWARE_R100 "radeonkmsfw_R100_cp" 48 #define FIRMWARE_R200 "radeonkmsfw_R200_cp" 49 #define FIRMWARE_R300 "radeonkmsfw_R300_cp" 50 #define FIRMWARE_R420 "radeonkmsfw_R420_cp" 51 #define FIRMWARE_RS690 "radeonkmsfw_RS690_cp" 52 #define FIRMWARE_RS600 "radeonkmsfw_RS600_cp" 53 #define FIRMWARE_R520 "radeonkmsfw_R520_cp" 54 55 MODULE_FIRMWARE(FIRMWARE_R100); 56 MODULE_FIRMWARE(FIRMWARE_R200); 57 MODULE_FIRMWARE(FIRMWARE_R300); 58 MODULE_FIRMWARE(FIRMWARE_R420); 59 MODULE_FIRMWARE(FIRMWARE_RS690); 60 MODULE_FIRMWARE(FIRMWARE_RS600); 61 MODULE_FIRMWARE(FIRMWARE_R520); 62 63 #include "r100_track.h" 64 65 /* This files gather functions specifics to: 66 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 67 * and others in some cases. 68 */ 69 70 static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc) 71 { 72 if (crtc == 0) { 73 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) 74 return true; 75 else 76 return false; 77 } else { 78 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) 79 return true; 80 else 81 return false; 82 } 83 } 84 85 static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc) 86 { 87 u32 vline1, vline2; 88 89 if (crtc == 0) { 90 vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 91 vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 92 } else { 93 vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 94 vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 95 } 96 if (vline1 != vline2) 97 return true; 98 else 99 return false; 100 } 101 102 /** 103 * r100_wait_for_vblank - vblank wait asic callback. 104 * 105 * @rdev: radeon_device pointer 106 * @crtc: crtc to wait for vblank on 107 * 108 * Wait for vblank on the requested crtc (r1xx-r4xx). 109 */ 110 void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) 111 { 112 unsigned i = 0; 113 114 if (crtc >= rdev->num_crtc) 115 return; 116 117 if (crtc == 0) { 118 if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN)) 119 return; 120 } else { 121 if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN)) 122 return; 123 } 124 125 /* depending on when we hit vblank, we may be close to active; if so, 126 * wait for another frame. 127 */ 128 while (r100_is_in_vblank(rdev, crtc)) { 129 if (i++ % 100 == 0) { 130 if (!r100_is_counter_moving(rdev, crtc)) 131 break; 132 } 133 } 134 135 while (!r100_is_in_vblank(rdev, crtc)) { 136 if (i++ % 100 == 0) { 137 if (!r100_is_counter_moving(rdev, crtc)) 138 break; 139 } 140 } 141 } 142 143 /** 144 * r100_page_flip - pageflip callback. 145 * 146 * @rdev: radeon_device pointer 147 * @crtc_id: crtc to cleanup pageflip on 148 * @crtc_base: new address of the crtc (GPU MC address) 149 * 150 * Does the actual pageflip (r1xx-r4xx). 151 * During vblank we take the crtc lock and wait for the update_pending 152 * bit to go high, when it does, we release the lock, and allow the 153 * double buffered update to take place. 154 */ 155 void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async) 156 { 157 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 158 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 159 int i; 160 161 /* Lock the graphics update lock */ 162 /* update the scanout addresses */ 163 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 164 165 /* Wait for update_pending to go high. */ 166 for (i = 0; i < rdev->usec_timeout; i++) { 167 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) 168 break; 169 udelay(1); 170 } 171 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 172 173 /* Unlock the lock, so double-buffering can take place inside vblank */ 174 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; 175 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 176 177 } 178 179 /** 180 * r100_page_flip_pending - check if page flip is still pending 181 * 182 * @rdev: radeon_device pointer 183 * @crtc_id: crtc to check 184 * 185 * Check if the last pagefilp is still pending (r1xx-r4xx). 186 * Returns the current update pending status. 187 */ 188 bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id) 189 { 190 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 191 192 /* Return current update_pending status: */ 193 return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & 194 RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET); 195 } 196 197 /** 198 * r100_pm_get_dynpm_state - look up dynpm power state callback. 199 * 200 * @rdev: radeon_device pointer 201 * 202 * Look up the optimal power state based on the 203 * current state of the GPU (r1xx-r5xx). 204 * Used for dynpm only. 205 */ 206 void r100_pm_get_dynpm_state(struct radeon_device *rdev) 207 { 208 int i; 209 rdev->pm.dynpm_can_upclock = true; 210 rdev->pm.dynpm_can_downclock = true; 211 212 switch (rdev->pm.dynpm_planned_action) { 213 case DYNPM_ACTION_MINIMUM: 214 rdev->pm.requested_power_state_index = 0; 215 rdev->pm.dynpm_can_downclock = false; 216 break; 217 case DYNPM_ACTION_DOWNCLOCK: 218 if (rdev->pm.current_power_state_index == 0) { 219 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 220 rdev->pm.dynpm_can_downclock = false; 221 } else { 222 if (rdev->pm.active_crtc_count > 1) { 223 for (i = 0; i < rdev->pm.num_power_states; i++) { 224 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 225 continue; 226 else if (i >= rdev->pm.current_power_state_index) { 227 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 228 break; 229 } else { 230 rdev->pm.requested_power_state_index = i; 231 break; 232 } 233 } 234 } else 235 rdev->pm.requested_power_state_index = 236 rdev->pm.current_power_state_index - 1; 237 } 238 /* don't use the power state if crtcs are active and no display flag is set */ 239 if ((rdev->pm.active_crtc_count > 0) && 240 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & 241 RADEON_PM_MODE_NO_DISPLAY)) { 242 rdev->pm.requested_power_state_index++; 243 } 244 break; 245 case DYNPM_ACTION_UPCLOCK: 246 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { 247 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 248 rdev->pm.dynpm_can_upclock = false; 249 } else { 250 if (rdev->pm.active_crtc_count > 1) { 251 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { 252 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 253 continue; 254 else if (i <= rdev->pm.current_power_state_index) { 255 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 256 break; 257 } else { 258 rdev->pm.requested_power_state_index = i; 259 break; 260 } 261 } 262 } else 263 rdev->pm.requested_power_state_index = 264 rdev->pm.current_power_state_index + 1; 265 } 266 break; 267 case DYNPM_ACTION_DEFAULT: 268 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 269 rdev->pm.dynpm_can_upclock = false; 270 break; 271 case DYNPM_ACTION_NONE: 272 default: 273 DRM_ERROR("Requested mode for not defined action\n"); 274 return; 275 } 276 /* only one clock mode per power state */ 277 rdev->pm.requested_clock_mode_index = 0; 278 279 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", 280 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 281 clock_info[rdev->pm.requested_clock_mode_index].sclk, 282 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 283 clock_info[rdev->pm.requested_clock_mode_index].mclk, 284 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 285 pcie_lanes); 286 } 287 288 /** 289 * r100_pm_init_profile - Initialize power profiles callback. 290 * 291 * @rdev: radeon_device pointer 292 * 293 * Initialize the power states used in profile mode 294 * (r1xx-r3xx). 295 * Used for profile mode only. 296 */ 297 void r100_pm_init_profile(struct radeon_device *rdev) 298 { 299 /* default */ 300 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 301 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 302 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 303 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 304 /* low sh */ 305 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; 306 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; 307 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 308 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 309 /* mid sh */ 310 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; 311 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; 312 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 313 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 314 /* high sh */ 315 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; 316 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 317 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 318 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 319 /* low mh */ 320 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; 321 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 322 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 323 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 324 /* mid mh */ 325 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; 326 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 327 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 328 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 329 /* high mh */ 330 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; 331 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 332 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 333 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 334 } 335 336 /** 337 * r100_pm_misc - set additional pm hw parameters callback. 338 * 339 * @rdev: radeon_device pointer 340 * 341 * Set non-clock parameters associated with a power state 342 * (voltage, pcie lanes, etc.) (r1xx-r4xx). 343 */ 344 void r100_pm_misc(struct radeon_device *rdev) 345 { 346 int requested_index = rdev->pm.requested_power_state_index; 347 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; 348 struct radeon_voltage *voltage = &ps->clock_info[0].voltage; 349 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; 350 351 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { 352 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { 353 tmp = RREG32(voltage->gpio.reg); 354 if (voltage->active_high) 355 tmp |= voltage->gpio.mask; 356 else 357 tmp &= ~(voltage->gpio.mask); 358 WREG32(voltage->gpio.reg, tmp); 359 if (voltage->delay) 360 udelay(voltage->delay); 361 } else { 362 tmp = RREG32(voltage->gpio.reg); 363 if (voltage->active_high) 364 tmp &= ~voltage->gpio.mask; 365 else 366 tmp |= voltage->gpio.mask; 367 WREG32(voltage->gpio.reg, tmp); 368 if (voltage->delay) 369 udelay(voltage->delay); 370 } 371 } 372 373 sclk_cntl = RREG32_PLL(SCLK_CNTL); 374 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); 375 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); 376 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); 377 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); 378 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { 379 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; 380 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) 381 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; 382 else 383 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; 384 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) 385 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); 386 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) 387 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); 388 } else 389 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; 390 391 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { 392 sclk_more_cntl |= IO_CG_VOLTAGE_DROP; 393 if (voltage->delay) { 394 sclk_more_cntl |= VOLTAGE_DROP_SYNC; 395 switch (voltage->delay) { 396 case 33: 397 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); 398 break; 399 case 66: 400 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); 401 break; 402 case 99: 403 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); 404 break; 405 case 132: 406 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); 407 break; 408 } 409 } else 410 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; 411 } else 412 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; 413 414 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) 415 sclk_cntl &= ~FORCE_HDP; 416 else 417 sclk_cntl |= FORCE_HDP; 418 419 WREG32_PLL(SCLK_CNTL, sclk_cntl); 420 WREG32_PLL(SCLK_CNTL2, sclk_cntl2); 421 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); 422 423 /* set pcie lanes */ 424 if ((rdev->flags & RADEON_IS_PCIE) && 425 !(rdev->flags & RADEON_IS_IGP) && 426 rdev->asic->pm.set_pcie_lanes && 427 (ps->pcie_lanes != 428 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { 429 radeon_set_pcie_lanes(rdev, 430 ps->pcie_lanes); 431 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes); 432 } 433 } 434 435 /** 436 * r100_pm_prepare - pre-power state change callback. 437 * 438 * @rdev: radeon_device pointer 439 * 440 * Prepare for a power state change (r1xx-r4xx). 441 */ 442 void r100_pm_prepare(struct radeon_device *rdev) 443 { 444 struct drm_device *ddev = rdev->ddev; 445 struct drm_crtc *crtc; 446 struct radeon_crtc *radeon_crtc; 447 u32 tmp; 448 449 /* disable any active CRTCs */ 450 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 451 radeon_crtc = to_radeon_crtc(crtc); 452 if (radeon_crtc->enabled) { 453 if (radeon_crtc->crtc_id) { 454 tmp = RREG32(RADEON_CRTC2_GEN_CNTL); 455 tmp |= RADEON_CRTC2_DISP_REQ_EN_B; 456 WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 457 } else { 458 tmp = RREG32(RADEON_CRTC_GEN_CNTL); 459 tmp |= RADEON_CRTC_DISP_REQ_EN_B; 460 WREG32(RADEON_CRTC_GEN_CNTL, tmp); 461 } 462 } 463 } 464 } 465 466 /** 467 * r100_pm_finish - post-power state change callback. 468 * 469 * @rdev: radeon_device pointer 470 * 471 * Clean up after a power state change (r1xx-r4xx). 472 */ 473 void r100_pm_finish(struct radeon_device *rdev) 474 { 475 struct drm_device *ddev = rdev->ddev; 476 struct drm_crtc *crtc; 477 struct radeon_crtc *radeon_crtc; 478 u32 tmp; 479 480 /* enable any active CRTCs */ 481 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 482 radeon_crtc = to_radeon_crtc(crtc); 483 if (radeon_crtc->enabled) { 484 if (radeon_crtc->crtc_id) { 485 tmp = RREG32(RADEON_CRTC2_GEN_CNTL); 486 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; 487 WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 488 } else { 489 tmp = RREG32(RADEON_CRTC_GEN_CNTL); 490 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; 491 WREG32(RADEON_CRTC_GEN_CNTL, tmp); 492 } 493 } 494 } 495 } 496 497 /** 498 * r100_gui_idle - gui idle callback. 499 * 500 * @rdev: radeon_device pointer 501 * 502 * Check of the GUI (2D/3D engines) are idle (r1xx-r5xx). 503 * Returns true if idle, false if not. 504 */ 505 bool r100_gui_idle(struct radeon_device *rdev) 506 { 507 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) 508 return false; 509 else 510 return true; 511 } 512 513 /* hpd for digital panel detect/disconnect */ 514 /** 515 * r100_hpd_sense - hpd sense callback. 516 * 517 * @rdev: radeon_device pointer 518 * @hpd: hpd (hotplug detect) pin 519 * 520 * Checks if a digital monitor is connected (r1xx-r4xx). 521 * Returns true if connected, false if not connected. 522 */ 523 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 524 { 525 bool connected = false; 526 527 switch (hpd) { 528 case RADEON_HPD_1: 529 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) 530 connected = true; 531 break; 532 case RADEON_HPD_2: 533 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) 534 connected = true; 535 break; 536 default: 537 break; 538 } 539 return connected; 540 } 541 542 /** 543 * r100_hpd_set_polarity - hpd set polarity callback. 544 * 545 * @rdev: radeon_device pointer 546 * @hpd: hpd (hotplug detect) pin 547 * 548 * Set the polarity of the hpd pin (r1xx-r4xx). 549 */ 550 void r100_hpd_set_polarity(struct radeon_device *rdev, 551 enum radeon_hpd_id hpd) 552 { 553 u32 tmp; 554 bool connected = r100_hpd_sense(rdev, hpd); 555 556 switch (hpd) { 557 case RADEON_HPD_1: 558 tmp = RREG32(RADEON_FP_GEN_CNTL); 559 if (connected) 560 tmp &= ~RADEON_FP_DETECT_INT_POL; 561 else 562 tmp |= RADEON_FP_DETECT_INT_POL; 563 WREG32(RADEON_FP_GEN_CNTL, tmp); 564 break; 565 case RADEON_HPD_2: 566 tmp = RREG32(RADEON_FP2_GEN_CNTL); 567 if (connected) 568 tmp &= ~RADEON_FP2_DETECT_INT_POL; 569 else 570 tmp |= RADEON_FP2_DETECT_INT_POL; 571 WREG32(RADEON_FP2_GEN_CNTL, tmp); 572 break; 573 default: 574 break; 575 } 576 } 577 578 /** 579 * r100_hpd_init - hpd setup callback. 580 * 581 * @rdev: radeon_device pointer 582 * 583 * Setup the hpd pins used by the card (r1xx-r4xx). 584 * Set the polarity, and enable the hpd interrupts. 585 */ 586 void r100_hpd_init(struct radeon_device *rdev) 587 { 588 struct drm_device *dev = rdev->ddev; 589 struct drm_connector *connector; 590 unsigned enable = 0; 591 592 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 593 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 594 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 595 enable |= 1 << radeon_connector->hpd.hpd; 596 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 597 } 598 radeon_irq_kms_enable_hpd(rdev, enable); 599 } 600 601 /** 602 * r100_hpd_fini - hpd tear down callback. 603 * 604 * @rdev: radeon_device pointer 605 * 606 * Tear down the hpd pins used by the card (r1xx-r4xx). 607 * Disable the hpd interrupts. 608 */ 609 void r100_hpd_fini(struct radeon_device *rdev) 610 { 611 struct drm_device *dev = rdev->ddev; 612 struct drm_connector *connector; 613 unsigned disable = 0; 614 615 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 616 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 617 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 618 disable |= 1 << radeon_connector->hpd.hpd; 619 } 620 radeon_irq_kms_disable_hpd(rdev, disable); 621 } 622 623 /* 624 * PCI GART 625 */ 626 void r100_pci_gart_tlb_flush(struct radeon_device *rdev) 627 { 628 /* TODO: can we do somethings here ? */ 629 /* It seems hw only cache one entry so we should discard this 630 * entry otherwise if first GPU GART read hit this entry it 631 * could end up in wrong address. */ 632 } 633 634 int r100_pci_gart_init(struct radeon_device *rdev) 635 { 636 int r; 637 638 if (rdev->gart.ptr) { 639 WARN(1, "R100 PCI GART already initialized\n"); 640 return 0; 641 } 642 /* Initialize common gart structure */ 643 r = radeon_gart_init(rdev); 644 if (r) 645 return r; 646 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 647 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 648 rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; 649 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 650 return radeon_gart_table_ram_alloc(rdev); 651 } 652 653 int r100_pci_gart_enable(struct radeon_device *rdev) 654 { 655 uint32_t tmp; 656 657 /* discard memory request outside of configured range */ 658 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 659 WREG32(RADEON_AIC_CNTL, tmp); 660 /* set address range for PCI address translate */ 661 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start); 662 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end); 663 /* set PCI GART page-table base address */ 664 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 665 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 666 WREG32(RADEON_AIC_CNTL, tmp); 667 r100_pci_gart_tlb_flush(rdev); 668 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n", 669 (unsigned)(rdev->mc.gtt_size >> 20), 670 (unsigned long long)rdev->gart.table_addr); 671 rdev->gart.ready = true; 672 return 0; 673 } 674 675 void r100_pci_gart_disable(struct radeon_device *rdev) 676 { 677 uint32_t tmp; 678 679 /* discard memory request outside of configured range */ 680 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 681 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); 682 WREG32(RADEON_AIC_LO_ADDR, 0); 683 WREG32(RADEON_AIC_HI_ADDR, 0); 684 } 685 686 uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) 687 { 688 return addr; 689 } 690 691 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 692 uint64_t entry) 693 { 694 u32 *gtt = rdev->gart.ptr; 695 gtt[i] = cpu_to_le32(lower_32_bits(entry)); 696 } 697 698 void r100_pci_gart_fini(struct radeon_device *rdev) 699 { 700 radeon_gart_fini(rdev); 701 r100_pci_gart_disable(rdev); 702 radeon_gart_table_ram_free(rdev); 703 } 704 705 int r100_irq_set(struct radeon_device *rdev) 706 { 707 uint32_t tmp = 0; 708 709 if (!rdev->irq.installed) { 710 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 711 WREG32(R_000040_GEN_INT_CNTL, 0); 712 return -EINVAL; 713 } 714 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 715 tmp |= RADEON_SW_INT_ENABLE; 716 } 717 if (rdev->irq.crtc_vblank_int[0] || 718 atomic_read(&rdev->irq.pflip[0])) { 719 tmp |= RADEON_CRTC_VBLANK_MASK; 720 } 721 if (rdev->irq.crtc_vblank_int[1] || 722 atomic_read(&rdev->irq.pflip[1])) { 723 tmp |= RADEON_CRTC2_VBLANK_MASK; 724 } 725 if (rdev->irq.hpd[0]) { 726 tmp |= RADEON_FP_DETECT_MASK; 727 } 728 if (rdev->irq.hpd[1]) { 729 tmp |= RADEON_FP2_DETECT_MASK; 730 } 731 WREG32(RADEON_GEN_INT_CNTL, tmp); 732 733 /* read back to post the write */ 734 RREG32(RADEON_GEN_INT_CNTL); 735 736 return 0; 737 } 738 739 void r100_irq_disable(struct radeon_device *rdev) 740 { 741 u32 tmp; 742 743 WREG32(R_000040_GEN_INT_CNTL, 0); 744 /* Wait and acknowledge irq */ 745 mdelay(1); 746 tmp = RREG32(R_000044_GEN_INT_STATUS); 747 WREG32(R_000044_GEN_INT_STATUS, tmp); 748 } 749 750 static uint32_t r100_irq_ack(struct radeon_device *rdev) 751 { 752 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 753 uint32_t irq_mask = RADEON_SW_INT_TEST | 754 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 755 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 756 757 if (irqs) { 758 WREG32(RADEON_GEN_INT_STATUS, irqs); 759 } 760 return irqs & irq_mask; 761 } 762 763 irqreturn_t r100_irq_process(struct radeon_device *rdev) 764 { 765 uint32_t status, msi_rearm; 766 bool queue_hotplug = false; 767 768 status = r100_irq_ack(rdev); 769 if (!status) { 770 return IRQ_NONE; 771 } 772 if (rdev->shutdown) { 773 return IRQ_NONE; 774 } 775 while (status) { 776 /* SW interrupt */ 777 if (status & RADEON_SW_INT_TEST) { 778 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 779 } 780 /* Vertical blank interrupts */ 781 if (status & RADEON_CRTC_VBLANK_STAT) { 782 if (rdev->irq.crtc_vblank_int[0]) { 783 drm_handle_vblank(rdev->ddev, 0); 784 rdev->pm.vblank_sync = true; 785 wake_up(&rdev->irq.vblank_queue); 786 } 787 if (atomic_read(&rdev->irq.pflip[0])) 788 radeon_crtc_handle_vblank(rdev, 0); 789 } 790 if (status & RADEON_CRTC2_VBLANK_STAT) { 791 if (rdev->irq.crtc_vblank_int[1]) { 792 drm_handle_vblank(rdev->ddev, 1); 793 rdev->pm.vblank_sync = true; 794 wake_up(&rdev->irq.vblank_queue); 795 } 796 if (atomic_read(&rdev->irq.pflip[1])) 797 radeon_crtc_handle_vblank(rdev, 1); 798 } 799 if (status & RADEON_FP_DETECT_STAT) { 800 queue_hotplug = true; 801 DRM_DEBUG("HPD1\n"); 802 } 803 if (status & RADEON_FP2_DETECT_STAT) { 804 queue_hotplug = true; 805 DRM_DEBUG("HPD2\n"); 806 } 807 status = r100_irq_ack(rdev); 808 } 809 if (queue_hotplug) 810 schedule_delayed_work(&rdev->hotplug_work, 0); 811 if (rdev->msi_enabled) { 812 switch (rdev->family) { 813 case CHIP_RS400: 814 case CHIP_RS480: 815 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM; 816 WREG32(RADEON_AIC_CNTL, msi_rearm); 817 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); 818 break; 819 default: 820 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); 821 break; 822 } 823 } 824 return IRQ_HANDLED; 825 } 826 827 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) 828 { 829 if (crtc == 0) 830 return RREG32(RADEON_CRTC_CRNT_FRAME); 831 else 832 return RREG32(RADEON_CRTC2_CRNT_FRAME); 833 } 834 835 /** 836 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer 837 * rdev: radeon device structure 838 * ring: ring buffer struct for emitting packets 839 */ 840 static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring) 841 { 842 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 843 radeon_ring_write(ring, rdev->config.r100.hdp_cntl | 844 RADEON_HDP_READ_BUFFER_INVALIDATE); 845 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 846 radeon_ring_write(ring, rdev->config.r100.hdp_cntl); 847 } 848 849 /* Who ever call radeon_fence_emit should call ring_lock and ask 850 * for enough space (today caller are ib schedule and buffer move) */ 851 void r100_fence_ring_emit(struct radeon_device *rdev, 852 struct radeon_fence *fence) 853 { 854 struct radeon_ring *ring = &rdev->ring[fence->ring]; 855 856 /* We have to make sure that caches are flushed before 857 * CPU might read something from VRAM. */ 858 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); 859 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL); 860 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 861 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL); 862 /* Wait until IDLE & CLEAN */ 863 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 864 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); 865 r100_ring_hdp_flush(rdev, ring); 866 /* Emit fence sequence & fire IRQ */ 867 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); 868 radeon_ring_write(ring, fence->seq); 869 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); 870 radeon_ring_write(ring, RADEON_SW_INT_FIRE); 871 } 872 873 bool r100_semaphore_ring_emit(struct radeon_device *rdev, 874 struct radeon_ring *ring, 875 struct radeon_semaphore *semaphore, 876 bool emit_wait) 877 { 878 /* Unused on older asics, since we don't have semaphores or multiple rings */ 879 BUG(); 880 return false; 881 } 882 883 struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, 884 uint64_t src_offset, 885 uint64_t dst_offset, 886 unsigned num_gpu_pages, 887 struct reservation_object *resv) 888 { 889 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 890 struct radeon_fence *fence; 891 uint32_t cur_pages; 892 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; 893 uint32_t pitch; 894 uint32_t stride_pixels; 895 unsigned ndw; 896 int num_loops; 897 int r = 0; 898 899 /* radeon limited to 16k stride */ 900 stride_bytes &= 0x3fff; 901 /* radeon pitch is /64 */ 902 pitch = stride_bytes / 64; 903 stride_pixels = stride_bytes / 4; 904 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); 905 906 /* Ask for enough room for blit + flush + fence */ 907 ndw = 64 + (10 * num_loops); 908 r = radeon_ring_lock(rdev, ring, ndw); 909 if (r) { 910 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 911 return ERR_PTR(-EINVAL); 912 } 913 while (num_gpu_pages > 0) { 914 cur_pages = num_gpu_pages; 915 if (cur_pages > 8191) { 916 cur_pages = 8191; 917 } 918 num_gpu_pages -= cur_pages; 919 920 /* pages are in Y direction - height 921 page width in X direction - width */ 922 radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8)); 923 radeon_ring_write(ring, 924 RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 925 RADEON_GMC_DST_PITCH_OFFSET_CNTL | 926 RADEON_GMC_SRC_CLIPPING | 927 RADEON_GMC_DST_CLIPPING | 928 RADEON_GMC_BRUSH_NONE | 929 (RADEON_COLOR_FORMAT_ARGB8888 << 8) | 930 RADEON_GMC_SRC_DATATYPE_COLOR | 931 RADEON_ROP3_S | 932 RADEON_DP_SRC_SOURCE_MEMORY | 933 RADEON_GMC_CLR_CMP_CNTL_DIS | 934 RADEON_GMC_WR_MSK_DIS); 935 radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10)); 936 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10)); 937 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); 938 radeon_ring_write(ring, 0); 939 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); 940 radeon_ring_write(ring, num_gpu_pages); 941 radeon_ring_write(ring, num_gpu_pages); 942 radeon_ring_write(ring, cur_pages | (stride_pixels << 16)); 943 } 944 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 945 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL); 946 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 947 radeon_ring_write(ring, 948 RADEON_WAIT_2D_IDLECLEAN | 949 RADEON_WAIT_HOST_IDLECLEAN | 950 RADEON_WAIT_DMA_GUI_IDLE); 951 r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); 952 if (r) { 953 radeon_ring_unlock_undo(rdev, ring); 954 return ERR_PTR(r); 955 } 956 radeon_ring_unlock_commit(rdev, ring, false); 957 return fence; 958 } 959 960 static int r100_cp_wait_for_idle(struct radeon_device *rdev) 961 { 962 unsigned i; 963 u32 tmp; 964 965 for (i = 0; i < rdev->usec_timeout; i++) { 966 tmp = RREG32(R_000E40_RBBM_STATUS); 967 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { 968 return 0; 969 } 970 udelay(1); 971 } 972 return -1; 973 } 974 975 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) 976 { 977 int r; 978 979 r = radeon_ring_lock(rdev, ring, 2); 980 if (r) { 981 return; 982 } 983 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); 984 radeon_ring_write(ring, 985 RADEON_ISYNC_ANY2D_IDLE3D | 986 RADEON_ISYNC_ANY3D_IDLE2D | 987 RADEON_ISYNC_WAIT_IDLEGUI | 988 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 989 radeon_ring_unlock_commit(rdev, ring, false); 990 } 991 992 993 /* Load the microcode for the CP */ 994 static int r100_cp_init_microcode(struct radeon_device *rdev) 995 { 996 const char *fw_name = NULL; 997 int err; 998 999 DRM_DEBUG_KMS("\n"); 1000 1001 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || 1002 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || 1003 (rdev->family == CHIP_RS200)) { 1004 DRM_INFO("Loading R100 Microcode\n"); 1005 fw_name = FIRMWARE_R100; 1006 } else if ((rdev->family == CHIP_R200) || 1007 (rdev->family == CHIP_RV250) || 1008 (rdev->family == CHIP_RV280) || 1009 (rdev->family == CHIP_RS300)) { 1010 DRM_INFO("Loading R200 Microcode\n"); 1011 fw_name = FIRMWARE_R200; 1012 } else if ((rdev->family == CHIP_R300) || 1013 (rdev->family == CHIP_R350) || 1014 (rdev->family == CHIP_RV350) || 1015 (rdev->family == CHIP_RV380) || 1016 (rdev->family == CHIP_RS400) || 1017 (rdev->family == CHIP_RS480)) { 1018 DRM_INFO("Loading R300 Microcode\n"); 1019 fw_name = FIRMWARE_R300; 1020 } else if ((rdev->family == CHIP_R420) || 1021 (rdev->family == CHIP_R423) || 1022 (rdev->family == CHIP_RV410)) { 1023 DRM_INFO("Loading R400 Microcode\n"); 1024 fw_name = FIRMWARE_R420; 1025 } else if ((rdev->family == CHIP_RS690) || 1026 (rdev->family == CHIP_RS740)) { 1027 DRM_INFO("Loading RS690/RS740 Microcode\n"); 1028 fw_name = FIRMWARE_RS690; 1029 } else if (rdev->family == CHIP_RS600) { 1030 DRM_INFO("Loading RS600 Microcode\n"); 1031 fw_name = FIRMWARE_RS600; 1032 } else if ((rdev->family == CHIP_RV515) || 1033 (rdev->family == CHIP_R520) || 1034 (rdev->family == CHIP_RV530) || 1035 (rdev->family == CHIP_R580) || 1036 (rdev->family == CHIP_RV560) || 1037 (rdev->family == CHIP_RV570)) { 1038 DRM_INFO("Loading R500 Microcode\n"); 1039 fw_name = FIRMWARE_R520; 1040 } 1041 1042 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 1043 if (err) { 1044 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", 1045 fw_name); 1046 } else if (rdev->me_fw->datasize % 8) { 1047 printk(KERN_ERR 1048 "radeon_cp: Bogus length %zu in firmware \"%s\"\n", 1049 rdev->me_fw->datasize, fw_name); 1050 err = -EINVAL; 1051 release_firmware(rdev->me_fw); 1052 rdev->me_fw = NULL; 1053 } 1054 return err; 1055 } 1056 1057 u32 r100_gfx_get_rptr(struct radeon_device *rdev, 1058 struct radeon_ring *ring) 1059 { 1060 u32 rptr; 1061 1062 if (rdev->wb.enabled) 1063 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 1064 else 1065 rptr = RREG32(RADEON_CP_RB_RPTR); 1066 1067 return rptr; 1068 } 1069 1070 u32 r100_gfx_get_wptr(struct radeon_device *rdev, 1071 struct radeon_ring *ring) 1072 { 1073 return RREG32(RADEON_CP_RB_WPTR); 1074 } 1075 1076 void r100_gfx_set_wptr(struct radeon_device *rdev, 1077 struct radeon_ring *ring) 1078 { 1079 WREG32(RADEON_CP_RB_WPTR, ring->wptr); 1080 (void)RREG32(RADEON_CP_RB_WPTR); 1081 } 1082 1083 /** 1084 * r100_cp_fini_microcode - drop the firmware image reference 1085 * 1086 * @rdev: radeon_device pointer 1087 * 1088 * Drop the me firmware image reference. 1089 * Called at driver shutdown. 1090 */ 1091 static void r100_cp_fini_microcode (struct radeon_device *rdev) 1092 { 1093 release_firmware(rdev->me_fw); 1094 rdev->me_fw = NULL; 1095 } 1096 1097 static void r100_cp_load_microcode(struct radeon_device *rdev) 1098 { 1099 const __be32 *fw_data; 1100 int i, size; 1101 1102 if (r100_gui_wait_for_idle(rdev)) { 1103 printk(KERN_WARNING "Failed to wait GUI idle while " 1104 "programming pipes. Bad things might happen.\n"); 1105 } 1106 1107 if (rdev->me_fw) { 1108 size = rdev->me_fw->datasize / 4; 1109 fw_data = (const __be32 *)rdev->me_fw->data; 1110 WREG32(RADEON_CP_ME_RAM_ADDR, 0); 1111 for (i = 0; i < size; i += 2) { 1112 WREG32(RADEON_CP_ME_RAM_DATAH, 1113 be32_to_cpup(&fw_data[i])); 1114 WREG32(RADEON_CP_ME_RAM_DATAL, 1115 be32_to_cpup(&fw_data[i + 1])); 1116 } 1117 } 1118 } 1119 1120 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) 1121 { 1122 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1123 unsigned rb_bufsz; 1124 unsigned rb_blksz; 1125 unsigned max_fetch; 1126 unsigned pre_write_timer; 1127 unsigned pre_write_limit; 1128 unsigned indirect2_start; 1129 unsigned indirect1_start; 1130 uint32_t tmp; 1131 int r; 1132 1133 if (r100_debugfs_cp_init(rdev)) { 1134 DRM_ERROR("Failed to register debugfs file for CP !\n"); 1135 } 1136 if (!rdev->me_fw) { 1137 r = r100_cp_init_microcode(rdev); 1138 if (r) { 1139 DRM_ERROR("Failed to load firmware!\n"); 1140 return r; 1141 } 1142 } 1143 1144 /* Align ring size */ 1145 rb_bufsz = order_base_2(ring_size / 8); 1146 ring_size = (1 << (rb_bufsz + 1)) * 4; 1147 r100_cp_load_microcode(rdev); 1148 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, 1149 RADEON_CP_PACKET2); 1150 if (r) { 1151 return r; 1152 } 1153 /* Each time the cp read 1024 bytes (16 dword/quadword) update 1154 * the rptr copy in system ram */ 1155 rb_blksz = 9; 1156 /* cp will read 128bytes at a time (4 dwords) */ 1157 max_fetch = 1; 1158 ring->align_mask = 16 - 1; 1159 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ 1160 pre_write_timer = 64; 1161 /* Force CP_RB_WPTR write if written more than one time before the 1162 * delay expire 1163 */ 1164 pre_write_limit = 0; 1165 /* Setup the cp cache like this (cache size is 96 dwords) : 1166 * RING 0 to 15 1167 * INDIRECT1 16 to 79 1168 * INDIRECT2 80 to 95 1169 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 1170 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords)) 1171 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 1172 * Idea being that most of the gpu cmd will be through indirect1 buffer 1173 * so it gets the bigger cache. 1174 */ 1175 indirect2_start = 80; 1176 indirect1_start = 16; 1177 /* cp setup */ 1178 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 1179 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | 1180 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 1181 REG_SET(RADEON_MAX_FETCH, max_fetch)); 1182 #ifdef __BIG_ENDIAN 1183 tmp |= RADEON_BUF_SWAP_32BIT; 1184 #endif 1185 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); 1186 1187 /* Set ring address */ 1188 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr); 1189 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr); 1190 /* Force read & write ptr to 0 */ 1191 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 1192 WREG32(RADEON_CP_RB_RPTR_WR, 0); 1193 ring->wptr = 0; 1194 WREG32(RADEON_CP_RB_WPTR, ring->wptr); 1195 1196 /* set the wb address whether it's enabled or not */ 1197 WREG32(R_00070C_CP_RB_RPTR_ADDR, 1198 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); 1199 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); 1200 1201 if (rdev->wb.enabled) 1202 WREG32(R_000770_SCRATCH_UMSK, 0xff); 1203 else { 1204 tmp |= RADEON_RB_NO_UPDATE; 1205 WREG32(R_000770_SCRATCH_UMSK, 0); 1206 } 1207 1208 WREG32(RADEON_CP_RB_CNTL, tmp); 1209 udelay(10); 1210 /* Set cp mode to bus mastering & enable cp*/ 1211 WREG32(RADEON_CP_CSQ_MODE, 1212 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1213 REG_SET(RADEON_INDIRECT1_START, indirect1_start)); 1214 WREG32(RADEON_CP_RB_WPTR_DELAY, 0); 1215 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); 1216 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1217 1218 /* at this point everything should be setup correctly to enable master */ 1219 pci_enable_busmaster(rdev->dev->bsddev); 1220 1221 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1222 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 1223 if (r) { 1224 DRM_ERROR("radeon: cp isn't working (%d).\n", r); 1225 return r; 1226 } 1227 ring->ready = true; 1228 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1229 1230 if (!ring->rptr_save_reg /* not resuming from suspend */ 1231 && radeon_ring_supports_scratch_reg(rdev, ring)) { 1232 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 1233 if (r) { 1234 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 1235 ring->rptr_save_reg = 0; 1236 } 1237 } 1238 return 0; 1239 } 1240 1241 void r100_cp_fini(struct radeon_device *rdev) 1242 { 1243 if (r100_cp_wait_for_idle(rdev)) { 1244 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n"); 1245 } 1246 /* Disable ring */ 1247 r100_cp_disable(rdev); 1248 radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg); 1249 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1250 DRM_INFO("radeon: cp finalized\n"); 1251 } 1252 1253 void r100_cp_disable(struct radeon_device *rdev) 1254 { 1255 /* Disable ring */ 1256 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1257 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1258 WREG32(RADEON_CP_CSQ_MODE, 0); 1259 WREG32(RADEON_CP_CSQ_CNTL, 0); 1260 WREG32(R_000770_SCRATCH_UMSK, 0); 1261 if (r100_gui_wait_for_idle(rdev)) { 1262 printk(KERN_WARNING "Failed to wait GUI idle while " 1263 "programming pipes. Bad things might happen.\n"); 1264 } 1265 } 1266 1267 /* 1268 * CS functions 1269 */ 1270 int r100_reloc_pitch_offset(struct radeon_cs_parser *p, 1271 struct radeon_cs_packet *pkt, 1272 unsigned idx, 1273 unsigned reg) 1274 { 1275 int r; 1276 u32 tile_flags = 0; 1277 u32 tmp; 1278 struct radeon_bo_list *reloc; 1279 u32 value; 1280 1281 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1282 if (r) { 1283 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1284 idx, reg); 1285 radeon_cs_dump_packet(p, pkt); 1286 return r; 1287 } 1288 1289 value = radeon_get_ib_value(p, idx); 1290 tmp = value & 0x003fffff; 1291 tmp += (((u32)reloc->gpu_offset) >> 10); 1292 1293 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1294 if (reloc->tiling_flags & RADEON_TILING_MACRO) 1295 tile_flags |= RADEON_DST_TILE_MACRO; 1296 if (reloc->tiling_flags & RADEON_TILING_MICRO) { 1297 if (reg == RADEON_SRC_PITCH_OFFSET) { 1298 DRM_ERROR("Cannot src blit from microtiled surface\n"); 1299 radeon_cs_dump_packet(p, pkt); 1300 return -EINVAL; 1301 } 1302 tile_flags |= RADEON_DST_TILE_MICRO; 1303 } 1304 1305 tmp |= tile_flags; 1306 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp; 1307 } else 1308 p->ib.ptr[idx] = (value & 0xffc00000) | tmp; 1309 return 0; 1310 } 1311 1312 int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, 1313 struct radeon_cs_packet *pkt, 1314 int idx) 1315 { 1316 unsigned c, i; 1317 struct radeon_bo_list *reloc; 1318 struct r100_cs_track *track; 1319 int r = 0; 1320 volatile uint32_t *ib; 1321 u32 idx_value; 1322 1323 ib = p->ib.ptr; 1324 track = (struct r100_cs_track *)p->track; 1325 c = radeon_get_ib_value(p, idx++) & 0x1F; 1326 if (c > 16) { 1327 DRM_ERROR("Only 16 vertex buffers are allowed %d\n", 1328 pkt->opcode); 1329 radeon_cs_dump_packet(p, pkt); 1330 return -EINVAL; 1331 } 1332 track->num_arrays = c; 1333 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1334 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1335 if (r) { 1336 DRM_ERROR("No reloc for packet3 %d\n", 1337 pkt->opcode); 1338 radeon_cs_dump_packet(p, pkt); 1339 return r; 1340 } 1341 idx_value = radeon_get_ib_value(p, idx); 1342 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); 1343 1344 track->arrays[i + 0].esize = idx_value >> 8; 1345 track->arrays[i + 0].robj = reloc->robj; 1346 track->arrays[i + 0].esize &= 0x7F; 1347 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1348 if (r) { 1349 DRM_ERROR("No reloc for packet3 %d\n", 1350 pkt->opcode); 1351 radeon_cs_dump_packet(p, pkt); 1352 return r; 1353 } 1354 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset); 1355 track->arrays[i + 1].robj = reloc->robj; 1356 track->arrays[i + 1].esize = idx_value >> 24; 1357 track->arrays[i + 1].esize &= 0x7F; 1358 } 1359 if (c & 1) { 1360 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1361 if (r) { 1362 DRM_ERROR("No reloc for packet3 %d\n", 1363 pkt->opcode); 1364 radeon_cs_dump_packet(p, pkt); 1365 return r; 1366 } 1367 idx_value = radeon_get_ib_value(p, idx); 1368 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); 1369 track->arrays[i + 0].robj = reloc->robj; 1370 track->arrays[i + 0].esize = idx_value >> 8; 1371 track->arrays[i + 0].esize &= 0x7F; 1372 } 1373 return r; 1374 } 1375 1376 int r100_cs_parse_packet0(struct radeon_cs_parser *p, 1377 struct radeon_cs_packet *pkt, 1378 const unsigned *auth, unsigned n, 1379 radeon_packet0_check_t check) 1380 { 1381 unsigned reg; 1382 unsigned i, j, m; 1383 unsigned idx; 1384 int r; 1385 1386 idx = pkt->idx + 1; 1387 reg = pkt->reg; 1388 /* Check that register fall into register range 1389 * determined by the number of entry (n) in the 1390 * safe register bitmap. 1391 */ 1392 if (pkt->one_reg_wr) { 1393 if ((reg >> 7) > n) { 1394 return -EINVAL; 1395 } 1396 } else { 1397 if (((reg + (pkt->count << 2)) >> 7) > n) { 1398 return -EINVAL; 1399 } 1400 } 1401 for (i = 0; i <= pkt->count; i++, idx++) { 1402 j = (reg >> 7); 1403 m = 1 << ((reg >> 2) & 31); 1404 if (auth[j] & m) { 1405 r = check(p, pkt, idx, reg); 1406 if (r) { 1407 return r; 1408 } 1409 } 1410 if (pkt->one_reg_wr) { 1411 if (!(auth[j] & m)) { 1412 break; 1413 } 1414 } else { 1415 reg += 4; 1416 } 1417 } 1418 return 0; 1419 } 1420 1421 /** 1422 * r100_cs_packet_next_vline() - parse userspace VLINE packet 1423 * @parser: parser structure holding parsing context. 1424 * 1425 * Userspace sends a special sequence for VLINE waits. 1426 * PACKET0 - VLINE_START_END + value 1427 * PACKET0 - WAIT_UNTIL +_value 1428 * RELOC (P3) - crtc_id in reloc. 1429 * 1430 * This function parses this and relocates the VLINE START END 1431 * and WAIT UNTIL packets to the correct crtc. 1432 * It also detects a switched off crtc and nulls out the 1433 * wait in that case. 1434 */ 1435 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 1436 { 1437 struct drm_crtc *crtc; 1438 struct radeon_crtc *radeon_crtc; 1439 struct radeon_cs_packet p3reloc, waitreloc; 1440 int crtc_id; 1441 int r; 1442 uint32_t header, h_idx, reg; 1443 volatile uint32_t *ib; 1444 1445 ib = p->ib.ptr; 1446 1447 /* parse the wait until */ 1448 r = radeon_cs_packet_parse(p, &waitreloc, p->idx); 1449 if (r) 1450 return r; 1451 1452 /* check its a wait until and only 1 count */ 1453 if (waitreloc.reg != RADEON_WAIT_UNTIL || 1454 waitreloc.count != 0) { 1455 DRM_ERROR("vline wait had illegal wait until segment\n"); 1456 return -EINVAL; 1457 } 1458 1459 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { 1460 DRM_ERROR("vline wait had illegal wait until\n"); 1461 return -EINVAL; 1462 } 1463 1464 /* jump over the NOP */ 1465 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); 1466 if (r) 1467 return r; 1468 1469 h_idx = p->idx - 2; 1470 p->idx += waitreloc.count + 2; 1471 p->idx += p3reloc.count + 2; 1472 1473 header = radeon_get_ib_value(p, h_idx); 1474 crtc_id = radeon_get_ib_value(p, h_idx + 5); 1475 reg = R100_CP_PACKET0_GET_REG(header); 1476 crtc = drm_crtc_find(p->rdev->ddev, crtc_id); 1477 if (!crtc) { 1478 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1479 return -ENOENT; 1480 } 1481 radeon_crtc = to_radeon_crtc(crtc); 1482 crtc_id = radeon_crtc->crtc_id; 1483 1484 if (!crtc->enabled) { 1485 /* if the CRTC isn't enabled - we need to nop out the wait until */ 1486 ib[h_idx + 2] = PACKET2(0); 1487 ib[h_idx + 3] = PACKET2(0); 1488 } else if (crtc_id == 1) { 1489 switch (reg) { 1490 case AVIVO_D1MODE_VLINE_START_END: 1491 header &= ~R300_CP_PACKET0_REG_MASK; 1492 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1493 break; 1494 case RADEON_CRTC_GUI_TRIG_VLINE: 1495 header &= ~R300_CP_PACKET0_REG_MASK; 1496 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 1497 break; 1498 default: 1499 DRM_ERROR("unknown crtc reloc\n"); 1500 return -EINVAL; 1501 } 1502 ib[h_idx] = header; 1503 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 1504 } 1505 1506 return 0; 1507 } 1508 1509 static int r100_get_vtx_size(uint32_t vtx_fmt) 1510 { 1511 int vtx_size; 1512 vtx_size = 2; 1513 /* ordered according to bits in spec */ 1514 if (vtx_fmt & RADEON_SE_VTX_FMT_W0) 1515 vtx_size++; 1516 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) 1517 vtx_size += 3; 1518 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) 1519 vtx_size++; 1520 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) 1521 vtx_size++; 1522 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC) 1523 vtx_size += 3; 1524 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG) 1525 vtx_size++; 1526 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC) 1527 vtx_size++; 1528 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0) 1529 vtx_size += 2; 1530 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1) 1531 vtx_size += 2; 1532 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1) 1533 vtx_size++; 1534 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2) 1535 vtx_size += 2; 1536 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2) 1537 vtx_size++; 1538 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3) 1539 vtx_size += 2; 1540 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3) 1541 vtx_size++; 1542 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0) 1543 vtx_size++; 1544 /* blend weight */ 1545 if (vtx_fmt & (0x7 << 15)) 1546 vtx_size += (vtx_fmt >> 15) & 0x7; 1547 if (vtx_fmt & RADEON_SE_VTX_FMT_N0) 1548 vtx_size += 3; 1549 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1) 1550 vtx_size += 2; 1551 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1) 1552 vtx_size++; 1553 if (vtx_fmt & RADEON_SE_VTX_FMT_W1) 1554 vtx_size++; 1555 if (vtx_fmt & RADEON_SE_VTX_FMT_N1) 1556 vtx_size++; 1557 if (vtx_fmt & RADEON_SE_VTX_FMT_Z) 1558 vtx_size++; 1559 return vtx_size; 1560 } 1561 1562 static int r100_packet0_check(struct radeon_cs_parser *p, 1563 struct radeon_cs_packet *pkt, 1564 unsigned idx, unsigned reg) 1565 { 1566 struct radeon_bo_list *reloc; 1567 struct r100_cs_track *track; 1568 volatile uint32_t *ib; 1569 uint32_t tmp; 1570 int r; 1571 int i, face; 1572 u32 tile_flags = 0; 1573 u32 idx_value; 1574 1575 ib = p->ib.ptr; 1576 track = (struct r100_cs_track *)p->track; 1577 1578 idx_value = radeon_get_ib_value(p, idx); 1579 1580 switch (reg) { 1581 case RADEON_CRTC_GUI_TRIG_VLINE: 1582 r = r100_cs_packet_parse_vline(p); 1583 if (r) { 1584 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1585 idx, reg); 1586 radeon_cs_dump_packet(p, pkt); 1587 return r; 1588 } 1589 break; 1590 /* FIXME: only allow PACKET3 blit? easier to check for out of 1591 * range access */ 1592 case RADEON_DST_PITCH_OFFSET: 1593 case RADEON_SRC_PITCH_OFFSET: 1594 r = r100_reloc_pitch_offset(p, pkt, idx, reg); 1595 if (r) 1596 return r; 1597 break; 1598 case RADEON_RB3D_DEPTHOFFSET: 1599 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1600 if (r) { 1601 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1602 idx, reg); 1603 radeon_cs_dump_packet(p, pkt); 1604 return r; 1605 } 1606 track->zb.robj = reloc->robj; 1607 track->zb.offset = idx_value; 1608 track->zb_dirty = true; 1609 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1610 break; 1611 case RADEON_RB3D_COLOROFFSET: 1612 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1613 if (r) { 1614 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1615 idx, reg); 1616 radeon_cs_dump_packet(p, pkt); 1617 return r; 1618 } 1619 track->cb[0].robj = reloc->robj; 1620 track->cb[0].offset = idx_value; 1621 track->cb_dirty = true; 1622 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1623 break; 1624 case RADEON_PP_TXOFFSET_0: 1625 case RADEON_PP_TXOFFSET_1: 1626 case RADEON_PP_TXOFFSET_2: 1627 i = (reg - RADEON_PP_TXOFFSET_0) / 24; 1628 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1629 if (r) { 1630 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1631 idx, reg); 1632 radeon_cs_dump_packet(p, pkt); 1633 return r; 1634 } 1635 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1636 if (reloc->tiling_flags & RADEON_TILING_MACRO) 1637 tile_flags |= RADEON_TXO_MACRO_TILE; 1638 if (reloc->tiling_flags & RADEON_TILING_MICRO) 1639 tile_flags |= RADEON_TXO_MICRO_TILE_X2; 1640 1641 tmp = idx_value & ~(0x7 << 2); 1642 tmp |= tile_flags; 1643 ib[idx] = tmp + ((u32)reloc->gpu_offset); 1644 } else 1645 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1646 track->textures[i].robj = reloc->robj; 1647 track->tex_dirty = true; 1648 break; 1649 case RADEON_PP_CUBIC_OFFSET_T0_0: 1650 case RADEON_PP_CUBIC_OFFSET_T0_1: 1651 case RADEON_PP_CUBIC_OFFSET_T0_2: 1652 case RADEON_PP_CUBIC_OFFSET_T0_3: 1653 case RADEON_PP_CUBIC_OFFSET_T0_4: 1654 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; 1655 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1656 if (r) { 1657 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1658 idx, reg); 1659 radeon_cs_dump_packet(p, pkt); 1660 return r; 1661 } 1662 track->textures[0].cube_info[i].offset = idx_value; 1663 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1664 track->textures[0].cube_info[i].robj = reloc->robj; 1665 track->tex_dirty = true; 1666 break; 1667 case RADEON_PP_CUBIC_OFFSET_T1_0: 1668 case RADEON_PP_CUBIC_OFFSET_T1_1: 1669 case RADEON_PP_CUBIC_OFFSET_T1_2: 1670 case RADEON_PP_CUBIC_OFFSET_T1_3: 1671 case RADEON_PP_CUBIC_OFFSET_T1_4: 1672 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; 1673 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1674 if (r) { 1675 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1676 idx, reg); 1677 radeon_cs_dump_packet(p, pkt); 1678 return r; 1679 } 1680 track->textures[1].cube_info[i].offset = idx_value; 1681 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1682 track->textures[1].cube_info[i].robj = reloc->robj; 1683 track->tex_dirty = true; 1684 break; 1685 case RADEON_PP_CUBIC_OFFSET_T2_0: 1686 case RADEON_PP_CUBIC_OFFSET_T2_1: 1687 case RADEON_PP_CUBIC_OFFSET_T2_2: 1688 case RADEON_PP_CUBIC_OFFSET_T2_3: 1689 case RADEON_PP_CUBIC_OFFSET_T2_4: 1690 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; 1691 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1692 if (r) { 1693 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1694 idx, reg); 1695 radeon_cs_dump_packet(p, pkt); 1696 return r; 1697 } 1698 track->textures[2].cube_info[i].offset = idx_value; 1699 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1700 track->textures[2].cube_info[i].robj = reloc->robj; 1701 track->tex_dirty = true; 1702 break; 1703 case RADEON_RE_WIDTH_HEIGHT: 1704 track->maxy = ((idx_value >> 16) & 0x7FF); 1705 track->cb_dirty = true; 1706 track->zb_dirty = true; 1707 break; 1708 case RADEON_RB3D_COLORPITCH: 1709 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1710 if (r) { 1711 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1712 idx, reg); 1713 radeon_cs_dump_packet(p, pkt); 1714 return r; 1715 } 1716 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1717 if (reloc->tiling_flags & RADEON_TILING_MACRO) 1718 tile_flags |= RADEON_COLOR_TILE_ENABLE; 1719 if (reloc->tiling_flags & RADEON_TILING_MICRO) 1720 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1721 1722 tmp = idx_value & ~(0x7 << 16); 1723 tmp |= tile_flags; 1724 ib[idx] = tmp; 1725 } else 1726 ib[idx] = idx_value; 1727 1728 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1729 track->cb_dirty = true; 1730 break; 1731 case RADEON_RB3D_DEPTHPITCH: 1732 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1733 track->zb_dirty = true; 1734 break; 1735 case RADEON_RB3D_CNTL: 1736 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1737 case 7: 1738 case 8: 1739 case 9: 1740 case 11: 1741 case 12: 1742 track->cb[0].cpp = 1; 1743 break; 1744 case 3: 1745 case 4: 1746 case 15: 1747 track->cb[0].cpp = 2; 1748 break; 1749 case 6: 1750 track->cb[0].cpp = 4; 1751 break; 1752 default: 1753 DRM_ERROR("Invalid color buffer format (%d) !\n", 1754 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1755 return -EINVAL; 1756 } 1757 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 1758 track->cb_dirty = true; 1759 track->zb_dirty = true; 1760 break; 1761 case RADEON_RB3D_ZSTENCILCNTL: 1762 switch (idx_value & 0xf) { 1763 case 0: 1764 track->zb.cpp = 2; 1765 break; 1766 case 2: 1767 case 3: 1768 case 4: 1769 case 5: 1770 case 9: 1771 case 11: 1772 track->zb.cpp = 4; 1773 break; 1774 default: 1775 break; 1776 } 1777 track->zb_dirty = true; 1778 break; 1779 case RADEON_RB3D_ZPASS_ADDR: 1780 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1781 if (r) { 1782 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1783 idx, reg); 1784 radeon_cs_dump_packet(p, pkt); 1785 return r; 1786 } 1787 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1788 break; 1789 case RADEON_PP_CNTL: 1790 { 1791 uint32_t temp = idx_value >> 4; 1792 for (i = 0; i < track->num_texture; i++) 1793 track->textures[i].enabled = !!(temp & (1 << i)); 1794 track->tex_dirty = true; 1795 } 1796 break; 1797 case RADEON_SE_VF_CNTL: 1798 track->vap_vf_cntl = idx_value; 1799 break; 1800 case RADEON_SE_VTX_FMT: 1801 track->vtx_size = r100_get_vtx_size(idx_value); 1802 break; 1803 case RADEON_PP_TEX_SIZE_0: 1804 case RADEON_PP_TEX_SIZE_1: 1805 case RADEON_PP_TEX_SIZE_2: 1806 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1807 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 1808 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1809 track->tex_dirty = true; 1810 break; 1811 case RADEON_PP_TEX_PITCH_0: 1812 case RADEON_PP_TEX_PITCH_1: 1813 case RADEON_PP_TEX_PITCH_2: 1814 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1815 track->textures[i].pitch = idx_value + 32; 1816 track->tex_dirty = true; 1817 break; 1818 case RADEON_PP_TXFILTER_0: 1819 case RADEON_PP_TXFILTER_1: 1820 case RADEON_PP_TXFILTER_2: 1821 i = (reg - RADEON_PP_TXFILTER_0) / 24; 1822 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) 1823 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1824 tmp = (idx_value >> 23) & 0x7; 1825 if (tmp == 2 || tmp == 6) 1826 track->textures[i].roundup_w = false; 1827 tmp = (idx_value >> 27) & 0x7; 1828 if (tmp == 2 || tmp == 6) 1829 track->textures[i].roundup_h = false; 1830 track->tex_dirty = true; 1831 break; 1832 case RADEON_PP_TXFORMAT_0: 1833 case RADEON_PP_TXFORMAT_1: 1834 case RADEON_PP_TXFORMAT_2: 1835 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1836 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1837 track->textures[i].use_pitch = 1; 1838 } else { 1839 track->textures[i].use_pitch = 0; 1840 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1841 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1842 } 1843 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1844 track->textures[i].tex_coord_type = 2; 1845 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { 1846 case RADEON_TXFORMAT_I8: 1847 case RADEON_TXFORMAT_RGB332: 1848 case RADEON_TXFORMAT_Y8: 1849 track->textures[i].cpp = 1; 1850 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1851 break; 1852 case RADEON_TXFORMAT_AI88: 1853 case RADEON_TXFORMAT_ARGB1555: 1854 case RADEON_TXFORMAT_RGB565: 1855 case RADEON_TXFORMAT_ARGB4444: 1856 case RADEON_TXFORMAT_VYUY422: 1857 case RADEON_TXFORMAT_YVYU422: 1858 case RADEON_TXFORMAT_SHADOW16: 1859 case RADEON_TXFORMAT_LDUDV655: 1860 case RADEON_TXFORMAT_DUDV88: 1861 track->textures[i].cpp = 2; 1862 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1863 break; 1864 case RADEON_TXFORMAT_ARGB8888: 1865 case RADEON_TXFORMAT_RGBA8888: 1866 case RADEON_TXFORMAT_SHADOW32: 1867 case RADEON_TXFORMAT_LDUDUV8888: 1868 track->textures[i].cpp = 4; 1869 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1870 break; 1871 case RADEON_TXFORMAT_DXT1: 1872 track->textures[i].cpp = 1; 1873 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 1874 break; 1875 case RADEON_TXFORMAT_DXT23: 1876 case RADEON_TXFORMAT_DXT45: 1877 track->textures[i].cpp = 1; 1878 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 1879 break; 1880 } 1881 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1882 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1883 track->tex_dirty = true; 1884 break; 1885 case RADEON_PP_CUBIC_FACES_0: 1886 case RADEON_PP_CUBIC_FACES_1: 1887 case RADEON_PP_CUBIC_FACES_2: 1888 tmp = idx_value; 1889 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1890 for (face = 0; face < 4; face++) { 1891 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1892 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1893 } 1894 track->tex_dirty = true; 1895 break; 1896 default: 1897 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1898 reg, idx); 1899 return -EINVAL; 1900 } 1901 return 0; 1902 } 1903 1904 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1905 struct radeon_cs_packet *pkt, 1906 struct radeon_bo *robj) 1907 { 1908 unsigned idx; 1909 u32 value; 1910 idx = pkt->idx + 1; 1911 value = radeon_get_ib_value(p, idx + 2); 1912 if ((value + 1) > radeon_bo_size(robj)) { 1913 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1914 "(need %u have %lu) !\n", 1915 value + 1, 1916 radeon_bo_size(robj)); 1917 return -EINVAL; 1918 } 1919 return 0; 1920 } 1921 1922 static int r100_packet3_check(struct radeon_cs_parser *p, 1923 struct radeon_cs_packet *pkt) 1924 { 1925 struct radeon_bo_list *reloc; 1926 struct r100_cs_track *track; 1927 unsigned idx; 1928 volatile uint32_t *ib; 1929 int r; 1930 1931 ib = p->ib.ptr; 1932 idx = pkt->idx + 1; 1933 track = (struct r100_cs_track *)p->track; 1934 switch (pkt->opcode) { 1935 case PACKET3_3D_LOAD_VBPNTR: 1936 r = r100_packet3_load_vbpntr(p, pkt, idx); 1937 if (r) 1938 return r; 1939 break; 1940 case PACKET3_INDX_BUFFER: 1941 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1942 if (r) { 1943 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1944 radeon_cs_dump_packet(p, pkt); 1945 return r; 1946 } 1947 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); 1948 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1949 if (r) { 1950 return r; 1951 } 1952 break; 1953 case 0x23: 1954 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1955 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1956 if (r) { 1957 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1958 radeon_cs_dump_packet(p, pkt); 1959 return r; 1960 } 1961 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); 1962 track->num_arrays = 1; 1963 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 1964 1965 track->arrays[0].robj = reloc->robj; 1966 track->arrays[0].esize = track->vtx_size; 1967 1968 track->max_indx = radeon_get_ib_value(p, idx+1); 1969 1970 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 1971 track->immd_dwords = pkt->count - 1; 1972 r = r100_cs_track_check(p->rdev, track); 1973 if (r) 1974 return r; 1975 break; 1976 case PACKET3_3D_DRAW_IMMD: 1977 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1978 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1979 return -EINVAL; 1980 } 1981 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 1982 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1983 track->immd_dwords = pkt->count - 1; 1984 r = r100_cs_track_check(p->rdev, track); 1985 if (r) 1986 return r; 1987 break; 1988 /* triggers drawing using in-packet vertex data */ 1989 case PACKET3_3D_DRAW_IMMD_2: 1990 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1991 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1992 return -EINVAL; 1993 } 1994 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1995 track->immd_dwords = pkt->count; 1996 r = r100_cs_track_check(p->rdev, track); 1997 if (r) 1998 return r; 1999 break; 2000 /* triggers drawing using in-packet vertex data */ 2001 case PACKET3_3D_DRAW_VBUF_2: 2002 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2003 r = r100_cs_track_check(p->rdev, track); 2004 if (r) 2005 return r; 2006 break; 2007 /* triggers drawing of vertex buffers setup elsewhere */ 2008 case PACKET3_3D_DRAW_INDX_2: 2009 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2010 r = r100_cs_track_check(p->rdev, track); 2011 if (r) 2012 return r; 2013 break; 2014 /* triggers drawing using indices to vertex buffer */ 2015 case PACKET3_3D_DRAW_VBUF: 2016 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2017 r = r100_cs_track_check(p->rdev, track); 2018 if (r) 2019 return r; 2020 break; 2021 /* triggers drawing of vertex buffers setup elsewhere */ 2022 case PACKET3_3D_DRAW_INDX: 2023 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2024 r = r100_cs_track_check(p->rdev, track); 2025 if (r) 2026 return r; 2027 break; 2028 /* triggers drawing using indices to vertex buffer */ 2029 case PACKET3_3D_CLEAR_HIZ: 2030 case PACKET3_3D_CLEAR_ZMASK: 2031 if (p->rdev->hyperz_filp != p->filp) 2032 return -EINVAL; 2033 break; 2034 case PACKET3_NOP: 2035 break; 2036 default: 2037 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2038 return -EINVAL; 2039 } 2040 return 0; 2041 } 2042 2043 int r100_cs_parse(struct radeon_cs_parser *p) 2044 { 2045 struct radeon_cs_packet pkt; 2046 struct r100_cs_track *track; 2047 int r; 2048 2049 track = kzalloc(sizeof(*track), GFP_KERNEL); 2050 if (!track) 2051 return -ENOMEM; 2052 r100_cs_track_clear(p->rdev, track); 2053 p->track = track; 2054 do { 2055 r = radeon_cs_packet_parse(p, &pkt, p->idx); 2056 if (r) { 2057 return r; 2058 } 2059 p->idx += pkt.count + 2; 2060 switch (pkt.type) { 2061 case RADEON_PACKET_TYPE0: 2062 if (p->rdev->family >= CHIP_R200) 2063 r = r100_cs_parse_packet0(p, &pkt, 2064 p->rdev->config.r100.reg_safe_bm, 2065 p->rdev->config.r100.reg_safe_bm_size, 2066 &r200_packet0_check); 2067 else 2068 r = r100_cs_parse_packet0(p, &pkt, 2069 p->rdev->config.r100.reg_safe_bm, 2070 p->rdev->config.r100.reg_safe_bm_size, 2071 &r100_packet0_check); 2072 break; 2073 case RADEON_PACKET_TYPE2: 2074 break; 2075 case RADEON_PACKET_TYPE3: 2076 r = r100_packet3_check(p, &pkt); 2077 break; 2078 default: 2079 DRM_ERROR("Unknown packet type %d !\n", 2080 pkt.type); 2081 return -EINVAL; 2082 } 2083 if (r) 2084 return r; 2085 } while (p->idx < p->chunk_ib->length_dw); 2086 return 0; 2087 } 2088 2089 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2090 { 2091 DRM_ERROR("pitch %d\n", t->pitch); 2092 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2093 DRM_ERROR("width %d\n", t->width); 2094 DRM_ERROR("width_11 %d\n", t->width_11); 2095 DRM_ERROR("height %d\n", t->height); 2096 DRM_ERROR("height_11 %d\n", t->height_11); 2097 DRM_ERROR("num levels %d\n", t->num_levels); 2098 DRM_ERROR("depth %d\n", t->txdepth); 2099 DRM_ERROR("bpp %d\n", t->cpp); 2100 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2101 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2102 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2103 DRM_ERROR("compress format %d\n", t->compress_format); 2104 } 2105 2106 static int r100_track_compress_size(int compress_format, int w, int h) 2107 { 2108 int block_width, block_height, block_bytes; 2109 int wblocks, hblocks; 2110 int min_wblocks; 2111 int sz; 2112 2113 block_width = 4; 2114 block_height = 4; 2115 2116 switch (compress_format) { 2117 case R100_TRACK_COMP_DXT1: 2118 block_bytes = 8; 2119 min_wblocks = 4; 2120 break; 2121 default: 2122 case R100_TRACK_COMP_DXT35: 2123 block_bytes = 16; 2124 min_wblocks = 2; 2125 break; 2126 } 2127 2128 hblocks = (h + block_height - 1) / block_height; 2129 wblocks = (w + block_width - 1) / block_width; 2130 if (wblocks < min_wblocks) 2131 wblocks = min_wblocks; 2132 sz = wblocks * hblocks * block_bytes; 2133 return sz; 2134 } 2135 2136 static int r100_cs_track_cube(struct radeon_device *rdev, 2137 struct r100_cs_track *track, unsigned idx) 2138 { 2139 unsigned face, w, h; 2140 struct radeon_bo *cube_robj; 2141 unsigned long size; 2142 unsigned compress_format = track->textures[idx].compress_format; 2143 2144 for (face = 0; face < 5; face++) { 2145 cube_robj = track->textures[idx].cube_info[face].robj; 2146 w = track->textures[idx].cube_info[face].width; 2147 h = track->textures[idx].cube_info[face].height; 2148 2149 if (compress_format) { 2150 size = r100_track_compress_size(compress_format, w, h); 2151 } else 2152 size = w * h; 2153 size *= track->textures[idx].cpp; 2154 2155 size += track->textures[idx].cube_info[face].offset; 2156 2157 if (size > radeon_bo_size(cube_robj)) { 2158 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2159 size, radeon_bo_size(cube_robj)); 2160 r100_cs_track_texture_print(&track->textures[idx]); 2161 return -1; 2162 } 2163 } 2164 return 0; 2165 } 2166 2167 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2168 struct r100_cs_track *track) 2169 { 2170 struct radeon_bo *robj; 2171 unsigned long size; 2172 unsigned u, i, w, h, d; 2173 int ret; 2174 2175 for (u = 0; u < track->num_texture; u++) { 2176 if (!track->textures[u].enabled) 2177 continue; 2178 if (track->textures[u].lookup_disable) 2179 continue; 2180 robj = track->textures[u].robj; 2181 if (robj == NULL) { 2182 DRM_ERROR("No texture bound to unit %u\n", u); 2183 return -EINVAL; 2184 } 2185 size = 0; 2186 for (i = 0; i <= track->textures[u].num_levels; i++) { 2187 if (track->textures[u].use_pitch) { 2188 if (rdev->family < CHIP_R300) 2189 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2190 else 2191 w = track->textures[u].pitch / (1 << i); 2192 } else { 2193 w = track->textures[u].width; 2194 if (rdev->family >= CHIP_RV515) 2195 w |= track->textures[u].width_11; 2196 w = w / (1 << i); 2197 if (track->textures[u].roundup_w) 2198 w = roundup_pow_of_two(w); 2199 } 2200 h = track->textures[u].height; 2201 if (rdev->family >= CHIP_RV515) 2202 h |= track->textures[u].height_11; 2203 h = h / (1 << i); 2204 if (track->textures[u].roundup_h) 2205 h = roundup_pow_of_two(h); 2206 if (track->textures[u].tex_coord_type == 1) { 2207 d = (1 << track->textures[u].txdepth) / (1 << i); 2208 if (!d) 2209 d = 1; 2210 } else { 2211 d = 1; 2212 } 2213 if (track->textures[u].compress_format) { 2214 2215 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2216 /* compressed textures are block based */ 2217 } else 2218 size += w * h * d; 2219 } 2220 size *= track->textures[u].cpp; 2221 2222 switch (track->textures[u].tex_coord_type) { 2223 case 0: 2224 case 1: 2225 break; 2226 case 2: 2227 if (track->separate_cube) { 2228 ret = r100_cs_track_cube(rdev, track, u); 2229 if (ret) 2230 return ret; 2231 } else 2232 size *= 6; 2233 break; 2234 default: 2235 DRM_ERROR("Invalid texture coordinate type %u for unit " 2236 "%u\n", track->textures[u].tex_coord_type, u); 2237 return -EINVAL; 2238 } 2239 if (size > radeon_bo_size(robj)) { 2240 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2241 "%lu\n", u, size, radeon_bo_size(robj)); 2242 r100_cs_track_texture_print(&track->textures[u]); 2243 return -EINVAL; 2244 } 2245 } 2246 return 0; 2247 } 2248 2249 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2250 { 2251 unsigned i; 2252 unsigned long size; 2253 unsigned prim_walk; 2254 unsigned nverts; 2255 unsigned num_cb = track->cb_dirty ? track->num_cb : 0; 2256 2257 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2258 !track->blend_read_enable) 2259 num_cb = 0; 2260 2261 for (i = 0; i < num_cb; i++) { 2262 if (track->cb[i].robj == NULL) { 2263 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2264 return -EINVAL; 2265 } 2266 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2267 size += track->cb[i].offset; 2268 if (size > radeon_bo_size(track->cb[i].robj)) { 2269 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2270 "(need %lu have %lu) !\n", i, size, 2271 radeon_bo_size(track->cb[i].robj)); 2272 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2273 i, track->cb[i].pitch, track->cb[i].cpp, 2274 track->cb[i].offset, track->maxy); 2275 return -EINVAL; 2276 } 2277 } 2278 track->cb_dirty = false; 2279 2280 if (track->zb_dirty && track->z_enabled) { 2281 if (track->zb.robj == NULL) { 2282 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2283 return -EINVAL; 2284 } 2285 size = track->zb.pitch * track->zb.cpp * track->maxy; 2286 size += track->zb.offset; 2287 if (size > radeon_bo_size(track->zb.robj)) { 2288 DRM_ERROR("[drm] Buffer too small for z buffer " 2289 "(need %lu have %lu) !\n", size, 2290 radeon_bo_size(track->zb.robj)); 2291 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2292 track->zb.pitch, track->zb.cpp, 2293 track->zb.offset, track->maxy); 2294 return -EINVAL; 2295 } 2296 } 2297 track->zb_dirty = false; 2298 2299 if (track->aa_dirty && track->aaresolve) { 2300 if (track->aa.robj == NULL) { 2301 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2302 return -EINVAL; 2303 } 2304 /* I believe the format comes from colorbuffer0. */ 2305 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2306 size += track->aa.offset; 2307 if (size > radeon_bo_size(track->aa.robj)) { 2308 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2309 "(need %lu have %lu) !\n", i, size, 2310 radeon_bo_size(track->aa.robj)); 2311 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2312 i, track->aa.pitch, track->cb[0].cpp, 2313 track->aa.offset, track->maxy); 2314 return -EINVAL; 2315 } 2316 } 2317 track->aa_dirty = false; 2318 2319 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2320 if (track->vap_vf_cntl & (1 << 14)) { 2321 nverts = track->vap_alt_nverts; 2322 } else { 2323 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2324 } 2325 switch (prim_walk) { 2326 case 1: 2327 for (i = 0; i < track->num_arrays; i++) { 2328 size = track->arrays[i].esize * track->max_indx * 4; 2329 if (track->arrays[i].robj == NULL) { 2330 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2331 "bound\n", prim_walk, i); 2332 return -EINVAL; 2333 } 2334 if (size > radeon_bo_size(track->arrays[i].robj)) { 2335 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2336 "need %lu dwords have %lu dwords\n", 2337 prim_walk, i, size >> 2, 2338 radeon_bo_size(track->arrays[i].robj) 2339 >> 2); 2340 DRM_ERROR("Max indices %u\n", track->max_indx); 2341 return -EINVAL; 2342 } 2343 } 2344 break; 2345 case 2: 2346 for (i = 0; i < track->num_arrays; i++) { 2347 size = track->arrays[i].esize * (nverts - 1) * 4; 2348 if (track->arrays[i].robj == NULL) { 2349 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2350 "bound\n", prim_walk, i); 2351 return -EINVAL; 2352 } 2353 if (size > radeon_bo_size(track->arrays[i].robj)) { 2354 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2355 "need %lu dwords have %lu dwords\n", 2356 prim_walk, i, size >> 2, 2357 radeon_bo_size(track->arrays[i].robj) 2358 >> 2); 2359 return -EINVAL; 2360 } 2361 } 2362 break; 2363 case 3: 2364 size = track->vtx_size * nverts; 2365 if (size != track->immd_dwords) { 2366 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", 2367 track->immd_dwords, size); 2368 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2369 nverts, track->vtx_size); 2370 return -EINVAL; 2371 } 2372 break; 2373 default: 2374 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2375 prim_walk); 2376 return -EINVAL; 2377 } 2378 2379 if (track->tex_dirty) { 2380 track->tex_dirty = false; 2381 return r100_cs_track_texture_check(rdev, track); 2382 } 2383 return 0; 2384 } 2385 2386 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 2387 { 2388 unsigned i, face; 2389 2390 track->cb_dirty = true; 2391 track->zb_dirty = true; 2392 track->tex_dirty = true; 2393 track->aa_dirty = true; 2394 2395 if (rdev->family < CHIP_R300) { 2396 track->num_cb = 1; 2397 if (rdev->family <= CHIP_RS200) 2398 track->num_texture = 3; 2399 else 2400 track->num_texture = 6; 2401 track->maxy = 2048; 2402 track->separate_cube = 1; 2403 } else { 2404 track->num_cb = 4; 2405 track->num_texture = 16; 2406 track->maxy = 4096; 2407 track->separate_cube = 0; 2408 track->aaresolve = false; 2409 track->aa.robj = NULL; 2410 } 2411 2412 for (i = 0; i < track->num_cb; i++) { 2413 track->cb[i].robj = NULL; 2414 track->cb[i].pitch = 8192; 2415 track->cb[i].cpp = 16; 2416 track->cb[i].offset = 0; 2417 } 2418 track->z_enabled = true; 2419 track->zb.robj = NULL; 2420 track->zb.pitch = 8192; 2421 track->zb.cpp = 4; 2422 track->zb.offset = 0; 2423 track->vtx_size = 0x7F; 2424 track->immd_dwords = 0xFFFFFFFFUL; 2425 track->num_arrays = 11; 2426 track->max_indx = 0x00FFFFFFUL; 2427 for (i = 0; i < track->num_arrays; i++) { 2428 track->arrays[i].robj = NULL; 2429 track->arrays[i].esize = 0x7F; 2430 } 2431 for (i = 0; i < track->num_texture; i++) { 2432 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 2433 track->textures[i].pitch = 16536; 2434 track->textures[i].width = 16536; 2435 track->textures[i].height = 16536; 2436 track->textures[i].width_11 = 1 << 11; 2437 track->textures[i].height_11 = 1 << 11; 2438 track->textures[i].num_levels = 12; 2439 if (rdev->family <= CHIP_RS200) { 2440 track->textures[i].tex_coord_type = 0; 2441 track->textures[i].txdepth = 0; 2442 } else { 2443 track->textures[i].txdepth = 16; 2444 track->textures[i].tex_coord_type = 1; 2445 } 2446 track->textures[i].cpp = 64; 2447 track->textures[i].robj = NULL; 2448 /* CS IB emission code makes sure texture unit are disabled */ 2449 track->textures[i].enabled = false; 2450 track->textures[i].lookup_disable = false; 2451 track->textures[i].roundup_w = true; 2452 track->textures[i].roundup_h = true; 2453 if (track->separate_cube) 2454 for (face = 0; face < 5; face++) { 2455 track->textures[i].cube_info[face].robj = NULL; 2456 track->textures[i].cube_info[face].width = 16536; 2457 track->textures[i].cube_info[face].height = 16536; 2458 track->textures[i].cube_info[face].offset = 0; 2459 } 2460 } 2461 } 2462 2463 /* 2464 * Global GPU functions 2465 */ 2466 static void r100_errata(struct radeon_device *rdev) 2467 { 2468 rdev->pll_errata = 0; 2469 2470 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { 2471 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; 2472 } 2473 2474 if (rdev->family == CHIP_RV100 || 2475 rdev->family == CHIP_RS100 || 2476 rdev->family == CHIP_RS200) { 2477 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; 2478 } 2479 } 2480 2481 static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) 2482 { 2483 unsigned i; 2484 uint32_t tmp; 2485 2486 for (i = 0; i < rdev->usec_timeout; i++) { 2487 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2488 if (tmp >= n) { 2489 return 0; 2490 } 2491 DRM_UDELAY(1); 2492 } 2493 return -1; 2494 } 2495 2496 int r100_gui_wait_for_idle(struct radeon_device *rdev) 2497 { 2498 unsigned i; 2499 uint32_t tmp; 2500 2501 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2502 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" 2503 " Bad things might happen.\n"); 2504 } 2505 for (i = 0; i < rdev->usec_timeout; i++) { 2506 tmp = RREG32(RADEON_RBBM_STATUS); 2507 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2508 return 0; 2509 } 2510 DRM_UDELAY(1); 2511 } 2512 return -1; 2513 } 2514 2515 int r100_mc_wait_for_idle(struct radeon_device *rdev) 2516 { 2517 unsigned i; 2518 uint32_t tmp; 2519 2520 for (i = 0; i < rdev->usec_timeout; i++) { 2521 /* read MC_STATUS */ 2522 tmp = RREG32(RADEON_MC_STATUS); 2523 if (tmp & RADEON_MC_IDLE) { 2524 return 0; 2525 } 2526 DRM_UDELAY(1); 2527 } 2528 return -1; 2529 } 2530 2531 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2532 { 2533 u32 rbbm_status; 2534 2535 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2536 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2537 radeon_ring_lockup_update(rdev, ring); 2538 return false; 2539 } 2540 return radeon_ring_test_lockup(rdev, ring); 2541 } 2542 2543 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2544 void r100_enable_bm(struct radeon_device *rdev) 2545 { 2546 uint32_t tmp; 2547 /* Enable bus mastering */ 2548 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2549 WREG32(RADEON_BUS_CNTL, tmp); 2550 } 2551 2552 void r100_bm_disable(struct radeon_device *rdev) 2553 { 2554 u32 tmp; 2555 2556 /* disable bus mastering */ 2557 tmp = RREG32(R_000030_BUS_CNTL); 2558 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2559 mdelay(1); 2560 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2561 mdelay(1); 2562 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2563 tmp = RREG32(RADEON_BUS_CNTL); 2564 mdelay(1); 2565 pci_disable_busmaster(rdev->dev->bsddev); 2566 mdelay(1); 2567 } 2568 2569 int r100_asic_reset(struct radeon_device *rdev, bool hard) 2570 { 2571 struct r100_mc_save save; 2572 u32 status, tmp; 2573 int ret = 0; 2574 2575 status = RREG32(R_000E40_RBBM_STATUS); 2576 if (!G_000E40_GUI_ACTIVE(status)) { 2577 return 0; 2578 } 2579 r100_mc_stop(rdev, &save); 2580 status = RREG32(R_000E40_RBBM_STATUS); 2581 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2582 /* stop CP */ 2583 WREG32(RADEON_CP_CSQ_CNTL, 0); 2584 tmp = RREG32(RADEON_CP_RB_CNTL); 2585 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2586 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2587 WREG32(RADEON_CP_RB_WPTR, 0); 2588 WREG32(RADEON_CP_RB_CNTL, tmp); 2589 /* save PCI state */ 2590 pci_save_state(device_get_parent(rdev->dev->bsddev)); 2591 /* disable bus mastering */ 2592 r100_bm_disable(rdev); 2593 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2594 S_0000F0_SOFT_RESET_RE(1) | 2595 S_0000F0_SOFT_RESET_PP(1) | 2596 S_0000F0_SOFT_RESET_RB(1)); 2597 RREG32(R_0000F0_RBBM_SOFT_RESET); 2598 mdelay(500); 2599 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2600 mdelay(1); 2601 status = RREG32(R_000E40_RBBM_STATUS); 2602 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2603 /* reset CP */ 2604 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); 2605 RREG32(R_0000F0_RBBM_SOFT_RESET); 2606 mdelay(500); 2607 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2608 mdelay(1); 2609 status = RREG32(R_000E40_RBBM_STATUS); 2610 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2611 /* restore PCI & busmastering */ 2612 pci_restore_state(device_get_parent(rdev->dev->bsddev)); 2613 r100_enable_bm(rdev); 2614 /* Check if GPU is idle */ 2615 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2616 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2617 dev_err(rdev->dev, "failed to reset GPU\n"); 2618 ret = -1; 2619 } else 2620 dev_info(rdev->dev, "GPU reset succeed\n"); 2621 r100_mc_resume(rdev, &save); 2622 return ret; 2623 } 2624 2625 void r100_set_common_regs(struct radeon_device *rdev) 2626 { 2627 struct drm_device *dev = rdev->ddev; 2628 bool force_dac2 = false; 2629 u32 tmp; 2630 2631 /* set these so they don't interfere with anything */ 2632 WREG32(RADEON_OV0_SCALE_CNTL, 0); 2633 WREG32(RADEON_SUBPIC_CNTL, 0); 2634 WREG32(RADEON_VIPH_CONTROL, 0); 2635 WREG32(RADEON_I2C_CNTL_1, 0); 2636 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 2637 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 2638 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 2639 2640 /* always set up dac2 on rn50 and some rv100 as lots 2641 * of servers seem to wire it up to a VGA port but 2642 * don't report it in the bios connector 2643 * table. 2644 */ 2645 switch (dev->pdev->device) { 2646 /* RN50 */ 2647 case 0x515e: 2648 case 0x5969: 2649 force_dac2 = true; 2650 break; 2651 /* RV100*/ 2652 case 0x5159: 2653 case 0x515a: 2654 /* DELL triple head servers */ 2655 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && 2656 ((dev->pdev->subsystem_device == 0x016c) || 2657 (dev->pdev->subsystem_device == 0x016d) || 2658 (dev->pdev->subsystem_device == 0x016e) || 2659 (dev->pdev->subsystem_device == 0x016f) || 2660 (dev->pdev->subsystem_device == 0x0170) || 2661 (dev->pdev->subsystem_device == 0x017d) || 2662 (dev->pdev->subsystem_device == 0x017e) || 2663 (dev->pdev->subsystem_device == 0x0183) || 2664 (dev->pdev->subsystem_device == 0x018a) || 2665 (dev->pdev->subsystem_device == 0x019a))) 2666 force_dac2 = true; 2667 break; 2668 } 2669 2670 if (force_dac2) { 2671 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 2672 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 2673 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 2674 2675 /* For CRT on DAC2, don't turn it on if BIOS didn't 2676 enable it, even it's detected. 2677 */ 2678 2679 /* force it to crtc0 */ 2680 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 2681 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 2682 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 2683 2684 /* set up the TV DAC */ 2685 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 2686 RADEON_TV_DAC_STD_MASK | 2687 RADEON_TV_DAC_RDACPD | 2688 RADEON_TV_DAC_GDACPD | 2689 RADEON_TV_DAC_BDACPD | 2690 RADEON_TV_DAC_BGADJ_MASK | 2691 RADEON_TV_DAC_DACADJ_MASK); 2692 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 2693 RADEON_TV_DAC_NHOLD | 2694 RADEON_TV_DAC_STD_PS2 | 2695 (0x58 << 16)); 2696 2697 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 2698 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 2699 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 2700 } 2701 2702 /* switch PM block to ACPI mode */ 2703 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); 2704 tmp &= ~RADEON_PM_MODE_SEL; 2705 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); 2706 2707 } 2708 2709 /* 2710 * VRAM info 2711 */ 2712 static void r100_vram_get_type(struct radeon_device *rdev) 2713 { 2714 uint32_t tmp; 2715 2716 rdev->mc.vram_is_ddr = false; 2717 if (rdev->flags & RADEON_IS_IGP) 2718 rdev->mc.vram_is_ddr = true; 2719 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 2720 rdev->mc.vram_is_ddr = true; 2721 if ((rdev->family == CHIP_RV100) || 2722 (rdev->family == CHIP_RS100) || 2723 (rdev->family == CHIP_RS200)) { 2724 tmp = RREG32(RADEON_MEM_CNTL); 2725 if (tmp & RV100_HALF_MODE) { 2726 rdev->mc.vram_width = 32; 2727 } else { 2728 rdev->mc.vram_width = 64; 2729 } 2730 if (rdev->flags & RADEON_SINGLE_CRTC) { 2731 rdev->mc.vram_width /= 4; 2732 rdev->mc.vram_is_ddr = true; 2733 } 2734 } else if (rdev->family <= CHIP_RV280) { 2735 tmp = RREG32(RADEON_MEM_CNTL); 2736 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 2737 rdev->mc.vram_width = 128; 2738 } else { 2739 rdev->mc.vram_width = 64; 2740 } 2741 } else { 2742 /* newer IGPs */ 2743 rdev->mc.vram_width = 128; 2744 } 2745 } 2746 2747 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 2748 { 2749 u32 aper_size; 2750 u8 byte; 2751 2752 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2753 2754 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 2755 * that is has the 2nd generation multifunction PCI interface 2756 */ 2757 if (rdev->family == CHIP_RV280 || 2758 rdev->family >= CHIP_RV350) { 2759 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 2760 ~RADEON_HDP_APER_CNTL); 2761 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 2762 return aper_size * 2; 2763 } 2764 2765 /* Older cards have all sorts of funny issues to deal with. First 2766 * check if it's a multifunction card by reading the PCI config 2767 * header type... Limit those to one aperture size 2768 */ 2769 byte = pci_read_config(rdev->dev->bsddev, 0xe, 1); 2770 if (byte & 0x80) { 2771 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 2772 DRM_INFO("Limiting VRAM to one aperture\n"); 2773 return aper_size; 2774 } 2775 2776 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 2777 * have set it up. We don't write this as it's broken on some ASICs but 2778 * we expect the BIOS to have done the right thing (might be too optimistic...) 2779 */ 2780 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 2781 return aper_size * 2; 2782 return aper_size; 2783 } 2784 2785 void r100_vram_init_sizes(struct radeon_device *rdev) 2786 { 2787 u64 config_aper_size; 2788 2789 /* work out accessible VRAM */ 2790 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 2791 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 2792 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); 2793 /* FIXME we don't use the second aperture yet when we could use it */ 2794 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2795 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2796 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2797 if (rdev->flags & RADEON_IS_IGP) { 2798 uint32_t tom; 2799 /* read NB_TOM to get the amount of ram stolen for the GPU */ 2800 tom = RREG32(RADEON_NB_TOM); 2801 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 2802 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2803 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2804 } else { 2805 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 2806 /* Some production boards of m6 will report 0 2807 * if it's 8 MB 2808 */ 2809 if (rdev->mc.real_vram_size == 0) { 2810 rdev->mc.real_vram_size = 8192 * 1024; 2811 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2812 } 2813 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 2814 * Novell bug 204882 + along with lots of ubuntu ones 2815 */ 2816 if (rdev->mc.aper_size > config_aper_size) 2817 config_aper_size = rdev->mc.aper_size; 2818 2819 if (config_aper_size > rdev->mc.real_vram_size) 2820 rdev->mc.mc_vram_size = config_aper_size; 2821 else 2822 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2823 } 2824 } 2825 2826 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2827 { 2828 uint32_t temp; 2829 2830 temp = RREG32(RADEON_CONFIG_CNTL); 2831 if (state == false) { 2832 temp &= ~RADEON_CFG_VGA_RAM_EN; 2833 temp |= RADEON_CFG_VGA_IO_DIS; 2834 } else { 2835 temp &= ~RADEON_CFG_VGA_IO_DIS; 2836 } 2837 WREG32(RADEON_CONFIG_CNTL, temp); 2838 } 2839 2840 static void r100_mc_init(struct radeon_device *rdev) 2841 { 2842 u64 base; 2843 2844 r100_vram_get_type(rdev); 2845 r100_vram_init_sizes(rdev); 2846 base = rdev->mc.aper_base; 2847 if (rdev->flags & RADEON_IS_IGP) 2848 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2849 radeon_vram_location(rdev, &rdev->mc, base); 2850 rdev->mc.gtt_base_align = 0; 2851 if (!(rdev->flags & RADEON_IS_AGP)) 2852 radeon_gtt_location(rdev, &rdev->mc); 2853 radeon_update_bandwidth_info(rdev); 2854 } 2855 2856 2857 /* 2858 * Indirect registers accessor 2859 */ 2860 void r100_pll_errata_after_index(struct radeon_device *rdev) 2861 { 2862 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { 2863 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2864 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2865 } 2866 } 2867 2868 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2869 { 2870 /* This workarounds is necessary on RV100, RS100 and RS200 chips 2871 * or the chip could hang on a subsequent access 2872 */ 2873 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2874 mdelay(5); 2875 } 2876 2877 /* This function is required to workaround a hardware bug in some (all?) 2878 * revisions of the R300. This workaround should be called after every 2879 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2880 * may not be correct. 2881 */ 2882 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2883 uint32_t save, tmp; 2884 2885 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2886 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2887 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2888 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2889 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2890 } 2891 } 2892 2893 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2894 { 2895 uint32_t data; 2896 2897 lockmgr(&rdev->pll_idx_lock, LK_EXCLUSIVE); 2898 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2899 r100_pll_errata_after_index(rdev); 2900 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2901 r100_pll_errata_after_data(rdev); 2902 lockmgr(&rdev->pll_idx_lock, LK_RELEASE); 2903 return data; 2904 } 2905 2906 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2907 { 2908 lockmgr(&rdev->pll_idx_lock, LK_EXCLUSIVE); 2909 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2910 r100_pll_errata_after_index(rdev); 2911 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2912 r100_pll_errata_after_data(rdev); 2913 lockmgr(&rdev->pll_idx_lock, LK_RELEASE); 2914 } 2915 2916 static void r100_set_safe_registers(struct radeon_device *rdev) 2917 { 2918 if (ASIC_IS_RN50(rdev)) { 2919 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2920 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); 2921 } else if (rdev->family < CHIP_R200) { 2922 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2923 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 2924 } else { 2925 r200_set_safe_registers(rdev); 2926 } 2927 } 2928 2929 /* 2930 * Debugfs info 2931 */ 2932 #if defined(CONFIG_DEBUG_FS) 2933 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2934 { 2935 struct drm_info_node *node = (struct drm_info_node *) m->private; 2936 struct drm_device *dev = node->minor->dev; 2937 struct radeon_device *rdev = dev->dev_private; 2938 uint32_t reg, value; 2939 unsigned i; 2940 2941 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2942 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2943 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2944 for (i = 0; i < 64; i++) { 2945 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 2946 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 2947 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 2948 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 2949 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 2950 } 2951 return 0; 2952 } 2953 2954 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 2955 { 2956 struct drm_info_node *node = (struct drm_info_node *) m->private; 2957 struct drm_device *dev = node->minor->dev; 2958 struct radeon_device *rdev = dev->dev_private; 2959 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2960 uint32_t rdp, wdp; 2961 unsigned count, i, j; 2962 2963 radeon_ring_free_size(rdev, ring); 2964 rdp = RREG32(RADEON_CP_RB_RPTR); 2965 wdp = RREG32(RADEON_CP_RB_WPTR); 2966 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 2967 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2968 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2969 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2970 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2971 seq_printf(m, "%u dwords in ring\n", count); 2972 if (ring->ready) { 2973 for (j = 0; j <= count; j++) { 2974 i = (rdp + j) & ring->ptr_mask; 2975 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2976 } 2977 } 2978 return 0; 2979 } 2980 2981 2982 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 2983 { 2984 struct drm_info_node *node = (struct drm_info_node *) m->private; 2985 struct drm_device *dev = node->minor->dev; 2986 struct radeon_device *rdev = dev->dev_private; 2987 uint32_t csq_stat, csq2_stat, tmp; 2988 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 2989 unsigned i; 2990 2991 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2992 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 2993 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 2994 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 2995 r_rptr = (csq_stat >> 0) & 0x3ff; 2996 r_wptr = (csq_stat >> 10) & 0x3ff; 2997 ib1_rptr = (csq_stat >> 20) & 0x3ff; 2998 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 2999 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 3000 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 3001 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 3002 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 3003 seq_printf(m, "Ring rptr %u\n", r_rptr); 3004 seq_printf(m, "Ring wptr %u\n", r_wptr); 3005 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 3006 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 3007 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 3008 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 3009 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 3010 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 3011 seq_printf(m, "Ring fifo:\n"); 3012 for (i = 0; i < 256; i++) { 3013 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3014 tmp = RREG32(RADEON_CP_CSQ_DATA); 3015 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 3016 } 3017 seq_printf(m, "Indirect1 fifo:\n"); 3018 for (i = 256; i <= 512; i++) { 3019 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3020 tmp = RREG32(RADEON_CP_CSQ_DATA); 3021 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3022 } 3023 seq_printf(m, "Indirect2 fifo:\n"); 3024 for (i = 640; i < ib1_wptr; i++) { 3025 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3026 tmp = RREG32(RADEON_CP_CSQ_DATA); 3027 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3028 } 3029 return 0; 3030 } 3031 3032 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3033 { 3034 struct drm_info_node *node = (struct drm_info_node *) m->private; 3035 struct drm_device *dev = node->minor->dev; 3036 struct radeon_device *rdev = dev->dev_private; 3037 uint32_t tmp; 3038 3039 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3040 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3041 tmp = RREG32(RADEON_MC_FB_LOCATION); 3042 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3043 tmp = RREG32(RADEON_BUS_CNTL); 3044 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3045 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3046 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3047 tmp = RREG32(RADEON_AGP_BASE); 3048 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3049 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3050 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3051 tmp = RREG32(0x01D0); 3052 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3053 tmp = RREG32(RADEON_AIC_LO_ADDR); 3054 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3055 tmp = RREG32(RADEON_AIC_HI_ADDR); 3056 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3057 tmp = RREG32(0x01E4); 3058 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3059 return 0; 3060 } 3061 3062 static struct drm_info_list r100_debugfs_rbbm_list[] = { 3063 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3064 }; 3065 3066 static struct drm_info_list r100_debugfs_cp_list[] = { 3067 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3068 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3069 }; 3070 3071 static struct drm_info_list r100_debugfs_mc_info_list[] = { 3072 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3073 }; 3074 #endif 3075 3076 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3077 { 3078 #if defined(CONFIG_DEBUG_FS) 3079 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 3080 #else 3081 return 0; 3082 #endif 3083 } 3084 3085 int r100_debugfs_cp_init(struct radeon_device *rdev) 3086 { 3087 #if defined(CONFIG_DEBUG_FS) 3088 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 3089 #else 3090 return 0; 3091 #endif 3092 } 3093 3094 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 3095 { 3096 #if defined(CONFIG_DEBUG_FS) 3097 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 3098 #else 3099 return 0; 3100 #endif 3101 } 3102 3103 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 3104 uint32_t tiling_flags, uint32_t pitch, 3105 uint32_t offset, uint32_t obj_size) 3106 { 3107 int surf_index = reg * 16; 3108 int flags = 0; 3109 3110 if (rdev->family <= CHIP_RS200) { 3111 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3112 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3113 flags |= RADEON_SURF_TILE_COLOR_BOTH; 3114 if (tiling_flags & RADEON_TILING_MACRO) 3115 flags |= RADEON_SURF_TILE_COLOR_MACRO; 3116 /* setting pitch to 0 disables tiling */ 3117 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3118 == 0) 3119 pitch = 0; 3120 } else if (rdev->family <= CHIP_RV280) { 3121 if (tiling_flags & (RADEON_TILING_MACRO)) 3122 flags |= R200_SURF_TILE_COLOR_MACRO; 3123 if (tiling_flags & RADEON_TILING_MICRO) 3124 flags |= R200_SURF_TILE_COLOR_MICRO; 3125 } else { 3126 if (tiling_flags & RADEON_TILING_MACRO) 3127 flags |= R300_SURF_TILE_MACRO; 3128 if (tiling_flags & RADEON_TILING_MICRO) 3129 flags |= R300_SURF_TILE_MICRO; 3130 } 3131 3132 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 3133 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 3134 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 3135 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 3136 3137 /* r100/r200 divide by 16 */ 3138 if (rdev->family < CHIP_R300) 3139 flags |= pitch / 16; 3140 else 3141 flags |= pitch / 8; 3142 3143 3144 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3145 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3146 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3147 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3148 return 0; 3149 } 3150 3151 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3152 { 3153 int surf_index = reg * 16; 3154 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3155 } 3156 3157 void r100_bandwidth_update(struct radeon_device *rdev) 3158 { 3159 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3160 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3161 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; 3162 fixed20_12 crit_point_ff = {0}; 3163 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3164 fixed20_12 memtcas_ff[8] = { 3165 dfixed_init(1), 3166 dfixed_init(2), 3167 dfixed_init(3), 3168 dfixed_init(0), 3169 dfixed_init_half(1), 3170 dfixed_init_half(2), 3171 dfixed_init(0), 3172 }; 3173 fixed20_12 memtcas_rs480_ff[8] = { 3174 dfixed_init(0), 3175 dfixed_init(1), 3176 dfixed_init(2), 3177 dfixed_init(3), 3178 dfixed_init(0), 3179 dfixed_init_half(1), 3180 dfixed_init_half(2), 3181 dfixed_init_half(3), 3182 }; 3183 fixed20_12 memtcas2_ff[8] = { 3184 dfixed_init(0), 3185 dfixed_init(1), 3186 dfixed_init(2), 3187 dfixed_init(3), 3188 dfixed_init(4), 3189 dfixed_init(5), 3190 dfixed_init(6), 3191 dfixed_init(7), 3192 }; 3193 fixed20_12 memtrbs[8] = { 3194 dfixed_init(1), 3195 dfixed_init_half(1), 3196 dfixed_init(2), 3197 dfixed_init_half(2), 3198 dfixed_init(3), 3199 dfixed_init_half(3), 3200 dfixed_init(4), 3201 dfixed_init_half(4) 3202 }; 3203 fixed20_12 memtrbs_r4xx[8] = { 3204 dfixed_init(4), 3205 dfixed_init(5), 3206 dfixed_init(6), 3207 dfixed_init(7), 3208 dfixed_init(8), 3209 dfixed_init(9), 3210 dfixed_init(10), 3211 dfixed_init(11) 3212 }; 3213 fixed20_12 min_mem_eff; 3214 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3215 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3216 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0}, 3217 disp_drain_rate2, read_return_rate; 3218 fixed20_12 time_disp1_drop_priority; 3219 int c; 3220 int cur_size = 16; /* in octawords */ 3221 int critical_point = 0, critical_point2; 3222 /* uint32_t read_return_rate, time_disp1_drop_priority; */ 3223 int stop_req, max_stop_req; 3224 struct drm_display_mode *mode1 = NULL; 3225 struct drm_display_mode *mode2 = NULL; 3226 uint32_t pixel_bytes1 = 0; 3227 uint32_t pixel_bytes2 = 0; 3228 3229 /* Guess line buffer size to be 8192 pixels */ 3230 u32 lb_size = 8192; 3231 3232 if (!rdev->mode_info.mode_config_initialized) 3233 return; 3234 3235 radeon_update_display_priority(rdev); 3236 3237 if (rdev->mode_info.crtcs[0]->base.enabled) { 3238 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 3239 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8; 3240 } 3241 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3242 if (rdev->mode_info.crtcs[1]->base.enabled) { 3243 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 3244 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8; 3245 } 3246 } 3247 3248 min_mem_eff.full = dfixed_const_8(0); 3249 /* get modes */ 3250 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 3251 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 3252 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); 3253 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); 3254 /* check crtc enables */ 3255 if (mode2) 3256 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); 3257 if (mode1) 3258 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); 3259 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); 3260 } 3261 3262 /* 3263 * determine is there is enough bw for current mode 3264 */ 3265 sclk_ff = rdev->pm.sclk; 3266 mclk_ff = rdev->pm.mclk; 3267 3268 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 3269 temp_ff.full = dfixed_const(temp); 3270 mem_bw.full = dfixed_mul(mclk_ff, temp_ff); 3271 3272 pix_clk.full = 0; 3273 pix_clk2.full = 0; 3274 peak_disp_bw.full = 0; 3275 if (mode1) { 3276 temp_ff.full = dfixed_const(1000); 3277 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ 3278 pix_clk.full = dfixed_div(pix_clk, temp_ff); 3279 temp_ff.full = dfixed_const(pixel_bytes1); 3280 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); 3281 } 3282 if (mode2) { 3283 temp_ff.full = dfixed_const(1000); 3284 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ 3285 pix_clk2.full = dfixed_div(pix_clk2, temp_ff); 3286 temp_ff.full = dfixed_const(pixel_bytes2); 3287 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); 3288 } 3289 3290 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); 3291 if (peak_disp_bw.full >= mem_bw.full) { 3292 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 3293 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 3294 } 3295 3296 /* Get values from the EXT_MEM_CNTL register...converting its contents. */ 3297 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3298 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3299 mem_trcd = ((temp >> 2) & 0x3) + 1; 3300 mem_trp = ((temp & 0x3)) + 1; 3301 mem_tras = ((temp & 0x70) >> 4) + 1; 3302 } else if (rdev->family == CHIP_R300 || 3303 rdev->family == CHIP_R350) { /* r300, r350 */ 3304 mem_trcd = (temp & 0x7) + 1; 3305 mem_trp = ((temp >> 8) & 0x7) + 1; 3306 mem_tras = ((temp >> 11) & 0xf) + 4; 3307 } else if (rdev->family == CHIP_RV350 || 3308 rdev->family <= CHIP_RV380) { 3309 /* rv3x0 */ 3310 mem_trcd = (temp & 0x7) + 3; 3311 mem_trp = ((temp >> 8) & 0x7) + 3; 3312 mem_tras = ((temp >> 11) & 0xf) + 6; 3313 } else if (rdev->family == CHIP_R420 || 3314 rdev->family == CHIP_R423 || 3315 rdev->family == CHIP_RV410) { 3316 /* r4xx */ 3317 mem_trcd = (temp & 0xf) + 3; 3318 if (mem_trcd > 15) 3319 mem_trcd = 15; 3320 mem_trp = ((temp >> 8) & 0xf) + 3; 3321 if (mem_trp > 15) 3322 mem_trp = 15; 3323 mem_tras = ((temp >> 12) & 0x1f) + 6; 3324 if (mem_tras > 31) 3325 mem_tras = 31; 3326 } else { /* RV200, R200 */ 3327 mem_trcd = (temp & 0x7) + 1; 3328 mem_trp = ((temp >> 8) & 0x7) + 1; 3329 mem_tras = ((temp >> 12) & 0xf) + 4; 3330 } 3331 /* convert to FF */ 3332 trcd_ff.full = dfixed_const(mem_trcd); 3333 trp_ff.full = dfixed_const(mem_trp); 3334 tras_ff.full = dfixed_const(mem_tras); 3335 3336 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 3337 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3338 data = (temp & (7 << 20)) >> 20; 3339 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3340 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3341 tcas_ff = memtcas_rs480_ff[data]; 3342 else 3343 tcas_ff = memtcas_ff[data]; 3344 } else 3345 tcas_ff = memtcas2_ff[data]; 3346 3347 if (rdev->family == CHIP_RS400 || 3348 rdev->family == CHIP_RS480) { 3349 /* extra cas latency stored in bits 23-25 0-4 clocks */ 3350 data = (temp >> 23) & 0x7; 3351 if (data < 5) 3352 tcas_ff.full += dfixed_const(data); 3353 } 3354 3355 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3356 /* on the R300, Tcas is included in Trbs. 3357 */ 3358 temp = RREG32(RADEON_MEM_CNTL); 3359 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3360 if (data == 1) { 3361 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3362 temp = RREG32(R300_MC_IND_INDEX); 3363 temp &= ~R300_MC_IND_ADDR_MASK; 3364 temp |= R300_MC_READ_CNTL_CD_mcind; 3365 WREG32(R300_MC_IND_INDEX, temp); 3366 temp = RREG32(R300_MC_IND_DATA); 3367 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3368 } else { 3369 temp = RREG32(R300_MC_READ_CNTL_AB); 3370 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3371 } 3372 } else { 3373 temp = RREG32(R300_MC_READ_CNTL_AB); 3374 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3375 } 3376 if (rdev->family == CHIP_RV410 || 3377 rdev->family == CHIP_R420 || 3378 rdev->family == CHIP_R423) 3379 trbs_ff = memtrbs_r4xx[data]; 3380 else 3381 trbs_ff = memtrbs[data]; 3382 tcas_ff.full += trbs_ff.full; 3383 } 3384 3385 sclk_eff_ff.full = sclk_ff.full; 3386 3387 if (rdev->flags & RADEON_IS_AGP) { 3388 fixed20_12 agpmode_ff; 3389 agpmode_ff.full = dfixed_const(radeon_agpmode); 3390 temp_ff.full = dfixed_const_666(16); 3391 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3392 } 3393 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 3394 3395 if (ASIC_IS_R300(rdev)) { 3396 sclk_delay_ff.full = dfixed_const(250); 3397 } else { 3398 if ((rdev->family == CHIP_RV100) || 3399 rdev->flags & RADEON_IS_IGP) { 3400 if (rdev->mc.vram_is_ddr) 3401 sclk_delay_ff.full = dfixed_const(41); 3402 else 3403 sclk_delay_ff.full = dfixed_const(33); 3404 } else { 3405 if (rdev->mc.vram_width == 128) 3406 sclk_delay_ff.full = dfixed_const(57); 3407 else 3408 sclk_delay_ff.full = dfixed_const(41); 3409 } 3410 } 3411 3412 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3413 3414 if (rdev->mc.vram_is_ddr) { 3415 if (rdev->mc.vram_width == 32) { 3416 k1.full = dfixed_const(40); 3417 c = 3; 3418 } else { 3419 k1.full = dfixed_const(20); 3420 c = 1; 3421 } 3422 } else { 3423 k1.full = dfixed_const(40); 3424 c = 3; 3425 } 3426 3427 temp_ff.full = dfixed_const(2); 3428 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3429 temp_ff.full = dfixed_const(c); 3430 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3431 temp_ff.full = dfixed_const(4); 3432 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3433 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3434 mc_latency_mclk.full += k1.full; 3435 3436 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3437 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3438 3439 /* 3440 HW cursor time assuming worst case of full size colour cursor. 3441 */ 3442 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3443 temp_ff.full += trcd_ff.full; 3444 if (temp_ff.full < tras_ff.full) 3445 temp_ff.full = tras_ff.full; 3446 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3447 3448 temp_ff.full = dfixed_const(cur_size); 3449 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3450 /* 3451 Find the total latency for the display data. 3452 */ 3453 disp_latency_overhead.full = dfixed_const(8); 3454 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3455 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3456 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3457 3458 if (mc_latency_mclk.full > mc_latency_sclk.full) 3459 disp_latency.full = mc_latency_mclk.full; 3460 else 3461 disp_latency.full = mc_latency_sclk.full; 3462 3463 /* setup Max GRPH_STOP_REQ default value */ 3464 if (ASIC_IS_RV100(rdev)) 3465 max_stop_req = 0x5c; 3466 else 3467 max_stop_req = 0x7c; 3468 3469 /* 3470 XXX: disp_drain_rate.full not initialized in (mode2) block 3471 Looks like a real bug. Try to report it upstream. 3472 */ 3473 #ifdef __DragonFly__ 3474 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3475 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3476 crit_point_ff.full += dfixed_const_half(0); 3477 #endif 3478 3479 if (mode1) { 3480 /* CRTC1 3481 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3482 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3483 */ 3484 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3485 3486 if (stop_req > max_stop_req) 3487 stop_req = max_stop_req; 3488 3489 /* 3490 Find the drain rate of the display buffer. 3491 */ 3492 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3493 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3494 3495 /* 3496 Find the critical point of the display buffer. 3497 */ 3498 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3499 crit_point_ff.full += dfixed_const_half(0); 3500 3501 critical_point = dfixed_trunc(crit_point_ff); 3502 3503 if (rdev->disp_priority == 2) { 3504 critical_point = 0; 3505 } 3506 3507 /* 3508 The critical point should never be above max_stop_req-4. Setting 3509 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 3510 */ 3511 if (max_stop_req - critical_point < 4) 3512 critical_point = 0; 3513 3514 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3515 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 3516 critical_point = 0x10; 3517 } 3518 3519 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3520 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3521 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3522 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3523 if ((rdev->family == CHIP_R350) && 3524 (stop_req > 0x15)) { 3525 stop_req -= 0x10; 3526 } 3527 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3528 temp |= RADEON_GRPH_BUFFER_SIZE; 3529 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3530 RADEON_GRPH_CRITICAL_AT_SOF | 3531 RADEON_GRPH_STOP_CNTL); 3532 /* 3533 Write the result into the register. 3534 */ 3535 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3536 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3537 3538 #if 0 3539 if ((rdev->family == CHIP_RS400) || 3540 (rdev->family == CHIP_RS480)) { 3541 /* attempt to program RS400 disp regs correctly ??? */ 3542 temp = RREG32(RS400_DISP1_REG_CNTL); 3543 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3544 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3545 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3546 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3547 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3548 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3549 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3550 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3551 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3552 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3553 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3554 } 3555 #endif 3556 3557 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", 3558 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3559 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3560 } 3561 3562 if (mode2) { 3563 u32 grph2_cntl; 3564 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3565 3566 if (stop_req > max_stop_req) 3567 stop_req = max_stop_req; 3568 3569 /* 3570 Find the drain rate of the display buffer. 3571 */ 3572 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3573 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3574 3575 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3576 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3577 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3578 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3579 if ((rdev->family == CHIP_R350) && 3580 (stop_req > 0x15)) { 3581 stop_req -= 0x10; 3582 } 3583 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3584 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3585 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3586 RADEON_GRPH_CRITICAL_AT_SOF | 3587 RADEON_GRPH_STOP_CNTL); 3588 3589 if ((rdev->family == CHIP_RS100) || 3590 (rdev->family == CHIP_RS200)) 3591 critical_point2 = 0; 3592 else { 3593 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3594 temp_ff.full = dfixed_const(temp); 3595 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3596 if (sclk_ff.full < temp_ff.full) 3597 temp_ff.full = sclk_ff.full; 3598 3599 read_return_rate.full = temp_ff.full; 3600 3601 if (mode1) { 3602 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3603 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3604 } else { 3605 time_disp1_drop_priority.full = 0; 3606 } 3607 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3608 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3609 crit_point_ff.full += dfixed_const_half(0); 3610 3611 critical_point2 = dfixed_trunc(crit_point_ff); 3612 3613 if (rdev->disp_priority == 2) { 3614 critical_point2 = 0; 3615 } 3616 3617 if (max_stop_req - critical_point2 < 4) 3618 critical_point2 = 0; 3619 3620 } 3621 3622 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3623 /* some R300 cards have problem with this set to 0 */ 3624 critical_point2 = 0x10; 3625 } 3626 3627 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3628 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3629 3630 if ((rdev->family == CHIP_RS400) || 3631 (rdev->family == CHIP_RS480)) { 3632 #if 0 3633 /* attempt to program RS400 disp2 regs correctly ??? */ 3634 temp = RREG32(RS400_DISP2_REQ_CNTL1); 3635 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | 3636 RS400_DISP2_STOP_REQ_LEVEL_MASK); 3637 WREG32(RS400_DISP2_REQ_CNTL1, (temp | 3638 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3639 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3640 temp = RREG32(RS400_DISP2_REQ_CNTL2); 3641 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | 3642 RS400_DISP2_CRITICAL_POINT_STOP_MASK); 3643 WREG32(RS400_DISP2_REQ_CNTL2, (temp | 3644 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | 3645 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); 3646 #endif 3647 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); 3648 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); 3649 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); 3650 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); 3651 } 3652 3653 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", 3654 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 3655 } 3656 3657 /* Save number of lines the linebuffer leads before the scanout */ 3658 if (mode1) 3659 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); 3660 3661 if (mode2) 3662 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); 3663 } 3664 3665 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 3666 { 3667 uint32_t scratch; 3668 uint32_t tmp = 0; 3669 unsigned i; 3670 int r; 3671 3672 r = radeon_scratch_get(rdev, &scratch); 3673 if (r) { 3674 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 3675 return r; 3676 } 3677 WREG32(scratch, 0xCAFEDEAD); 3678 r = radeon_ring_lock(rdev, ring, 2); 3679 if (r) { 3680 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3681 radeon_scratch_free(rdev, scratch); 3682 return r; 3683 } 3684 radeon_ring_write(ring, PACKET0(scratch, 0)); 3685 radeon_ring_write(ring, 0xDEADBEEF); 3686 radeon_ring_unlock_commit(rdev, ring, false); 3687 for (i = 0; i < rdev->usec_timeout; i++) { 3688 tmp = RREG32(scratch); 3689 if (tmp == 0xDEADBEEF) { 3690 break; 3691 } 3692 DRM_UDELAY(1); 3693 } 3694 if (i < rdev->usec_timeout) { 3695 DRM_INFO("ring test succeeded in %d usecs\n", i); 3696 } else { 3697 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", 3698 scratch, tmp); 3699 r = -EINVAL; 3700 } 3701 radeon_scratch_free(rdev, scratch); 3702 return r; 3703 } 3704 3705 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3706 { 3707 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3708 3709 if (ring->rptr_save_reg) { 3710 u32 next_rptr = ring->wptr + 2 + 3; 3711 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); 3712 radeon_ring_write(ring, next_rptr); 3713 } 3714 3715 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); 3716 radeon_ring_write(ring, ib->gpu_addr); 3717 radeon_ring_write(ring, ib->length_dw); 3718 } 3719 3720 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3721 { 3722 struct radeon_ib ib; 3723 uint32_t scratch; 3724 uint32_t tmp = 0; 3725 unsigned i; 3726 int r; 3727 3728 r = radeon_scratch_get(rdev, &scratch); 3729 if (r) { 3730 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3731 return r; 3732 } 3733 WREG32(scratch, 0xCAFEDEAD); 3734 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); 3735 if (r) { 3736 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3737 goto free_scratch; 3738 } 3739 ib.ptr[0] = PACKET0(scratch, 0); 3740 ib.ptr[1] = 0xDEADBEEF; 3741 ib.ptr[2] = PACKET2(0); 3742 ib.ptr[3] = PACKET2(0); 3743 ib.ptr[4] = PACKET2(0); 3744 ib.ptr[5] = PACKET2(0); 3745 ib.ptr[6] = PACKET2(0); 3746 ib.ptr[7] = PACKET2(0); 3747 ib.length_dw = 8; 3748 r = radeon_ib_schedule(rdev, &ib, NULL, false); 3749 if (r) { 3750 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3751 goto free_ib; 3752 } 3753 r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( 3754 RADEON_USEC_IB_TEST_TIMEOUT)); 3755 if (r < 0) { 3756 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3757 goto free_ib; 3758 } else if (r == 0) { 3759 DRM_ERROR("radeon: fence wait timed out.\n"); 3760 r = -ETIMEDOUT; 3761 goto free_ib; 3762 } 3763 r = 0; 3764 for (i = 0; i < rdev->usec_timeout; i++) { 3765 tmp = RREG32(scratch); 3766 if (tmp == 0xDEADBEEF) { 3767 break; 3768 } 3769 DRM_UDELAY(1); 3770 } 3771 if (i < rdev->usec_timeout) { 3772 DRM_INFO("ib test succeeded in %u usecs\n", i); 3773 } else { 3774 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", 3775 scratch, tmp); 3776 r = -EINVAL; 3777 } 3778 free_ib: 3779 radeon_ib_free(rdev, &ib); 3780 free_scratch: 3781 radeon_scratch_free(rdev, scratch); 3782 return r; 3783 } 3784 3785 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) 3786 { 3787 /* Shutdown CP we shouldn't need to do that but better be safe than 3788 * sorry 3789 */ 3790 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 3791 WREG32(R_000740_CP_CSQ_CNTL, 0); 3792 3793 /* Save few CRTC registers */ 3794 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); 3795 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); 3796 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); 3797 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); 3798 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3799 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL); 3800 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET); 3801 } 3802 3803 /* Disable VGA aperture access */ 3804 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); 3805 /* Disable cursor, overlay, crtc */ 3806 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); 3807 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | 3808 S_000054_CRTC_DISPLAY_DIS(1)); 3809 WREG32(R_000050_CRTC_GEN_CNTL, 3810 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) | 3811 S_000050_CRTC_DISP_REQ_EN_B(1)); 3812 WREG32(R_000420_OV0_SCALE_CNTL, 3813 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL)); 3814 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET); 3815 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3816 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET | 3817 S_000360_CUR2_LOCK(1)); 3818 WREG32(R_0003F8_CRTC2_GEN_CNTL, 3819 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) | 3820 S_0003F8_CRTC2_DISPLAY_DIS(1) | 3821 S_0003F8_CRTC2_DISP_REQ_EN_B(1)); 3822 WREG32(R_000360_CUR2_OFFSET, 3823 C_000360_CUR2_LOCK & save->CUR2_OFFSET); 3824 } 3825 } 3826 3827 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) 3828 { 3829 /* Update base address for crtc */ 3830 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start); 3831 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3832 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start); 3833 } 3834 /* Restore CRTC registers */ 3835 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); 3836 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); 3837 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); 3838 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3839 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); 3840 } 3841 } 3842 3843 void r100_vga_render_disable(struct radeon_device *rdev) 3844 { 3845 u32 tmp; 3846 3847 tmp = RREG8(R_0003C2_GENMO_WT); 3848 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); 3849 } 3850 3851 static void r100_debugfs(struct radeon_device *rdev) 3852 { 3853 int r; 3854 3855 r = r100_debugfs_mc_info_init(rdev); 3856 if (r) 3857 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); 3858 } 3859 3860 static void r100_mc_program(struct radeon_device *rdev) 3861 { 3862 struct r100_mc_save save; 3863 3864 /* Stops all mc clients */ 3865 r100_mc_stop(rdev, &save); 3866 if (rdev->flags & RADEON_IS_AGP) { 3867 WREG32(R_00014C_MC_AGP_LOCATION, 3868 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | 3869 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); 3870 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); 3871 if (rdev->family > CHIP_RV200) 3872 WREG32(R_00015C_AGP_BASE_2, 3873 upper_32_bits(rdev->mc.agp_base) & 0xff); 3874 } else { 3875 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); 3876 WREG32(R_000170_AGP_BASE, 0); 3877 if (rdev->family > CHIP_RV200) 3878 WREG32(R_00015C_AGP_BASE_2, 0); 3879 } 3880 /* Wait for mc idle */ 3881 if (r100_mc_wait_for_idle(rdev)) 3882 dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); 3883 /* Program MC, should be a 32bits limited address space */ 3884 WREG32(R_000148_MC_FB_LOCATION, 3885 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 3886 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 3887 r100_mc_resume(rdev, &save); 3888 } 3889 3890 static void r100_clock_startup(struct radeon_device *rdev) 3891 { 3892 u32 tmp; 3893 3894 if (radeon_dynclks != -1 && radeon_dynclks) 3895 radeon_legacy_set_clock_gating(rdev, 1); 3896 /* We need to force on some of the block */ 3897 tmp = RREG32_PLL(R_00000D_SCLK_CNTL); 3898 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); 3899 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) 3900 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); 3901 WREG32_PLL(R_00000D_SCLK_CNTL, tmp); 3902 } 3903 3904 static int r100_startup(struct radeon_device *rdev) 3905 { 3906 int r; 3907 3908 /* set common regs */ 3909 r100_set_common_regs(rdev); 3910 /* program mc */ 3911 r100_mc_program(rdev); 3912 /* Resume clock */ 3913 r100_clock_startup(rdev); 3914 /* Initialize GART (initialize after TTM so we can allocate 3915 * memory through TTM but finalize after TTM) */ 3916 r100_enable_bm(rdev); 3917 if (rdev->flags & RADEON_IS_PCI) { 3918 r = r100_pci_gart_enable(rdev); 3919 if (r) 3920 return r; 3921 } 3922 3923 /* allocate wb buffer */ 3924 r = radeon_wb_init(rdev); 3925 if (r) 3926 return r; 3927 3928 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 3929 if (r) { 3930 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 3931 return r; 3932 } 3933 3934 /* Enable IRQ */ 3935 if (!rdev->irq.installed) { 3936 r = radeon_irq_kms_init(rdev); 3937 if (r) 3938 return r; 3939 } 3940 3941 r100_irq_set(rdev); 3942 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 3943 /* 1M ring buffer */ 3944 r = r100_cp_init(rdev, 1024 * 1024); 3945 if (r) { 3946 dev_err(rdev->dev, "failed initializing CP (%d).\n", r); 3947 return r; 3948 } 3949 3950 r = radeon_ib_pool_init(rdev); 3951 if (r) { 3952 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 3953 return r; 3954 } 3955 3956 return 0; 3957 } 3958 3959 int r100_resume(struct radeon_device *rdev) 3960 { 3961 int r; 3962 3963 /* Make sur GART are not working */ 3964 if (rdev->flags & RADEON_IS_PCI) 3965 r100_pci_gart_disable(rdev); 3966 /* Resume clock before doing reset */ 3967 r100_clock_startup(rdev); 3968 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3969 if (radeon_asic_reset(rdev)) { 3970 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3971 RREG32(R_000E40_RBBM_STATUS), 3972 RREG32(R_0007C0_CP_STAT)); 3973 } 3974 /* post */ 3975 radeon_combios_asic_init(rdev->ddev); 3976 /* Resume clock after posting */ 3977 r100_clock_startup(rdev); 3978 /* Initialize surface registers */ 3979 radeon_surface_init(rdev); 3980 3981 rdev->accel_working = true; 3982 r = r100_startup(rdev); 3983 if (r) { 3984 rdev->accel_working = false; 3985 } 3986 return r; 3987 } 3988 3989 int r100_suspend(struct radeon_device *rdev) 3990 { 3991 radeon_pm_suspend(rdev); 3992 r100_cp_disable(rdev); 3993 radeon_wb_disable(rdev); 3994 r100_irq_disable(rdev); 3995 if (rdev->flags & RADEON_IS_PCI) 3996 r100_pci_gart_disable(rdev); 3997 return 0; 3998 } 3999 4000 void r100_fini(struct radeon_device *rdev) 4001 { 4002 radeon_pm_fini(rdev); 4003 r100_cp_fini(rdev); 4004 radeon_wb_fini(rdev); 4005 radeon_ib_pool_fini(rdev); 4006 radeon_gem_fini(rdev); 4007 if (rdev->flags & RADEON_IS_PCI) 4008 r100_pci_gart_fini(rdev); 4009 radeon_agp_fini(rdev); 4010 radeon_irq_kms_fini(rdev); 4011 radeon_fence_driver_fini(rdev); 4012 radeon_bo_fini(rdev); 4013 radeon_atombios_fini(rdev); 4014 r100_cp_fini_microcode(rdev); 4015 kfree(rdev->bios); 4016 rdev->bios = NULL; 4017 } 4018 4019 /* 4020 * Due to how kexec works, it can leave the hw fully initialised when it 4021 * boots the new kernel. However doing our init sequence with the CP and 4022 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup 4023 * do some quick sanity checks and restore sane values to avoid this 4024 * problem. 4025 */ 4026 void r100_restore_sanity(struct radeon_device *rdev) 4027 { 4028 u32 tmp; 4029 4030 tmp = RREG32(RADEON_CP_CSQ_CNTL); 4031 if (tmp) { 4032 WREG32(RADEON_CP_CSQ_CNTL, 0); 4033 } 4034 tmp = RREG32(RADEON_CP_RB_CNTL); 4035 if (tmp) { 4036 WREG32(RADEON_CP_RB_CNTL, 0); 4037 } 4038 tmp = RREG32(RADEON_SCRATCH_UMSK); 4039 if (tmp) { 4040 WREG32(RADEON_SCRATCH_UMSK, 0); 4041 } 4042 } 4043 4044 int r100_init(struct radeon_device *rdev) 4045 { 4046 int r; 4047 4048 /* Register debugfs file specific to this group of asics */ 4049 r100_debugfs(rdev); 4050 /* Disable VGA */ 4051 r100_vga_render_disable(rdev); 4052 /* Initialize scratch registers */ 4053 radeon_scratch_init(rdev); 4054 /* Initialize surface registers */ 4055 radeon_surface_init(rdev); 4056 /* sanity check some register to avoid hangs like after kexec */ 4057 r100_restore_sanity(rdev); 4058 /* TODO: disable VGA need to use VGA request */ 4059 /* BIOS*/ 4060 if (!radeon_get_bios(rdev)) { 4061 if (ASIC_IS_AVIVO(rdev)) 4062 return -EINVAL; 4063 } 4064 if (rdev->is_atom_bios) { 4065 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); 4066 return -EINVAL; 4067 } else { 4068 r = radeon_combios_init(rdev); 4069 if (r) 4070 return r; 4071 } 4072 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 4073 if (radeon_asic_reset(rdev)) { 4074 dev_warn(rdev->dev, 4075 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 4076 RREG32(R_000E40_RBBM_STATUS), 4077 RREG32(R_0007C0_CP_STAT)); 4078 } 4079 /* check if cards are posted or not */ 4080 if (radeon_boot_test_post_card(rdev) == false) 4081 return -EINVAL; 4082 /* Set asic errata */ 4083 r100_errata(rdev); 4084 /* Initialize clocks */ 4085 radeon_get_clock_info(rdev->ddev); 4086 /* initialize AGP */ 4087 if (rdev->flags & RADEON_IS_AGP) { 4088 r = radeon_agp_init(rdev); 4089 if (r) { 4090 radeon_agp_disable(rdev); 4091 } 4092 } 4093 /* initialize VRAM */ 4094 r100_mc_init(rdev); 4095 /* Fence driver */ 4096 r = radeon_fence_driver_init(rdev); 4097 if (r) 4098 return r; 4099 /* Memory manager */ 4100 r = radeon_bo_init(rdev); 4101 if (r) 4102 return r; 4103 if (rdev->flags & RADEON_IS_PCI) { 4104 r = r100_pci_gart_init(rdev); 4105 if (r) 4106 return r; 4107 } 4108 r100_set_safe_registers(rdev); 4109 4110 /* Initialize power management */ 4111 radeon_pm_init(rdev); 4112 4113 rdev->accel_working = true; 4114 r = r100_startup(rdev); 4115 if (r) { 4116 /* Somethings want wront with the accel init stop accel */ 4117 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 4118 r100_cp_fini(rdev); 4119 radeon_wb_fini(rdev); 4120 radeon_ib_pool_fini(rdev); 4121 radeon_irq_kms_fini(rdev); 4122 if (rdev->flags & RADEON_IS_PCI) 4123 r100_pci_gart_fini(rdev); 4124 rdev->accel_working = false; 4125 } 4126 return 0; 4127 } 4128 4129 uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg) 4130 { 4131 uint32_t ret; 4132 4133 lockmgr(&rdev->mmio_idx_lock, LK_EXCLUSIVE); 4134 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4135 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4136 lockmgr(&rdev->mmio_idx_lock, LK_RELEASE); 4137 return ret; 4138 } 4139 4140 void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v) 4141 { 4142 lockmgr(&rdev->mmio_idx_lock, LK_EXCLUSIVE); 4143 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4144 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4145 lockmgr(&rdev->mmio_idx_lock, LK_RELEASE); 4146 } 4147 4148 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) 4149 { 4150 if (reg < rdev->rio_mem_size) 4151 return bus_read_4(rdev->rio_mem, reg); 4152 else { 4153 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg); 4154 return bus_read_4(rdev->rio_mem, RADEON_MM_DATA); 4155 } 4156 } 4157 4158 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) 4159 { 4160 if (reg < rdev->rio_mem_size) 4161 bus_write_4(rdev->rio_mem, reg, v); 4162 else { 4163 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg); 4164 bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v); 4165 } 4166 } 4167