/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/r100.c 255573 2013-09-14 17:24:41Z dumbbell $
 */

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include <linux/firmware.h>
#include <linux/module.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100	"radeonkmsfw_R100_cp"
#define FIRMWARE_R200	"radeonkmsfw_R200_cp"
#define FIRMWARE_R300	"radeonkmsfw_R300_cp"
#define FIRMWARE_R420	"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690	"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600	"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520	"radeonkmsfw_R520_cp"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and others in some cases.
 */

static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0) {
		if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
			return true;
		else
			return false;
	} else {
		if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
			return true;
		else
			return false;
	}
}

static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 vline1, vline2;

	if (crtc == 0) {
		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	} else {
		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	}
	if (vline1 != vline2)
		return true;
	else
		return false;
}

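/*
 * Example (illustrative sketch, not a driver entry point): the two
 * back-to-back reads in r100_is_counter_moving() sample the current
 * scanline twice; on a live display the counter advances between the
 * register reads, so unequal samples mean the display is scanning.
 * Combining both helpers gives a vblank wait that cannot spin forever:
 *
 *	while (r100_is_in_vblank(rdev, crtc)) {
 *		if (!r100_is_counter_moving(rdev, crtc))
 *			break;
 *	}
 *
 * where the break guards against a stalled counter (display off).
 */
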
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
			return;
	} else {
		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
			return;
	}

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

}

/**
 * r100_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Check if the last pageflip is still pending (r1xx-r4xx).
 * Returns the current update pending status.
 */
bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}

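/*
 * Usage sketch (illustrative, assuming the caller polls with the same
 * rdev->usec_timeout budget used elsewhere in this file): waiting for a
 * previously programmed flip to complete can be done by polling the
 * pending bit:
 *
 *	int i;
 *
 *	for (i = 0; i < rdev->usec_timeout; i++) {
 *		if (!r100_page_flip_pending(rdev, crtc_id))
 *			break;
 *		udelay(1);
 *	}
 */
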
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined dynpm action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

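/*
 * Worked example for the dynpm walk above, which assumes
 * rdev->pm.power_state[] is ordered from lowest to highest performance:
 * with num_power_states == 3 and current_power_state_index == 1, a
 * DYNPM_ACTION_DOWNCLOCK request on a single-display system selects
 * index 0 and DYNPM_ACTION_UPCLOCK selects index 2, while multi-display
 * systems additionally skip states flagged
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY.
 */
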
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}

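/*
 * Note on the voltage-drop switch above: only the four discrete ramp
 * delays 33, 66, 99 and 132 have hardware encodings
 * (VOLTAGE_DELAY_SEL(0) through VOLTAGE_DELAY_SEL(3)); a power state
 * with voltage->delay == 99, for example, programs VOLTAGE_DELAY_SEL(2).
 * Any other non-zero delay leaves the delay-select field at the cleared
 * default from the earlier ~VOLTAGE_DELAY_SEL(3) mask.
 */
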
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}

/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* It seems the hw only caches one entry, so we should discard that
	 * entry; otherwise, if the first GPU GART read hits it, it could
	 * end up at the wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		WARN(1, "R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
			    uint64_t addr, uint32_t flags)
{
	u32 *gtt = rdev->gart.ptr;

	gtt[i] = cpu_to_le32(lower_32_bits(addr));
}

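/*
 * Layout sketch of the PCI GART implied by the functions above: the page
 * table is a flat array of little-endian 32-bit entries, one per GPU
 * page, each holding the low 32 bits of the page's bus address; this is
 * why r100_pci_gart_init() sizes the table as num_gpu_pages * 4 bytes.
 * For example (illustrative values), mapping GPU page 5 to bus address
 * 0x12345000 amounts to:
 *
 *	u32 *gtt = rdev->gart.ptr;
 *
 *	gtt[5] = cpu_to_le32(0x12345000);
 */
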
void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

irqreturn_t r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_vblank(rdev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_vblank(rdev, 1);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

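/*
 * Note for the ring-emission helpers below: PACKET0(reg, n) builds a
 * type-0 command-processor header that writes n+1 consecutive dwords
 * starting at register 'reg', so the pair
 *
 *	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 *	radeon_ring_write(ring, value);
 *
 * is the ring-buffer equivalent of WREG32(RADEON_HOST_PATH_CNTL, value),
 * executed when the CP reaches that point in the command stream.
 */
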
/**
 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 *
 * @rdev: radeon device structure
 * @ring: ring buffer struct for emitting packets
 */
static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are the IB scheduler and buffer moves) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	r100_ring_hdp_flush(rdev, ring);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

bool r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	BUG();
	return false;
}

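/*
 * Worked example for r100_copy_blit() below: the 2D engine takes pitches
 * in 64-byte units and offsets in 1 KiB units.  Assuming the usual
 * 4096-byte RADEON_GPU_PAGE_SIZE stride used here, pitch = 4096 / 64 = 64
 * and stride_pixels = 4096 / 4 = 1024; each loop iteration then blits up
 * to 8191 GPU pages as one page-wide rectangle.
 */
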
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    unsigned num_gpu_pages,
				    struct reservation_object *resv)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	struct radeon_fence *fence;
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return ERR_PTR(-EINVAL);
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages are in the Y direction (height);
		 * page width is in the X direction (width) */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring, false);
}

/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG_KMS("\n");

	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->datasize % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

u32 r100_gfx_get_rptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(RADEON_CP_RB_RPTR);

	return rptr;
}

u32 r100_gfx_get_wptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 wptr;

	wptr = RREG32(RADEON_CP_RB_WPTR);

	return wptr;
}

void r100_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}

/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
{
	release_firmware(rdev->me_fw);
	rdev->me_fw = NULL;
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->datasize / 4;
		fw_data = (const __be32 *)rdev->me_fw->data;
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

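/*
 * Note on the ring sizing in r100_cp_init() below: rb_bufsz is the log2
 * of the ring size in 8-byte units, and the size is then rounded to
 * (1 << (rb_bufsz + 1)) * 4 bytes.  For example, a requested 1 MiB ring
 * gives rb_bufsz = order_base_2(1048576 / 8) = 17, and the ring is
 * re-derived as (1 << 18) * 4 = 1 MiB.
 */
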
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r) {
		return r;
	}
	/* Each time the CP reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system RAM */
	rb_blksz = 9;
	/* the CP will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	ring->align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before
	 * the delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
	 * The idea is that most of the GPU commands go through the indirect1
	 * buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
	else {
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);
	}

	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev->bsddev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	ring->ready = true;
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

/*
 * CS functions
 */
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx,
			    unsigned reg)
{
	int r;
	u32 tile_flags = 0;
	u32 tmp;
	struct radeon_cs_reloc *reloc;
	u32 value;

	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		radeon_cs_dump_packet(p, pkt);
		return r;
	}

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				radeon_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}

		tmp |= tile_flags;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
	} else
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
	return 0;
}

int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
{
	unsigned c, i;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	int r = 0;
	volatile uint32_t *ib;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
	if (c > 16) {
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		radeon_cs_dump_packet(p, pkt);
		return -EINVAL;
	}
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;
	}
	if (c & 1) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
	}
	return r;
}

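/*
 * Worked example for r100_cs_parse_packet0() below: the safe-register
 * bitmap holds one bit per dword register, 32 registers (128 bytes of
 * register space) per bitmap word.  For a register offset of 0x15c0
 * (illustrative value), the word index is 0x15c0 >> 7 = 43 and the bit
 * is (0x15c0 >> 2) & 31 = 16, so the whitelist test is effectively
 * auth[43] & (1 << 16).
 */
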
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_UNTIL */
	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_UNTIL and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = R100_CP_PACKET0_GET_REG(header);
	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
	if (!crtc) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}

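/*
 * Example for r100_get_vtx_size() below: the size starts at 2 dwords for
 * the mandatory XY position and grows with each enabled SE_VTX_FMT bit;
 * a format with RADEON_SE_VTX_FMT_Z and RADEON_SE_VTX_FMT_ST0 set, for
 * instance, yields 2 + 1 + 2 = 5 dwords per vertex.
 */
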
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;

	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	/* FIXME: only allow PACKET3 blit? easier to check for out of
	 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		track->tex_dirty = true;
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
{
	unsigned idx;
	u32 value;

	idx = pkt->idx + 1;
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
		return -EINVAL;
	}
	return 0;
}

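/*
 * Example for the INDX_BUFFER bounds check above: the dword at
 * pkt->idx + 3 holds the last addressable byte offset of the index
 * buffer, so the backing BO must be at least value + 1 bytes; a value
 * of 4095, for instance, requires a BO of at least 4096 bytes or the
 * packet is rejected.
 */
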
static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	unsigned idx;
	volatile uint32_t *ib;
	int r;

	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = radeon_get_ib_value(p, idx+1);

		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	case PACKET3_3D_DRAW_IMMD:
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
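	/* The HyperZ clear packets below are privileged: only the file handle
	 * that currently owns the HyperZ state (rdev->hyperz_filp) may emit
	 * them; everyone else gets -EINVAL.
	 */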
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (!track)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
					p->rdev->config.r100.reg_safe_bm,
					p->rdev->config.r100.reg_safe_bm_size,
					&r200_packet0_check);
			else
				r = r100_cs_parse_packet0(p, &pkt,
					p->rdev->config.r100.reg_safe_bm,
					p->rdev->config.r100.reg_safe_bm_size,
					&r100_packet0_check);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	kfree(p->track);
	p->track = NULL;
	return 0;
}

static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("use_pitch %d\n", t->use_pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("width_11 %d\n", t->width_11);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("height_11 %d\n", t->height_11);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
	DRM_ERROR("compress format %d\n", t->compress_format);
}

static int r100_track_compress_size(int compress_format, int w, int h)
{
	int block_width, block_height, block_bytes;
	int wblocks, hblocks;
	int min_wblocks;
	int sz;

	block_width = 4;
	block_height = 4;

	switch (compress_format) {
	case R100_TRACK_COMP_DXT1:
		block_bytes = 8;
		min_wblocks = 4;
		break;
	default:
	case R100_TRACK_COMP_DXT35:
		block_bytes = 16;
		min_wblocks = 2;
		break;
	}

	hblocks = (h + block_height - 1) / block_height;
	wblocks = (w + block_width - 1) / block_width;
	if (wblocks < min_wblocks)
		wblocks = min_wblocks;
	sz = wblocks * hblocks * block_bytes;
	return sz;
}
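/* Worked example for r100_track_compress_size(): a 16x16 DXT1 texture covers
 * ceil(16/4) * ceil(16/4) = 4 * 4 blocks at 8 bytes per block, i.e. 128
 * bytes; a 4x4 DXT3/5 texture is a single block, but wblocks is clamped to
 * min_wblocks = 2, giving 2 * 1 * 16 = 32 bytes.
 */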
static int r100_cs_track_cube(struct radeon_device *rdev,
			      struct r100_cs_track *track, unsigned idx)
{
	unsigned face, w, h;
	struct radeon_bo *cube_robj;
	unsigned long size;
	unsigned compress_format = track->textures[idx].compress_format;

	for (face = 0; face < 5; face++) {
		cube_robj = track->textures[idx].cube_info[face].robj;
		w = track->textures[idx].cube_info[face].width;
		h = track->textures[idx].cube_info[face].height;

		if (compress_format) {
			size = r100_track_compress_size(compress_format, w, h);
		} else
			size = w * h;
		size *= track->textures[idx].cpp;

		size += track->textures[idx].cube_info[face].offset;

		if (size > radeon_bo_size(cube_robj)) {
			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
				  size, radeon_bo_size(cube_robj));
			r100_cs_track_texture_print(&track->textures[idx]);
			return -1;
		}
	}
	return 0;
}

static int r100_cs_track_texture_check(struct radeon_device *rdev,
				       struct r100_cs_track *track)
{
	struct radeon_bo *robj;
	unsigned long size;
	unsigned u, i, w, h, d;
	int ret;

	for (u = 0; u < track->num_texture; u++) {
		if (!track->textures[u].enabled)
			continue;
		if (track->textures[u].lookup_disable)
			continue;
		robj = track->textures[u].robj;
		if (robj == NULL) {
			DRM_ERROR("No texture bound to unit %u\n", u);
			return -EINVAL;
		}
		size = 0;
		for (i = 0; i <= track->textures[u].num_levels; i++) {
			if (track->textures[u].use_pitch) {
				if (rdev->family < CHIP_R300)
					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
				else
					w = track->textures[u].pitch / (1 << i);
			} else {
				w = track->textures[u].width;
				if (rdev->family >= CHIP_RV515)
					w |= track->textures[u].width_11;
				w = w / (1 << i);
				if (track->textures[u].roundup_w)
					w = roundup_pow_of_two(w);
			}
			h = track->textures[u].height;
			if (rdev->family >= CHIP_RV515)
				h |= track->textures[u].height_11;
			h = h / (1 << i);
			if (track->textures[u].roundup_h)
				h = roundup_pow_of_two(h);
			if (track->textures[u].tex_coord_type == 1) {
				d = (1 << track->textures[u].txdepth) / (1 << i);
				if (!d)
					d = 1;
			} else {
				d = 1;
			}
			if (track->textures[u].compress_format) {
				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
				/* compressed textures are block based */
			} else
				size += w * h * d;
		}
		size *= track->textures[u].cpp;

		switch (track->textures[u].tex_coord_type) {
		case 0:
		case 1:
			break;
		case 2:
			if (track->separate_cube) {
				ret = r100_cs_track_cube(rdev, track, u);
				if (ret)
					return ret;
			} else
				size *= 6;
			break;
		default:
			DRM_ERROR("Invalid texture coordinate type %u for unit "
				  "%u\n", track->textures[u].tex_coord_type, u);
			return -EINVAL;
		}
		if (size > radeon_bo_size(robj)) {
			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
				  "%lu\n", u, size, radeon_bo_size(robj));
			r100_cs_track_texture_print(&track->textures[u]);
			return -EINVAL;
		}
	}
	return 0;
}
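/* Worked example for the mip-chain accounting in
 * r100_cs_track_texture_check(): a 32x32 ARGB8888 2D texture with
 * num_levels = 2 sums the 32x32, 16x16 and 8x8 levels, i.e.
 * (1024 + 256 + 64) texels * 4 bytes = 5376 bytes, and that total must fit
 * in the bound radeon_bo for the unit to pass validation.
 */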
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
{
	unsigned i;
	unsigned long size;
	unsigned prim_walk;
	unsigned nverts;
	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;

	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
	    !track->blend_read_enable)
		num_cb = 0;

	for (i = 0; i < num_cb; i++) {
		if (track->cb[i].robj == NULL) {
			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
			return -EINVAL;
		}
		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
		size += track->cb[i].offset;
		if (size > radeon_bo_size(track->cb[i].robj)) {
			DRM_ERROR("[drm] Buffer too small for color buffer %d "
				  "(need %lu have %lu) !\n", i, size,
				  radeon_bo_size(track->cb[i].robj));
			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
				  i, track->cb[i].pitch, track->cb[i].cpp,
				  track->cb[i].offset, track->maxy);
			return -EINVAL;
		}
	}
	track->cb_dirty = false;

	if (track->zb_dirty && track->z_enabled) {
		if (track->zb.robj == NULL) {
			DRM_ERROR("[drm] No buffer for z buffer !\n");
			return -EINVAL;
		}
		size = track->zb.pitch * track->zb.cpp * track->maxy;
		size += track->zb.offset;
		if (size > radeon_bo_size(track->zb.robj)) {
			DRM_ERROR("[drm] Buffer too small for z buffer "
				  "(need %lu have %lu) !\n", size,
				  radeon_bo_size(track->zb.robj));
			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
				  track->zb.pitch, track->zb.cpp,
				  track->zb.offset, track->maxy);
			return -EINVAL;
		}
	}
	track->zb_dirty = false;

	if (track->aa_dirty && track->aaresolve) {
		if (track->aa.robj == NULL) {
			DRM_ERROR("[drm] No buffer for AA resolve buffer !\n");
			return -EINVAL;
		}
		/* I believe the format comes from colorbuffer0. */
		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
		size += track->aa.offset;
		if (size > radeon_bo_size(track->aa.robj)) {
			DRM_ERROR("[drm] Buffer too small for AA resolve buffer "
				  "(need %lu have %lu) !\n", size,
				  radeon_bo_size(track->aa.robj));
			DRM_ERROR("[drm] AA resolve buffer (%u %u %u %u)\n",
				  track->aa.pitch, track->cb[0].cpp,
				  track->aa.offset, track->maxy);
			return -EINVAL;
		}
	}
	track->aa_dirty = false;

	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
	if (track->vap_vf_cntl & (1 << 14)) {
		nverts = track->vap_alt_nverts;
	} else {
		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
	}
	switch (prim_walk) {
	case 1:
		for (i = 0; i < track->num_arrays; i++) {
			size = track->arrays[i].esize * track->max_indx * 4;
			if (track->arrays[i].robj == NULL) {
				DRM_ERROR("(PW %u) Vertex array %u no buffer "
					  "bound\n", prim_walk, i);
				return -EINVAL;
			}
			if (size > radeon_bo_size(track->arrays[i].robj)) {
				dev_err(rdev->dev, "(PW %u) Vertex array %u "
					"need %lu dwords have %lu dwords\n",
					prim_walk, i, size >> 2,
					radeon_bo_size(track->arrays[i].robj)
					>> 2);
				DRM_ERROR("Max indices %u\n", track->max_indx);
				return -EINVAL;
			}
		}
		break;
	case 2:
		for (i = 0; i < track->num_arrays; i++) {
			size = track->arrays[i].esize * (nverts - 1) * 4;
			if (track->arrays[i].robj == NULL) {
				DRM_ERROR("(PW %u) Vertex array %u no buffer "
					  "bound\n", prim_walk, i);
				return -EINVAL;
			}
			if (size > radeon_bo_size(track->arrays[i].robj)) {
				dev_err(rdev->dev, "(PW %u) Vertex array %u "
					"need %lu dwords have %lu dwords\n",
					prim_walk, i, size >> 2,
					radeon_bo_size(track->arrays[i].robj)
					>> 2);
				return -EINVAL;
			}
		}
		break;
	case 3:
		size = track->vtx_size * nverts;
		if (size != track->immd_dwords) {
			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
				  track->immd_dwords, size);
			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
				  nverts, track->vtx_size);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
			  prim_walk);
		return -EINVAL;
	}

	if (track->tex_dirty) {
		track->tex_dirty = false;
		return r100_cs_track_texture_check(rdev, track);
	}
	return 0;
}

void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
	unsigned i, face;

	track->cb_dirty = true;
	track->zb_dirty = true;
	track->tex_dirty = true;
	track->aa_dirty = true;

	if (rdev->family < CHIP_R300) {
		track->num_cb = 1;
		if (rdev->family <= CHIP_RS200)
			track->num_texture = 3;
		else
			track->num_texture = 6;
		track->maxy = 2048;
		track->separate_cube = 1;
	} else {
		track->num_cb = 4;
		track->num_texture = 16;
		track->maxy = 4096;
		track->separate_cube = 0;
		track->aaresolve = false;
		track->aa.robj = NULL;
	}

	for (i = 0; i < track->num_cb; i++) {
		track->cb[i].robj = NULL;
		track->cb[i].pitch = 8192;
		track->cb[i].cpp = 16;
		track->cb[i].offset = 0;
	}
	track->z_enabled = true;
	track->zb.robj = NULL;
	track->zb.pitch = 8192;
	track->zb.cpp = 4;
	track->zb.offset = 0;
	track->vtx_size = 0x7F;
	track->immd_dwords = 0xFFFFFFFFUL;
	track->num_arrays = 11;
	track->max_indx = 0x00FFFFFFUL;
	for (i = 0; i < track->num_arrays; i++) {
		track->arrays[i].robj = NULL;
		track->arrays[i].esize = 0x7F;
	}
	for (i = 0; i < track->num_texture; i++) {
		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		track->textures[i].pitch = 16536;
		track->textures[i].width = 16536;
		track->textures[i].height = 16536;
		track->textures[i].width_11 = 1 << 11;
		track->textures[i].height_11 = 1 << 11;
		track->textures[i].num_levels = 12;
		if (rdev->family <= CHIP_RS200) {
			track->textures[i].tex_coord_type = 0;
			track->textures[i].txdepth = 0;
		} else {
			track->textures[i].txdepth = 16;
			track->textures[i].tex_coord_type = 1;
		}
		track->textures[i].cpp = 64;
		track->textures[i].robj = NULL;
		/* CS IB emission code makes sure texture units are disabled */
		track->textures[i].enabled = false;
		track->textures[i].lookup_disable = false;
		track->textures[i].roundup_w = true;
		track->textures[i].roundup_h = true;
		if (track->separate_cube)
			for (face = 0; face < 5; face++) {
				track->textures[i].cube_info[face].robj = NULL;
				track->textures[i].cube_info[face].width = 16536;
				track->textures[i].cube_info[face].height = 16536;
				track->textures[i].cube_info[face].offset = 0;
			}
	}
}
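/* The defaults above look deliberately pessimistic (huge pitches, 16 bytes
 * per pixel, 12 mip levels, every buffer object NULL) so that any state a
 * command stream uses without programming it first trips the size checks in
 * r100_cs_track_check() rather than slipping through.
 */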
/*
 * Global GPU functions
 */
static void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & RADEON_RBBM_ACTIVE)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & RADEON_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rbbm_status;

	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
	uint32_t tmp;
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
}

void r100_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering */
	tmp = RREG32(R_000030_BUS_CNTL);
	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
	mdelay(1);
	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
	mdelay(1);
	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
	tmp = RREG32(RADEON_BUS_CNTL);
	mdelay(1);
	pci_disable_busmaster(rdev->dev->bsddev);
	mdelay(1);
}
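/* Rough shape of the reset sequence below: stop the CP and zero its ring
 * pointers, save PCI config state and disable bus mastering, pulse
 * RBBM_SOFT_RESET for the 3D blocks (SE/RE/PP/RB) and then separately for
 * the CP, then restore PCI state and bus mastering before checking
 * RBBM_STATUS for busy bits that survived the reset.
 */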
int r100_asic_reset(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(device_get_parent(rdev->dev->bsddev));
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
					 S_0000F0_SOFT_RESET_RE(1) |
					 S_0000F0_SOFT_RESET_PP(1) |
					 S_0000F0_SOFT_RESET_RB(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(device_get_parent(rdev->dev->bsddev));
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
	    G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

void r100_set_common_regs(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	bool force_dac2 = false;
	u32 tmp;

	/* set these so they don't interfere with anything */
	WREG32(RADEON_OV0_SCALE_CNTL, 0);
	WREG32(RADEON_SUBPIC_CNTL, 0);
	WREG32(RADEON_VIPH_CONTROL, 0);
	WREG32(RADEON_I2C_CNTL_1, 0);
	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
	WREG32(RADEON_CAP1_TRIG_CNTL, 0);

	/* always set up dac2 on rn50 and some rv100 as lots
	 * of servers seem to wire it up to a VGA port but
	 * don't report it in the bios connector
	 * table.
	 */
	switch (dev->pdev->device) {
	/* RN50 */
	case 0x515e:
	case 0x5969:
		force_dac2 = true;
		break;
	/* RV100 */
	case 0x5159:
	case 0x515a:
		/* DELL triple head servers */
		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
		    ((dev->pdev->subsystem_device == 0x016c) ||
		     (dev->pdev->subsystem_device == 0x016d) ||
		     (dev->pdev->subsystem_device == 0x016e) ||
		     (dev->pdev->subsystem_device == 0x016f) ||
		     (dev->pdev->subsystem_device == 0x0170) ||
		     (dev->pdev->subsystem_device == 0x017d) ||
		     (dev->pdev->subsystem_device == 0x017e) ||
		     (dev->pdev->subsystem_device == 0x0183) ||
		     (dev->pdev->subsystem_device == 0x018a) ||
		     (dev->pdev->subsystem_device == 0x019a)))
			force_dac2 = true;
		break;
	}

	if (force_dac2) {
		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);

		/* For CRT on DAC2, don't turn it on if BIOS didn't
		   enable it, even if it's detected.
		*/

		/* force it to crtc0 */
		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;

		/* set up the TV DAC */
		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
				 RADEON_TV_DAC_STD_MASK |
				 RADEON_TV_DAC_RDACPD |
				 RADEON_TV_DAC_GDACPD |
				 RADEON_TV_DAC_BDACPD |
				 RADEON_TV_DAC_BGADJ_MASK |
				 RADEON_TV_DAC_DACADJ_MASK);
		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
				RADEON_TV_DAC_NHOLD |
				RADEON_TV_DAC_STD_PS2 |
				(0x58 << 16));

		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
	}

	/* switch PM block to ACPI mode */
	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
	tmp &= ~RADEON_PM_MODE_SEL;
	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}

/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}

static u32 r100_get_accessible_vram(struct radeon_device *rdev)
{
	u32 aper_size;
	u8 byte;

	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);

	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, cards with the 2nd generation multifunction PCI interface
	 */
	if (rdev->family == CHIP_RV280 ||
	    rdev->family >= CHIP_RV350) {
		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
			 ~RADEON_HDP_APER_CNTL);
		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
		return aper_size * 2;
	}

	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size
	 */
	byte = pci_read_config(rdev->dev->bsddev, 0xe, 1);
	if (byte & 0x80) {
		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
		DRM_INFO("Limiting VRAM to one aperture\n");
		return aper_size;
	}

	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
	 * has set it up. We don't write this as it's broken on some ASICs but
	 * we expect the BIOS to have done the right thing (might be too optimistic...)
	 */
	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
		return aper_size * 2;
	return aper_size;
}

void r100_vram_init_sizes(struct radeon_device *rdev)
{
	u64 config_aper_size;

	/* work out accessible VRAM */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
	/* FIXME we don't use the second aperture yet when we could use it */
	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
		rdev->mc.visible_vram_size = rdev->mc.aper_size;
	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
		tom = RREG32(RADEON_NB_TOM);
		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	} else {
		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of M6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.real_vram_size == 0) {
			rdev->mc.real_vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
		}
		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
		 * Novell bug 204882 + along with lots of ubuntu ones
		 */
		if (rdev->mc.aper_size > config_aper_size)
			config_aper_size = rdev->mc.aper_size;

		if (config_aper_size > rdev->mc.real_vram_size)
			rdev->mc.mc_vram_size = config_aper_size;
		else
			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	}
}

void r100_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(RADEON_CONFIG_CNTL);
	if (state == false) {
		temp &= ~RADEON_CFG_VGA_RAM_EN;
		temp |= RADEON_CFG_VGA_IO_DIS;
	} else {
		temp &= ~RADEON_CFG_VGA_IO_DIS;
	}
	WREG32(RADEON_CONFIG_CNTL, temp);
}

static void r100_mc_init(struct radeon_device *rdev)
{
	u64 base;

	r100_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
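/* The PLL registers below are reached indirectly: the register number goes
 * into CLOCK_CNTL_INDEX (with RADEON_PLL_WR_EN set for writes) and the value
 * moves through CLOCK_CNTL_DATA, with the r100_pll_errata_after_*() hooks
 * papering over chip-specific quirks of that index/data sequence.
 */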
/*
 * Indirect registers accessor
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
		(void)RREG32(RADEON_CRTC_GEN_CNTL);
	}
}

static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workaround is necessary on RV100, RS100 and RS200 chips
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		mdelay(5);
	}

	/* This function is required to work around a hardware bug in some (all?)
	 * revisions of the R300. This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
	 * may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;

		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}

uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t data;

	spin_lock(&rdev->pll_idx_lock);
	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	r100_pll_errata_after_index(rdev);
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	r100_pll_errata_after_data(rdev);
	spin_unlock(&rdev->pll_idx_lock);
	return data;
}

void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->pll_idx_lock);
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
	r100_pll_errata_after_index(rdev);
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
	r100_pll_errata_after_data(rdev);
	spin_unlock(&rdev->pll_idx_lock);
}

static void r100_set_safe_registers(struct radeon_device *rdev)
{
	if (ASIC_IS_RN50(rdev)) {
		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
	} else if (rdev->family < CHIP_R200) {
		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
	} else {
		r200_set_safe_registers(rdev);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t reg, value;
	unsigned i;

	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	for (i = 0; i < 64; i++) {
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
	}
	return 0;
}

static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	rdp = RREG32(RADEON_CP_RB_RPTR);
	wdp = RREG32(RADEON_CP_RB_WPTR);
	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	if (ring->ready) {
		for (j = 0; j <= count; j++) {
			i = (rdp + j) & ring->ptr_mask;
			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		}
	}
	return 0;
}
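/* Note on the pointer arithmetic above: ring_size is a power of two, so
 * "(rdp + ring_size - wdp) & ptr_mask" is the wrap-safe distance between the
 * two ring offsets (the addition keeps the intermediate value positive); the
 * same masking idiom drives the dump loop's index.
 */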
static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t csq_stat, csq2_stat, tmp;
	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
	unsigned i;

	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
	r_rptr = (csq_stat >> 0) & 0x3ff;
	r_wptr = (csq_stat >> 10) & 0x3ff;
	ib1_rptr = (csq_stat >> 20) & 0x3ff;
	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
	seq_printf(m, "Ring rptr %u\n", r_rptr);
	seq_printf(m, "Ring wptr %u\n", r_wptr);
	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
	seq_printf(m, "Ring fifo:\n");
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect1 fifo:\n");
	for (i = 256; i <= 512; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect2 fifo:\n");
	for (i = 640; i < ib1_wptr; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
	}
	return 0;
}

static int r100_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_FB_LOCATION);
	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_AGP_LOCATION);
	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AGP_BASE);
	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(0x01D0);
	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_LO_ADDR);
	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_HI_ADDR);
	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
	tmp = RREG32(0x01E4);
	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};

static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};

static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
#endif

int r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
#else
	return 0;
#endif
}

int r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
#else
	return 0;
#endif
}

int r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
#else
	return 0;
#endif
}

int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	int surf_index = reg * 16;
	int flags = 0;

	if (rdev->family <= CHIP_RS200) {
		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
		    == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
			flags |= RADEON_SURF_TILE_COLOR_BOTH;
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= RADEON_SURF_TILE_COLOR_MACRO;
		/* setting pitch to 0 disables tiling */
		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
		    == 0)
			pitch = 0;
	} else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & (RADEON_TILING_MACRO))
			flags |= R200_SURF_TILE_COLOR_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R200_SURF_TILE_COLOR_MICRO;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R300_SURF_TILE_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R300_SURF_TILE_MICRO;
	}

	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;

	/* r100/r200 divide by 16 */
	if (rdev->family < CHIP_R300)
		flags |= pitch / 16;
	else
		flags |= pitch / 8;

	DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
	return 0;
}

void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	int surf_index = reg * 16;

	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
}
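/* Overview of r100_bandwidth_update() below: it estimates display bandwidth
 * demand and memory-controller latency from the current sclk/mclk, DRAM
 * timings and active modes, then programs the GRPH_BUFFER_CNTL /
 * GRPH2_BUFFER_CNTL stop-request and critical-point watermarks, presumably
 * so display FIFO refills gain priority before the FIFO can underrun.  For
 * one CRTC the drain rate works out to pix_clk / (16 / bytes_per_pixel),
 * matching the disp_drain_rate computation below.
 */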
void r100_bandwidth_update(struct radeon_device *rdev)
{
	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
	fixed20_12 memtcas_ff[8] = {
		dfixed_init(1),
		dfixed_init(2),
		dfixed_init(3),
		dfixed_init(0),
		dfixed_init_half(1),
		dfixed_init_half(2),
		dfixed_init(0),
	};
	fixed20_12 memtcas_rs480_ff[8] = {
		dfixed_init(0),
		dfixed_init(1),
		dfixed_init(2),
		dfixed_init(3),
		dfixed_init(0),
		dfixed_init_half(1),
		dfixed_init_half(2),
		dfixed_init_half(3),
	};
	fixed20_12 memtcas2_ff[8] = {
		dfixed_init(0),
		dfixed_init(1),
		dfixed_init(2),
		dfixed_init(3),
		dfixed_init(4),
		dfixed_init(5),
		dfixed_init(6),
		dfixed_init(7),
	};
	fixed20_12 memtrbs[8] = {
		dfixed_init(1),
		dfixed_init_half(1),
		dfixed_init(2),
		dfixed_init_half(2),
		dfixed_init(3),
		dfixed_init_half(3),
		dfixed_init(4),
		dfixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		dfixed_init(4),
		dfixed_init(5),
		dfixed_init(6),
		dfixed_init(7),
		dfixed_init(8),
		dfixed_init(9),
		dfixed_init(10),
		dfixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		   disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16; /* in octawords */
	int critical_point = 0, critical_point2;
	/* uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;
	struct drm_display_mode *mode1 = NULL;
	struct drm_display_mode *mode2 = NULL;
	uint32_t pixel_bytes1 = 0;
	uint32_t pixel_bytes2 = 0;

	if (!rdev->mode_info.mode_config_initialized)
		return;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled) {
		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
	}
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		if (rdev->mode_info.crtcs[1]->base.enabled) {
			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
		}
	}

	min_mem_eff.full = dfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}

	/*
	 * determine if there is enough bandwidth for the current mode
	 */
	sclk_ff = rdev->pm.sclk;
	mclk_ff = rdev->pm.mclk;

	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = dfixed_const(temp);
	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);

	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = dfixed_const(1000);
		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = dfixed_div(pix_clk, temp_ff);
		temp_ff.full = dfixed_const(pixel_bytes1);
		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = dfixed_const(1000);
		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
		temp_ff.full = dfixed_const(pixel_bytes2);
		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
	}

	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
	}

	/* Get values from the EXT_MEM_CNTL register...converting its contents. */
	temp = RREG32(RADEON_MEM_TIMING_CNTL);
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
		mem_trcd = ((temp >> 2) & 0x3) + 1;
		mem_trp = ((temp & 0x3)) + 1;
		mem_tras = ((temp & 0x70) >> 4) + 1;
	} else if (rdev->family == CHIP_R300 ||
		   rdev->family == CHIP_R350) { /* r300, r350 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 11) & 0xf) + 4;
	} else if (rdev->family == CHIP_RV350 ||
		   rdev->family <= CHIP_RV380) {
		/* rv3x0 */
		mem_trcd = (temp & 0x7) + 3;
		mem_trp = ((temp >> 8) & 0x7) + 3;
		mem_tras = ((temp >> 11) & 0xf) + 6;
	} else if (rdev->family == CHIP_R420 ||
		   rdev->family == CHIP_R423 ||
		   rdev->family == CHIP_RV410) {
		/* r4xx */
		mem_trcd = (temp & 0xf) + 3;
		if (mem_trcd > 15)
			mem_trcd = 15;
		mem_trp = ((temp >> 8) & 0xf) + 3;
		if (mem_trp > 15)
			mem_trp = 15;
		mem_tras = ((temp >> 12) & 0x1f) + 6;
		if (mem_tras > 31)
			mem_tras = 31;
	} else { /* RV200, R200 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 12) & 0xf) + 4;
	}
	/* convert to FF */
	trcd_ff.full = dfixed_const(mem_trcd);
	trp_ff.full = dfixed_const(mem_trp);
	tras_ff.full = dfixed_const(mem_tras);

	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
	data = (temp & (7 << 20)) >> 20;
	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 */
			tcas_ff = memtcas_rs480_ff[data];
		else
			tcas_ff = memtcas_ff[data];
	} else
		tcas_ff = memtcas2_ff[data];

	if (rdev->family == CHIP_RS400 ||
	    rdev->family == CHIP_RS480) {
		/* extra cas latency stored in bits 23-25 0-4 clocks */
		data = (temp >> 23) & 0x7;
		if (data < 5)
			tcas_ff.full += dfixed_const(data);
	}

	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs.
		 */
		temp = RREG32(RADEON_MEM_CNTL);
		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
		if (data == 1) {
			if (R300_MEM_USE_CD_CH_ONLY & temp) {
				temp = RREG32(R300_MC_IND_INDEX);
				temp &= ~R300_MC_IND_ADDR_MASK;
				temp |= R300_MC_READ_CNTL_CD_mcind;
				WREG32(R300_MC_IND_INDEX, temp);
				temp = RREG32(R300_MC_IND_DATA);
				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
			} else {
				temp = RREG32(R300_MC_READ_CNTL_AB);
				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
			}
		} else {
			temp = RREG32(R300_MC_READ_CNTL_AB);
			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
		}
		if (rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R420 ||
		    rdev->family == CHIP_R423)
			trbs_ff = memtrbs_r4xx[data];
		else
			trbs_ff = memtrbs[data];
		tcas_ff.full += trbs_ff.full;
	}

	sclk_eff_ff.full = sclk_ff.full;

	if (rdev->flags & RADEON_IS_AGP) {
		fixed20_12 agpmode_ff;
		agpmode_ff.full = dfixed_const(radeon_agpmode);
		temp_ff.full = dfixed_const_666(16);
		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
	}
	/* TODO PCIE lanes may affect this - agpmode == 16?? */

	if (ASIC_IS_R300(rdev)) {
		sclk_delay_ff.full = dfixed_const(250);
	} else {
		if ((rdev->family == CHIP_RV100) ||
		    rdev->flags & RADEON_IS_IGP) {
			if (rdev->mc.vram_is_ddr)
				sclk_delay_ff.full = dfixed_const(41);
			else
				sclk_delay_ff.full = dfixed_const(33);
		} else {
			if (rdev->mc.vram_width == 128)
				sclk_delay_ff.full = dfixed_const(57);
			else
				sclk_delay_ff.full = dfixed_const(41);
		}
	}

	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);

	if (rdev->mc.vram_is_ddr) {
		if (rdev->mc.vram_width == 32) {
			k1.full = dfixed_const(40);
			c = 3;
		} else {
			k1.full = dfixed_const(20);
			c = 1;
		}
	} else {
		k1.full = dfixed_const(40);
		c = 3;
	}

	temp_ff.full = dfixed_const(2);
	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
	temp_ff.full = dfixed_const(c);
	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
	temp_ff.full = dfixed_const(4);
	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
	mc_latency_mclk.full += k1.full;

	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);

	/*
	  HW cursor time assuming worst case of full size colour cursor.
	*/
	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
	temp_ff.full += trcd_ff.full;
	if (temp_ff.full < tras_ff.full)
		temp_ff.full = tras_ff.full;
	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);

	temp_ff.full = dfixed_const(cur_size);
	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
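	/* In clock terms, the memory-controller latency assembled above is
	 * roughly (2*tRCD + c*tCAS + 4*tRAS + 4*tRP + k1) / mclk
	 * + 4 / sclk_eff, with c and k1 picked from the DDR configuration and
	 * bus width; the cursor terms then model a worst-case full-size
	 * cursor fetch on top of that.
	 */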
	/*
	  Find the total latency for the display data.
	*/
	disp_latency_overhead.full = dfixed_const(8);
	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;

	if (mc_latency_mclk.full > mc_latency_sclk.full)
		disp_latency.full = mc_latency_mclk.full;
	else
		disp_latency.full = mc_latency_sclk.full;

	/* setup Max GRPH_STOP_REQ default value */
	if (ASIC_IS_RV100(rdev))
		max_stop_req = 0x5c;
	else
		max_stop_req = 0x7c;

	if (mode1) {
		/*  CRTC1
		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
		*/
		stop_req = mode1->hdisplay * pixel_bytes1 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = dfixed_const((16/pixel_bytes1));
		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);

		/*
		  Find the critical point of the display buffer.
		*/
		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
		crit_point_ff.full += dfixed_const_half(0);

		critical_point = dfixed_trunc(crit_point_ff);

		if (rdev->disp_priority == 2) {
			critical_point = 0;
		}

		/*
		  The critical point should never be above max_stop_req-4.  Setting
		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
		*/
		if (max_stop_req - critical_point < 4)
			critical_point = 0;

		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0, when CRTC2 is enabled. */
			critical_point = 0x10;
		}

		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		temp &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		temp |= RADEON_GRPH_BUFFER_SIZE;
		temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);
		/*
		  Write the result into the register.
		*/
		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

#if 0
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			/* attempt to program RS400 disp regs correctly ??? */
			temp = RREG32(RS400_DISP1_REG_CNTL);
			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DMIF_MEM_CNTL1);
			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
		}
#endif

		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL now %x\n",
			      /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
			      (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
	}

	if (mode2) {
		u32 grph2_cntl;
		stop_req = mode2->hdisplay * pixel_bytes2 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = dfixed_const((16/pixel_bytes2));
		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);

		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
				RADEON_GRPH_CRITICAL_AT_SOF |
				RADEON_GRPH_STOP_CNTL);

		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200))
			critical_point2 = 0;
		else {
			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
			temp_ff.full = dfixed_const(temp);
			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
			if (sclk_ff.full < temp_ff.full)
				temp_ff.full = sclk_ff.full;

			read_return_rate.full = temp_ff.full;

			if (mode1) {
				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
			} else {
				time_disp1_drop_priority.full = 0;
			}
			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
			crit_point_ff.full += dfixed_const_half(0);

			critical_point2 = dfixed_trunc(crit_point_ff);

			if (rdev->disp_priority == 2) {
				critical_point2 = 0;
			}

			if (max_stop_req - critical_point2 < 4)
				critical_point2 = 0;

		}

		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0 */
			critical_point2 = 0x10;
		}

		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
#if 0
			/* attempt to program RS400 disp2 regs correctly ??? */
			temp = RREG32(RS400_DISP2_REQ_CNTL1);
			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DISP2_REQ_CNTL2);
			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
			WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
		}

		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL now %x\n",
			      (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
	}
}

int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET0(scratch, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	if (ring->rptr_save_reg) {
		u32 next_rptr = ring->wptr + 2 + 3;
		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, ib->length_dw);
}
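/* In r100_ring_ib_execute() above, next_rptr is wptr + 2 + 3 because the
 * rptr-save write itself occupies 2 ring dwords (PACKET0 header + value) and
 * the IB dispatch that follows occupies 3 (PACKET0 header, IB base address,
 * IB length), so the saved read pointer lands just past this submission.
 */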
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET0(scratch, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	if (ring->rptr_save_reg) {
		u32 next_rptr = ring->wptr + 2 + 3;
		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, ib->length_dw);
}

int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET0(scratch, 0);
	ib.ptr[1] = 0xDEADBEEF;
	ib.ptr[2] = PACKET2(0);
	ib.ptr[3] = PACKET2(0);
	ib.ptr[4] = PACKET2(0);
	ib.ptr[5] = PACKET2(0);
	ib.ptr[6] = PACKET2(0);
	ib.ptr[7] = PACKET2(0);
	ib.length_dw = 8;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP. We shouldn't need to do this, but better safe
	 * than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
	       S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
	       (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
	       S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
	       C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
		       S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
		       (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
		       S_0003F8_CRTC2_DISPLAY_DIS(1) |
		       S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
		       C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}
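
/**
 * r100_mc_program - program the memory controller.
 *
 * @rdev: radeon_device pointer
 *
 * Stop all MC clients, program the AGP location and base (or disable
 * the AGP aperture on non-AGP boards), wait for the MC to go idle and
 * program the VRAM location before resuming the MC clients
 * (r1xx-r4xx).
 */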
static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
			       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for MC idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program the MC; this is a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize the GART (initialize after TTM so we can allocate
	 * memory through TTM, but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
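
/**
 * r100_resume - resume the asic after suspend.
 *
 * @rdev: radeon_device pointer
 *
 * Make sure the GART is disabled, reset the GPU, re-post the card
 * through the combios tables and restart the engines via
 * r100_startup() (r1xx-r4xx).
 * Returns 0 on success, a negative error code on failure.
 */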
int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is not working */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an
	 * infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r100_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However, doing our init sequence with the CP and
 * WB already set up causes GPU hangs, on the RN50 at least. So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	}
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_RB_CNTL, 0);
	}
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp) {
		WREG32(RADEON_SCRATCH_UMSK, 0);
	}
}
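
/**
 * r100_init - asic specific driver and hw initialization.
 *
 * @rdev: radeon_device pointer
 *
 * Set up the asic (r1xx-r4xx): fetch and parse the video BIOS, reset
 * and post the GPU, initialize clocks, AGP, VRAM, the fence driver,
 * the memory manager and the GART, then start the engines through
 * r100_startup(). If acceleration fails to start, the device is left
 * usable for modesetting with acceleration disabled.
 * Returns 0 on success, a negative error code on failure.
 */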
int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disabling VGA needs to use the VGA request regs */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset the GPU before posting, otherwise ATOM will enter an
	 * infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect) {
		return bus_read_4(rdev->rmmio, reg);
	} else {
		uint32_t ret;

		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
		spin_unlock(&rdev->mmio_idx_lock);

		return ret;
	}
}

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect) {
		bus_write_4(rdev->rmmio, reg, v);
	} else {
		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
		spin_unlock(&rdev->mmio_idx_lock);
	}
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size) {
		return bus_read_4(rdev->rio_mem, reg);
	} else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size) {
		bus_write_4(rdev->rio_mem, reg, v);
	} else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
	}
}
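
/*
 * Note on the register accessors above: when a register lies beyond the
 * mapped aperture (or always_indirect is requested), the access
 * degenerates to the classic index/data pair, roughly:
 *
 *	WREG32(RADEON_MM_INDEX, reg);
 *	val = RREG32(RADEON_MM_DATA);
 *
 * The MMIO variants serialize this dance with rdev->mmio_idx_lock; the
 * I/O-port variants currently do not (see the XXX remarks above).
 */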