/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/r100.c 255573 2013-09-14 17:24:41Z dumbbell $
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include <linux/firmware.h>
#include <linux/module.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeonkmsfw_R100_cp"
#define FIRMWARE_R200		"radeonkmsfw_R200_cp"
#define FIRMWARE_R300		"radeonkmsfw_R300_cp"
#define FIRMWARE_R420		"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690		"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600		"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520		"radeonkmsfw_R520_cp"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 * and others in some cases.
 */

static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0) {
		if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
			return true;
		else
			return false;
	} else {
		if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
			return true;
		else
			return false;
	}
}

static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 vline1, vline2;

	if (crtc == 0) {
		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	} else {
		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	}
	if (vline1 != vline2)
		return true;
	else
		return false;
}

/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
			return;
	} else {
		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
			return;
	}

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

}

/**
 * r100_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Check if the last pageflip is still pending (r1xx-r4xx).
 * Returns the current update pending status.
 */
bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		  RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}

/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
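 *
 * The selection below steps one power state down (DOWNCLOCK) or up
 * (UPCLOCK) from the current index, skipping states flagged
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY while more than one CRTC is
 * active; MINIMUM and DEFAULT jump straight to the first or default
 * state, and only clock mode 0 of the chosen state is used.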
206 */ 207 void r100_pm_get_dynpm_state(struct radeon_device *rdev) 208 { 209 int i; 210 rdev->pm.dynpm_can_upclock = true; 211 rdev->pm.dynpm_can_downclock = true; 212 213 switch (rdev->pm.dynpm_planned_action) { 214 case DYNPM_ACTION_MINIMUM: 215 rdev->pm.requested_power_state_index = 0; 216 rdev->pm.dynpm_can_downclock = false; 217 break; 218 case DYNPM_ACTION_DOWNCLOCK: 219 if (rdev->pm.current_power_state_index == 0) { 220 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 221 rdev->pm.dynpm_can_downclock = false; 222 } else { 223 if (rdev->pm.active_crtc_count > 1) { 224 for (i = 0; i < rdev->pm.num_power_states; i++) { 225 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 226 continue; 227 else if (i >= rdev->pm.current_power_state_index) { 228 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 229 break; 230 } else { 231 rdev->pm.requested_power_state_index = i; 232 break; 233 } 234 } 235 } else 236 rdev->pm.requested_power_state_index = 237 rdev->pm.current_power_state_index - 1; 238 } 239 /* don't use the power state if crtcs are active and no display flag is set */ 240 if ((rdev->pm.active_crtc_count > 0) && 241 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & 242 RADEON_PM_MODE_NO_DISPLAY)) { 243 rdev->pm.requested_power_state_index++; 244 } 245 break; 246 case DYNPM_ACTION_UPCLOCK: 247 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { 248 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 249 rdev->pm.dynpm_can_upclock = false; 250 } else { 251 if (rdev->pm.active_crtc_count > 1) { 252 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { 253 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 254 continue; 255 else if (i <= rdev->pm.current_power_state_index) { 256 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 257 break; 258 } else { 259 rdev->pm.requested_power_state_index = i; 260 break; 261 } 262 } 263 } else 264 rdev->pm.requested_power_state_index = 265 rdev->pm.current_power_state_index + 1; 266 } 267 break; 268 case DYNPM_ACTION_DEFAULT: 269 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 270 rdev->pm.dynpm_can_upclock = false; 271 break; 272 case DYNPM_ACTION_NONE: 273 default: 274 DRM_ERROR("Requested mode for not defined action\n"); 275 return; 276 } 277 /* only one clock mode per power state */ 278 rdev->pm.requested_clock_mode_index = 0; 279 280 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", 281 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 282 clock_info[rdev->pm.requested_clock_mode_index].sclk, 283 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 284 clock_info[rdev->pm.requested_clock_mode_index].mclk, 285 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 286 pcie_lanes); 287 } 288 289 /** 290 * r100_pm_init_profile - Initialize power profiles callback. 291 * 292 * @rdev: radeon_device pointer 293 * 294 * Initialize the power states used in profile mode 295 * (r1xx-r3xx). 296 * Used for profile mode only. 
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
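 *
 * The code below toggles the voltage GPIO for states that support
 * voltage drop, reprograms the SCLK_CNTL/SCLK_CNTL2/SCLK_MORE_CNTL
 * PLL registers for reduced-speed sclk and dynamic voltage, and
 * finally updates the PCIe lane count when the new state requests a
 * different one.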
344 */ 345 void r100_pm_misc(struct radeon_device *rdev) 346 { 347 int requested_index = rdev->pm.requested_power_state_index; 348 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; 349 struct radeon_voltage *voltage = &ps->clock_info[0].voltage; 350 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; 351 352 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { 353 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { 354 tmp = RREG32(voltage->gpio.reg); 355 if (voltage->active_high) 356 tmp |= voltage->gpio.mask; 357 else 358 tmp &= ~(voltage->gpio.mask); 359 WREG32(voltage->gpio.reg, tmp); 360 if (voltage->delay) 361 udelay(voltage->delay); 362 } else { 363 tmp = RREG32(voltage->gpio.reg); 364 if (voltage->active_high) 365 tmp &= ~voltage->gpio.mask; 366 else 367 tmp |= voltage->gpio.mask; 368 WREG32(voltage->gpio.reg, tmp); 369 if (voltage->delay) 370 udelay(voltage->delay); 371 } 372 } 373 374 sclk_cntl = RREG32_PLL(SCLK_CNTL); 375 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); 376 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); 377 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); 378 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); 379 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { 380 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; 381 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) 382 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; 383 else 384 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; 385 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) 386 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); 387 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) 388 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); 389 } else 390 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; 391 392 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { 393 sclk_more_cntl |= IO_CG_VOLTAGE_DROP; 394 if (voltage->delay) { 395 sclk_more_cntl |= VOLTAGE_DROP_SYNC; 396 switch (voltage->delay) { 397 case 33: 398 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); 399 break; 400 case 66: 401 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); 402 break; 403 case 99: 404 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); 405 break; 406 case 132: 407 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); 408 break; 409 } 410 } else 411 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; 412 } else 413 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; 414 415 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) 416 sclk_cntl &= ~FORCE_HDP; 417 else 418 sclk_cntl |= FORCE_HDP; 419 420 WREG32_PLL(SCLK_CNTL, sclk_cntl); 421 WREG32_PLL(SCLK_CNTL2, sclk_cntl2); 422 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); 423 424 /* set pcie lanes */ 425 if ((rdev->flags & RADEON_IS_PCIE) && 426 !(rdev->flags & RADEON_IS_IGP) && 427 rdev->asic->pm.set_pcie_lanes && 428 (ps->pcie_lanes != 429 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { 430 radeon_set_pcie_lanes(rdev, 431 ps->pcie_lanes); 432 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes); 433 } 434 } 435 436 /** 437 * r100_pm_prepare - pre-power state change callback. 438 * 439 * @rdev: radeon_device pointer 440 * 441 * Prepare for a power state change (r1xx-r4xx). 
442 */ 443 void r100_pm_prepare(struct radeon_device *rdev) 444 { 445 struct drm_device *ddev = rdev->ddev; 446 struct drm_crtc *crtc; 447 struct radeon_crtc *radeon_crtc; 448 u32 tmp; 449 450 /* disable any active CRTCs */ 451 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 452 radeon_crtc = to_radeon_crtc(crtc); 453 if (radeon_crtc->enabled) { 454 if (radeon_crtc->crtc_id) { 455 tmp = RREG32(RADEON_CRTC2_GEN_CNTL); 456 tmp |= RADEON_CRTC2_DISP_REQ_EN_B; 457 WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 458 } else { 459 tmp = RREG32(RADEON_CRTC_GEN_CNTL); 460 tmp |= RADEON_CRTC_DISP_REQ_EN_B; 461 WREG32(RADEON_CRTC_GEN_CNTL, tmp); 462 } 463 } 464 } 465 } 466 467 /** 468 * r100_pm_finish - post-power state change callback. 469 * 470 * @rdev: radeon_device pointer 471 * 472 * Clean up after a power state change (r1xx-r4xx). 473 */ 474 void r100_pm_finish(struct radeon_device *rdev) 475 { 476 struct drm_device *ddev = rdev->ddev; 477 struct drm_crtc *crtc; 478 struct radeon_crtc *radeon_crtc; 479 u32 tmp; 480 481 /* enable any active CRTCs */ 482 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 483 radeon_crtc = to_radeon_crtc(crtc); 484 if (radeon_crtc->enabled) { 485 if (radeon_crtc->crtc_id) { 486 tmp = RREG32(RADEON_CRTC2_GEN_CNTL); 487 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; 488 WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 489 } else { 490 tmp = RREG32(RADEON_CRTC_GEN_CNTL); 491 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; 492 WREG32(RADEON_CRTC_GEN_CNTL, tmp); 493 } 494 } 495 } 496 } 497 498 /** 499 * r100_gui_idle - gui idle callback. 500 * 501 * @rdev: radeon_device pointer 502 * 503 * Check of the GUI (2D/3D engines) are idle (r1xx-r5xx). 504 * Returns true if idle, false if not. 505 */ 506 bool r100_gui_idle(struct radeon_device *rdev) 507 { 508 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) 509 return false; 510 else 511 return true; 512 } 513 514 /* hpd for digital panel detect/disconnect */ 515 /** 516 * r100_hpd_sense - hpd sense callback. 517 * 518 * @rdev: radeon_device pointer 519 * @hpd: hpd (hotplug detect) pin 520 * 521 * Checks if a digital monitor is connected (r1xx-r4xx). 522 * Returns true if connected, false if not connected. 523 */ 524 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 525 { 526 bool connected = false; 527 528 switch (hpd) { 529 case RADEON_HPD_1: 530 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) 531 connected = true; 532 break; 533 case RADEON_HPD_2: 534 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) 535 connected = true; 536 break; 537 default: 538 break; 539 } 540 return connected; 541 } 542 543 /** 544 * r100_hpd_set_polarity - hpd set polarity callback. 545 * 546 * @rdev: radeon_device pointer 547 * @hpd: hpd (hotplug detect) pin 548 * 549 * Set the polarity of the hpd pin (r1xx-r4xx). 550 */ 551 void r100_hpd_set_polarity(struct radeon_device *rdev, 552 enum radeon_hpd_id hpd) 553 { 554 u32 tmp; 555 bool connected = r100_hpd_sense(rdev, hpd); 556 557 switch (hpd) { 558 case RADEON_HPD_1: 559 tmp = RREG32(RADEON_FP_GEN_CNTL); 560 if (connected) 561 tmp &= ~RADEON_FP_DETECT_INT_POL; 562 else 563 tmp |= RADEON_FP_DETECT_INT_POL; 564 WREG32(RADEON_FP_GEN_CNTL, tmp); 565 break; 566 case RADEON_HPD_2: 567 tmp = RREG32(RADEON_FP2_GEN_CNTL); 568 if (connected) 569 tmp &= ~RADEON_FP2_DETECT_INT_POL; 570 else 571 tmp |= RADEON_FP2_DETECT_INT_POL; 572 WREG32(RADEON_FP2_GEN_CNTL, tmp); 573 break; 574 default: 575 break; 576 } 577 } 578 579 /** 580 * r100_hpd_init - hpd setup callback. 
581 * 582 * @rdev: radeon_device pointer 583 * 584 * Setup the hpd pins used by the card (r1xx-r4xx). 585 * Set the polarity, and enable the hpd interrupts. 586 */ 587 void r100_hpd_init(struct radeon_device *rdev) 588 { 589 struct drm_device *dev = rdev->ddev; 590 struct drm_connector *connector; 591 unsigned enable = 0; 592 593 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 594 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 595 enable |= 1 << radeon_connector->hpd.hpd; 596 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 597 } 598 radeon_irq_kms_enable_hpd(rdev, enable); 599 } 600 601 /** 602 * r100_hpd_fini - hpd tear down callback. 603 * 604 * @rdev: radeon_device pointer 605 * 606 * Tear down the hpd pins used by the card (r1xx-r4xx). 607 * Disable the hpd interrupts. 608 */ 609 void r100_hpd_fini(struct radeon_device *rdev) 610 { 611 struct drm_device *dev = rdev->ddev; 612 struct drm_connector *connector; 613 unsigned disable = 0; 614 615 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 616 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 617 disable |= 1 << radeon_connector->hpd.hpd; 618 } 619 radeon_irq_kms_disable_hpd(rdev, disable); 620 } 621 622 /* 623 * PCI GART 624 */ 625 void r100_pci_gart_tlb_flush(struct radeon_device *rdev) 626 { 627 /* TODO: can we do somethings here ? */ 628 /* It seems hw only cache one entry so we should discard this 629 * entry otherwise if first GPU GART read hit this entry it 630 * could end up in wrong address. */ 631 } 632 633 int r100_pci_gart_init(struct radeon_device *rdev) 634 { 635 int r; 636 637 if (rdev->gart.ptr) { 638 WARN(1, "R100 PCI GART already initialized\n"); 639 return 0; 640 } 641 /* Initialize common gart structure */ 642 r = radeon_gart_init(rdev); 643 if (r) 644 return r; 645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 647 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 648 return radeon_gart_table_ram_alloc(rdev); 649 } 650 651 int r100_pci_gart_enable(struct radeon_device *rdev) 652 { 653 uint32_t tmp; 654 655 /* discard memory request outside of configured range */ 656 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 657 WREG32(RADEON_AIC_CNTL, tmp); 658 /* set address range for PCI address translate */ 659 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start); 660 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end); 661 /* set PCI GART page-table base address */ 662 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 663 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 664 WREG32(RADEON_AIC_CNTL, tmp); 665 r100_pci_gart_tlb_flush(rdev); 666 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n", 667 (unsigned)(rdev->mc.gtt_size >> 20), 668 (unsigned long long)rdev->gart.table_addr); 669 rdev->gart.ready = true; 670 return 0; 671 } 672 673 void r100_pci_gart_disable(struct radeon_device *rdev) 674 { 675 uint32_t tmp; 676 677 /* discard memory request outside of configured range */ 678 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 679 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); 680 WREG32(RADEON_AIC_LO_ADDR, 0); 681 WREG32(RADEON_AIC_HI_ADDR, 0); 682 } 683 684 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 685 uint64_t addr, uint32_t flags) 686 { 687 u32 *gtt = rdev->gart.ptr; 688 gtt[i] = cpu_to_le32(lower_32_bits(addr)); 689 } 690 691 void 
r100_pci_gart_fini(struct radeon_device *rdev) 692 { 693 radeon_gart_fini(rdev); 694 r100_pci_gart_disable(rdev); 695 radeon_gart_table_ram_free(rdev); 696 } 697 698 int r100_irq_set(struct radeon_device *rdev) 699 { 700 uint32_t tmp = 0; 701 702 if (!rdev->irq.installed) { 703 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 704 WREG32(R_000040_GEN_INT_CNTL, 0); 705 return -EINVAL; 706 } 707 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 708 tmp |= RADEON_SW_INT_ENABLE; 709 } 710 if (rdev->irq.crtc_vblank_int[0] || 711 atomic_read(&rdev->irq.pflip[0])) { 712 tmp |= RADEON_CRTC_VBLANK_MASK; 713 } 714 if (rdev->irq.crtc_vblank_int[1] || 715 atomic_read(&rdev->irq.pflip[1])) { 716 tmp |= RADEON_CRTC2_VBLANK_MASK; 717 } 718 if (rdev->irq.hpd[0]) { 719 tmp |= RADEON_FP_DETECT_MASK; 720 } 721 if (rdev->irq.hpd[1]) { 722 tmp |= RADEON_FP2_DETECT_MASK; 723 } 724 WREG32(RADEON_GEN_INT_CNTL, tmp); 725 return 0; 726 } 727 728 void r100_irq_disable(struct radeon_device *rdev) 729 { 730 u32 tmp; 731 732 WREG32(R_000040_GEN_INT_CNTL, 0); 733 /* Wait and acknowledge irq */ 734 mdelay(1); 735 tmp = RREG32(R_000044_GEN_INT_STATUS); 736 WREG32(R_000044_GEN_INT_STATUS, tmp); 737 } 738 739 static uint32_t r100_irq_ack(struct radeon_device *rdev) 740 { 741 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 742 uint32_t irq_mask = RADEON_SW_INT_TEST | 743 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 744 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 745 746 if (irqs) { 747 WREG32(RADEON_GEN_INT_STATUS, irqs); 748 } 749 return irqs & irq_mask; 750 } 751 752 irqreturn_t r100_irq_process(struct radeon_device *rdev) 753 { 754 uint32_t status, msi_rearm; 755 bool queue_hotplug = false; 756 757 status = r100_irq_ack(rdev); 758 if (!status) { 759 return IRQ_NONE; 760 } 761 if (rdev->shutdown) { 762 return IRQ_NONE; 763 } 764 while (status) { 765 /* SW interrupt */ 766 if (status & RADEON_SW_INT_TEST) { 767 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 768 } 769 /* Vertical blank interrupts */ 770 if (status & RADEON_CRTC_VBLANK_STAT) { 771 if (rdev->irq.crtc_vblank_int[0]) { 772 drm_handle_vblank(rdev->ddev, 0); 773 rdev->pm.vblank_sync = true; 774 wake_up(&rdev->irq.vblank_queue); 775 } 776 if (atomic_read(&rdev->irq.pflip[0])) 777 radeon_crtc_handle_vblank(rdev, 0); 778 } 779 if (status & RADEON_CRTC2_VBLANK_STAT) { 780 if (rdev->irq.crtc_vblank_int[1]) { 781 drm_handle_vblank(rdev->ddev, 1); 782 rdev->pm.vblank_sync = true; 783 wake_up(&rdev->irq.vblank_queue); 784 } 785 if (atomic_read(&rdev->irq.pflip[1])) 786 radeon_crtc_handle_vblank(rdev, 1); 787 } 788 if (status & RADEON_FP_DETECT_STAT) { 789 queue_hotplug = true; 790 DRM_DEBUG("HPD1\n"); 791 } 792 if (status & RADEON_FP2_DETECT_STAT) { 793 queue_hotplug = true; 794 DRM_DEBUG("HPD2\n"); 795 } 796 status = r100_irq_ack(rdev); 797 } 798 if (queue_hotplug) 799 taskqueue_enqueue(rdev->tq, &rdev->hotplug_work); 800 if (rdev->msi_enabled) { 801 switch (rdev->family) { 802 case CHIP_RS400: 803 case CHIP_RS480: 804 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM; 805 WREG32(RADEON_AIC_CNTL, msi_rearm); 806 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); 807 break; 808 default: 809 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); 810 break; 811 } 812 } 813 return IRQ_HANDLED; 814 } 815 816 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) 817 { 818 if (crtc == 0) 819 return RREG32(RADEON_CRTC_CRNT_FRAME); 820 else 821 return RREG32(RADEON_CRTC2_CRNT_FRAME); 822 } 823 824 /** 825 * 
r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 * @rdev: radeon device structure
 * @ring: ring buffer struct for emitting packets
 */
static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	r100_ring_hdp_flush(rdev, ring);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

bool r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	BUG();
	return false;
}

int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
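		/* Pitch/offset words below: pitch is encoded in 64-byte units
		 * in the upper bits, offsets in 1 KiB units (hence >> 10). */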
radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10)); 924 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10)); 925 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); 926 radeon_ring_write(ring, 0); 927 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); 928 radeon_ring_write(ring, num_gpu_pages); 929 radeon_ring_write(ring, num_gpu_pages); 930 radeon_ring_write(ring, cur_pages | (stride_pixels << 16)); 931 } 932 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 933 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL); 934 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 935 radeon_ring_write(ring, 936 RADEON_WAIT_2D_IDLECLEAN | 937 RADEON_WAIT_HOST_IDLECLEAN | 938 RADEON_WAIT_DMA_GUI_IDLE); 939 if (fence) { 940 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 941 } 942 radeon_ring_unlock_commit(rdev, ring, false); 943 return r; 944 } 945 946 static int r100_cp_wait_for_idle(struct radeon_device *rdev) 947 { 948 unsigned i; 949 u32 tmp; 950 951 for (i = 0; i < rdev->usec_timeout; i++) { 952 tmp = RREG32(R_000E40_RBBM_STATUS); 953 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { 954 return 0; 955 } 956 udelay(1); 957 } 958 return -1; 959 } 960 961 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) 962 { 963 int r; 964 965 r = radeon_ring_lock(rdev, ring, 2); 966 if (r) { 967 return; 968 } 969 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); 970 radeon_ring_write(ring, 971 RADEON_ISYNC_ANY2D_IDLE3D | 972 RADEON_ISYNC_ANY3D_IDLE2D | 973 RADEON_ISYNC_WAIT_IDLEGUI | 974 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 975 radeon_ring_unlock_commit(rdev, ring, false); 976 } 977 978 979 /* Load the microcode for the CP */ 980 static int r100_cp_init_microcode(struct radeon_device *rdev) 981 { 982 const char *fw_name = NULL; 983 int err; 984 985 DRM_DEBUG_KMS("\n"); 986 987 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || 988 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || 989 (rdev->family == CHIP_RS200)) { 990 DRM_INFO("Loading R100 Microcode\n"); 991 fw_name = FIRMWARE_R100; 992 } else if ((rdev->family == CHIP_R200) || 993 (rdev->family == CHIP_RV250) || 994 (rdev->family == CHIP_RV280) || 995 (rdev->family == CHIP_RS300)) { 996 DRM_INFO("Loading R200 Microcode\n"); 997 fw_name = FIRMWARE_R200; 998 } else if ((rdev->family == CHIP_R300) || 999 (rdev->family == CHIP_R350) || 1000 (rdev->family == CHIP_RV350) || 1001 (rdev->family == CHIP_RV380) || 1002 (rdev->family == CHIP_RS400) || 1003 (rdev->family == CHIP_RS480)) { 1004 DRM_INFO("Loading R300 Microcode\n"); 1005 fw_name = FIRMWARE_R300; 1006 } else if ((rdev->family == CHIP_R420) || 1007 (rdev->family == CHIP_R423) || 1008 (rdev->family == CHIP_RV410)) { 1009 DRM_INFO("Loading R400 Microcode\n"); 1010 fw_name = FIRMWARE_R420; 1011 } else if ((rdev->family == CHIP_RS690) || 1012 (rdev->family == CHIP_RS740)) { 1013 DRM_INFO("Loading RS690/RS740 Microcode\n"); 1014 fw_name = FIRMWARE_RS690; 1015 } else if (rdev->family == CHIP_RS600) { 1016 DRM_INFO("Loading RS600 Microcode\n"); 1017 fw_name = FIRMWARE_RS600; 1018 } else if ((rdev->family == CHIP_RV515) || 1019 (rdev->family == CHIP_R520) || 1020 (rdev->family == CHIP_RV530) || 1021 (rdev->family == CHIP_R580) || 1022 (rdev->family == CHIP_RV560) || 1023 (rdev->family == CHIP_RV570)) { 1024 DRM_INFO("Loading R500 Microcode\n"); 1025 fw_name = FIRMWARE_R520; 1026 } 1027 1028 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 1029 if (err) { 1030 printk(KERN_ERR "radeon_cp: Failed to load firmware 
\"%s\"\n", 1031 fw_name); 1032 } else if (rdev->me_fw->datasize % 8) { 1033 printk(KERN_ERR 1034 "radeon_cp: Bogus length %zu in firmware \"%s\"\n", 1035 rdev->me_fw->datasize, fw_name); 1036 err = -EINVAL; 1037 release_firmware(rdev->me_fw); 1038 rdev->me_fw = NULL; 1039 } 1040 return err; 1041 } 1042 1043 u32 r100_gfx_get_rptr(struct radeon_device *rdev, 1044 struct radeon_ring *ring) 1045 { 1046 u32 rptr; 1047 1048 if (rdev->wb.enabled) 1049 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 1050 else 1051 rptr = RREG32(RADEON_CP_RB_RPTR); 1052 1053 return rptr; 1054 } 1055 1056 u32 r100_gfx_get_wptr(struct radeon_device *rdev, 1057 struct radeon_ring *ring) 1058 { 1059 u32 wptr; 1060 1061 wptr = RREG32(RADEON_CP_RB_WPTR); 1062 1063 return wptr; 1064 } 1065 1066 void r100_gfx_set_wptr(struct radeon_device *rdev, 1067 struct radeon_ring *ring) 1068 { 1069 WREG32(RADEON_CP_RB_WPTR, ring->wptr); 1070 (void)RREG32(RADEON_CP_RB_WPTR); 1071 } 1072 1073 /** 1074 * r100_cp_fini_microcode - drop the firmware image reference 1075 * 1076 * @rdev: radeon_device pointer 1077 * 1078 * Drop the me firmware image reference. 1079 * Called at driver shutdown. 1080 */ 1081 static void r100_cp_fini_microcode (struct radeon_device *rdev) 1082 { 1083 release_firmware(rdev->me_fw); 1084 rdev->me_fw = NULL; 1085 } 1086 1087 static void r100_cp_load_microcode(struct radeon_device *rdev) 1088 { 1089 const __be32 *fw_data; 1090 int i, size; 1091 1092 if (r100_gui_wait_for_idle(rdev)) { 1093 printk(KERN_WARNING "Failed to wait GUI idle while " 1094 "programming pipes. Bad things might happen.\n"); 1095 } 1096 1097 if (rdev->me_fw) { 1098 size = rdev->me_fw->datasize / 4; 1099 fw_data = (const __be32 *)rdev->me_fw->data; 1100 WREG32(RADEON_CP_ME_RAM_ADDR, 0); 1101 for (i = 0; i < size; i += 2) { 1102 WREG32(RADEON_CP_ME_RAM_DATAH, 1103 be32_to_cpup(&fw_data[i])); 1104 WREG32(RADEON_CP_ME_RAM_DATAL, 1105 be32_to_cpup(&fw_data[i + 1])); 1106 } 1107 } 1108 } 1109 1110 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) 1111 { 1112 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1113 unsigned rb_bufsz; 1114 unsigned rb_blksz; 1115 unsigned max_fetch; 1116 unsigned pre_write_timer; 1117 unsigned pre_write_limit; 1118 unsigned indirect2_start; 1119 unsigned indirect1_start; 1120 uint32_t tmp; 1121 int r; 1122 1123 if (r100_debugfs_cp_init(rdev)) { 1124 DRM_ERROR("Failed to register debugfs file for CP !\n"); 1125 } 1126 if (!rdev->me_fw) { 1127 r = r100_cp_init_microcode(rdev); 1128 if (r) { 1129 DRM_ERROR("Failed to load firmware!\n"); 1130 return r; 1131 } 1132 } 1133 1134 /* Align ring size */ 1135 rb_bufsz = order_base_2(ring_size / 8); 1136 ring_size = (1 << (rb_bufsz + 1)) * 4; 1137 r100_cp_load_microcode(rdev); 1138 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, 1139 RADEON_CP_PACKET2); 1140 if (r) { 1141 return r; 1142 } 1143 /* Each time the cp read 1024 bytes (16 dword/quadword) update 1144 * the rptr copy in system ram */ 1145 rb_blksz = 9; 1146 /* cp will read 128bytes at a time (4 dwords) */ 1147 max_fetch = 1; 1148 ring->align_mask = 16 - 1; 1149 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ 1150 pre_write_timer = 64; 1151 /* Force CP_RB_WPTR write if written more than one time before the 1152 * delay expire 1153 */ 1154 pre_write_limit = 0; 1155 /* Setup the cp cache like this (cache size is 96 dwords) : 1156 * RING 0 to 15 1157 * INDIRECT1 16 to 79 1158 * INDIRECT2 80 to 95 1159 * So ring cache size is 16dwords (> (2 * 
max_fetch = 2 * 4dwords)) 1160 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords)) 1161 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 1162 * Idea being that most of the gpu cmd will be through indirect1 buffer 1163 * so it gets the bigger cache. 1164 */ 1165 indirect2_start = 80; 1166 indirect1_start = 16; 1167 /* cp setup */ 1168 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 1169 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | 1170 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 1171 REG_SET(RADEON_MAX_FETCH, max_fetch)); 1172 #ifdef __BIG_ENDIAN 1173 tmp |= RADEON_BUF_SWAP_32BIT; 1174 #endif 1175 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); 1176 1177 /* Set ring address */ 1178 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr); 1179 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr); 1180 /* Force read & write ptr to 0 */ 1181 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 1182 WREG32(RADEON_CP_RB_RPTR_WR, 0); 1183 ring->wptr = 0; 1184 WREG32(RADEON_CP_RB_WPTR, ring->wptr); 1185 1186 /* set the wb address whether it's enabled or not */ 1187 WREG32(R_00070C_CP_RB_RPTR_ADDR, 1188 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); 1189 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); 1190 1191 if (rdev->wb.enabled) 1192 WREG32(R_000770_SCRATCH_UMSK, 0xff); 1193 else { 1194 tmp |= RADEON_RB_NO_UPDATE; 1195 WREG32(R_000770_SCRATCH_UMSK, 0); 1196 } 1197 1198 WREG32(RADEON_CP_RB_CNTL, tmp); 1199 udelay(10); 1200 /* Set cp mode to bus mastering & enable cp*/ 1201 WREG32(RADEON_CP_CSQ_MODE, 1202 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1203 REG_SET(RADEON_INDIRECT1_START, indirect1_start)); 1204 WREG32(RADEON_CP_RB_WPTR_DELAY, 0); 1205 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); 1206 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1207 1208 /* at this point everything should be setup correctly to enable master */ 1209 pci_enable_busmaster(rdev->dev); 1210 1211 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1212 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 1213 if (r) { 1214 DRM_ERROR("radeon: cp isn't working (%d).\n", r); 1215 return r; 1216 } 1217 ring->ready = true; 1218 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1219 1220 if (!ring->rptr_save_reg /* not resuming from suspend */ 1221 && radeon_ring_supports_scratch_reg(rdev, ring)) { 1222 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 1223 if (r) { 1224 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 1225 ring->rptr_save_reg = 0; 1226 } 1227 } 1228 return 0; 1229 } 1230 1231 void r100_cp_fini(struct radeon_device *rdev) 1232 { 1233 if (r100_cp_wait_for_idle(rdev)) { 1234 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n"); 1235 } 1236 /* Disable ring */ 1237 r100_cp_disable(rdev); 1238 radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg); 1239 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1240 DRM_INFO("radeon: cp finalized\n"); 1241 } 1242 1243 void r100_cp_disable(struct radeon_device *rdev) 1244 { 1245 /* Disable ring */ 1246 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1247 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1248 WREG32(RADEON_CP_CSQ_MODE, 0); 1249 WREG32(RADEON_CP_CSQ_CNTL, 0); 1250 WREG32(R_000770_SCRATCH_UMSK, 0); 1251 if (r100_gui_wait_for_idle(rdev)) { 1252 printk(KERN_WARNING "Failed to 
wait GUI idle while " 1253 "programming pipes. Bad things might happen.\n"); 1254 } 1255 } 1256 1257 /* 1258 * CS functions 1259 */ 1260 int r100_reloc_pitch_offset(struct radeon_cs_parser *p, 1261 struct radeon_cs_packet *pkt, 1262 unsigned idx, 1263 unsigned reg) 1264 { 1265 int r; 1266 u32 tile_flags = 0; 1267 u32 tmp; 1268 struct radeon_cs_reloc *reloc; 1269 u32 value; 1270 1271 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1272 if (r) { 1273 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1274 idx, reg); 1275 radeon_cs_dump_packet(p, pkt); 1276 return r; 1277 } 1278 1279 value = radeon_get_ib_value(p, idx); 1280 tmp = value & 0x003fffff; 1281 tmp += (((u32)reloc->gpu_offset) >> 10); 1282 1283 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1284 if (reloc->tiling_flags & RADEON_TILING_MACRO) 1285 tile_flags |= RADEON_DST_TILE_MACRO; 1286 if (reloc->tiling_flags & RADEON_TILING_MICRO) { 1287 if (reg == RADEON_SRC_PITCH_OFFSET) { 1288 DRM_ERROR("Cannot src blit from microtiled surface\n"); 1289 radeon_cs_dump_packet(p, pkt); 1290 return -EINVAL; 1291 } 1292 tile_flags |= RADEON_DST_TILE_MICRO; 1293 } 1294 1295 tmp |= tile_flags; 1296 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp; 1297 } else 1298 p->ib.ptr[idx] = (value & 0xffc00000) | tmp; 1299 return 0; 1300 } 1301 1302 int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, 1303 struct radeon_cs_packet *pkt, 1304 int idx) 1305 { 1306 unsigned c, i; 1307 struct radeon_cs_reloc *reloc; 1308 struct r100_cs_track *track; 1309 int r = 0; 1310 volatile uint32_t *ib; 1311 u32 idx_value; 1312 1313 ib = p->ib.ptr; 1314 track = (struct r100_cs_track *)p->track; 1315 c = radeon_get_ib_value(p, idx++) & 0x1F; 1316 if (c > 16) { 1317 DRM_ERROR("Only 16 vertex buffers are allowed %d\n", 1318 pkt->opcode); 1319 radeon_cs_dump_packet(p, pkt); 1320 return -EINVAL; 1321 } 1322 track->num_arrays = c; 1323 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1324 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1325 if (r) { 1326 DRM_ERROR("No reloc for packet3 %d\n", 1327 pkt->opcode); 1328 radeon_cs_dump_packet(p, pkt); 1329 return r; 1330 } 1331 idx_value = radeon_get_ib_value(p, idx); 1332 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); 1333 1334 track->arrays[i + 0].esize = idx_value >> 8; 1335 track->arrays[i + 0].robj = reloc->robj; 1336 track->arrays[i + 0].esize &= 0x7F; 1337 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1338 if (r) { 1339 DRM_ERROR("No reloc for packet3 %d\n", 1340 pkt->opcode); 1341 radeon_cs_dump_packet(p, pkt); 1342 return r; 1343 } 1344 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset); 1345 track->arrays[i + 1].robj = reloc->robj; 1346 track->arrays[i + 1].esize = idx_value >> 24; 1347 track->arrays[i + 1].esize &= 0x7F; 1348 } 1349 if (c & 1) { 1350 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1351 if (r) { 1352 DRM_ERROR("No reloc for packet3 %d\n", 1353 pkt->opcode); 1354 radeon_cs_dump_packet(p, pkt); 1355 return r; 1356 } 1357 idx_value = radeon_get_ib_value(p, idx); 1358 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); 1359 track->arrays[i + 0].robj = reloc->robj; 1360 track->arrays[i + 0].esize = idx_value >> 8; 1361 track->arrays[i + 0].esize &= 0x7F; 1362 } 1363 return r; 1364 } 1365 1366 int r100_cs_parse_packet0(struct radeon_cs_parser *p, 1367 struct radeon_cs_packet *pkt, 1368 const unsigned *auth, unsigned n, 1369 radeon_packet0_check_t check) 1370 { 1371 unsigned reg; 1372 unsigned i, j, m; 1373 unsigned idx; 1374 int r; 1375 1376 idx = pkt->idx + 1; 
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the wait until */
	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check that it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = R100_CP_PACKET0_GET_REG(header);
	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
	if (!crtc) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
1506 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) 1507 vtx_size += 3; 1508 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) 1509 vtx_size++; 1510 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) 1511 vtx_size++; 1512 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC) 1513 vtx_size += 3; 1514 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG) 1515 vtx_size++; 1516 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC) 1517 vtx_size++; 1518 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0) 1519 vtx_size += 2; 1520 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1) 1521 vtx_size += 2; 1522 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1) 1523 vtx_size++; 1524 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2) 1525 vtx_size += 2; 1526 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2) 1527 vtx_size++; 1528 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3) 1529 vtx_size += 2; 1530 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3) 1531 vtx_size++; 1532 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0) 1533 vtx_size++; 1534 /* blend weight */ 1535 if (vtx_fmt & (0x7 << 15)) 1536 vtx_size += (vtx_fmt >> 15) & 0x7; 1537 if (vtx_fmt & RADEON_SE_VTX_FMT_N0) 1538 vtx_size += 3; 1539 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1) 1540 vtx_size += 2; 1541 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1) 1542 vtx_size++; 1543 if (vtx_fmt & RADEON_SE_VTX_FMT_W1) 1544 vtx_size++; 1545 if (vtx_fmt & RADEON_SE_VTX_FMT_N1) 1546 vtx_size++; 1547 if (vtx_fmt & RADEON_SE_VTX_FMT_Z) 1548 vtx_size++; 1549 return vtx_size; 1550 } 1551 1552 static int r100_packet0_check(struct radeon_cs_parser *p, 1553 struct radeon_cs_packet *pkt, 1554 unsigned idx, unsigned reg) 1555 { 1556 struct radeon_cs_reloc *reloc; 1557 struct r100_cs_track *track; 1558 volatile uint32_t *ib; 1559 uint32_t tmp; 1560 int r; 1561 int i, face; 1562 u32 tile_flags = 0; 1563 u32 idx_value; 1564 1565 ib = p->ib.ptr; 1566 track = (struct r100_cs_track *)p->track; 1567 1568 idx_value = radeon_get_ib_value(p, idx); 1569 1570 switch (reg) { 1571 case RADEON_CRTC_GUI_TRIG_VLINE: 1572 r = r100_cs_packet_parse_vline(p); 1573 if (r) { 1574 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1575 idx, reg); 1576 radeon_cs_dump_packet(p, pkt); 1577 return r; 1578 } 1579 break; 1580 /* FIXME: only allow PACKET3 blit? 
easier to check for out of 1581 * range access */ 1582 case RADEON_DST_PITCH_OFFSET: 1583 case RADEON_SRC_PITCH_OFFSET: 1584 r = r100_reloc_pitch_offset(p, pkt, idx, reg); 1585 if (r) 1586 return r; 1587 break; 1588 case RADEON_RB3D_DEPTHOFFSET: 1589 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1590 if (r) { 1591 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1592 idx, reg); 1593 radeon_cs_dump_packet(p, pkt); 1594 return r; 1595 } 1596 track->zb.robj = reloc->robj; 1597 track->zb.offset = idx_value; 1598 track->zb_dirty = true; 1599 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1600 break; 1601 case RADEON_RB3D_COLOROFFSET: 1602 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1603 if (r) { 1604 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1605 idx, reg); 1606 radeon_cs_dump_packet(p, pkt); 1607 return r; 1608 } 1609 track->cb[0].robj = reloc->robj; 1610 track->cb[0].offset = idx_value; 1611 track->cb_dirty = true; 1612 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1613 break; 1614 case RADEON_PP_TXOFFSET_0: 1615 case RADEON_PP_TXOFFSET_1: 1616 case RADEON_PP_TXOFFSET_2: 1617 i = (reg - RADEON_PP_TXOFFSET_0) / 24; 1618 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1619 if (r) { 1620 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1621 idx, reg); 1622 radeon_cs_dump_packet(p, pkt); 1623 return r; 1624 } 1625 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1626 if (reloc->tiling_flags & RADEON_TILING_MACRO) 1627 tile_flags |= RADEON_TXO_MACRO_TILE; 1628 if (reloc->tiling_flags & RADEON_TILING_MICRO) 1629 tile_flags |= RADEON_TXO_MICRO_TILE_X2; 1630 1631 tmp = idx_value & ~(0x7 << 2); 1632 tmp |= tile_flags; 1633 ib[idx] = tmp + ((u32)reloc->gpu_offset); 1634 } else 1635 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1636 track->textures[i].robj = reloc->robj; 1637 track->tex_dirty = true; 1638 break; 1639 case RADEON_PP_CUBIC_OFFSET_T0_0: 1640 case RADEON_PP_CUBIC_OFFSET_T0_1: 1641 case RADEON_PP_CUBIC_OFFSET_T0_2: 1642 case RADEON_PP_CUBIC_OFFSET_T0_3: 1643 case RADEON_PP_CUBIC_OFFSET_T0_4: 1644 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; 1645 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1646 if (r) { 1647 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1648 idx, reg); 1649 radeon_cs_dump_packet(p, pkt); 1650 return r; 1651 } 1652 track->textures[0].cube_info[i].offset = idx_value; 1653 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1654 track->textures[0].cube_info[i].robj = reloc->robj; 1655 track->tex_dirty = true; 1656 break; 1657 case RADEON_PP_CUBIC_OFFSET_T1_0: 1658 case RADEON_PP_CUBIC_OFFSET_T1_1: 1659 case RADEON_PP_CUBIC_OFFSET_T1_2: 1660 case RADEON_PP_CUBIC_OFFSET_T1_3: 1661 case RADEON_PP_CUBIC_OFFSET_T1_4: 1662 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; 1663 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1664 if (r) { 1665 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1666 idx, reg); 1667 radeon_cs_dump_packet(p, pkt); 1668 return r; 1669 } 1670 track->textures[1].cube_info[i].offset = idx_value; 1671 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1672 track->textures[1].cube_info[i].robj = reloc->robj; 1673 track->tex_dirty = true; 1674 break; 1675 case RADEON_PP_CUBIC_OFFSET_T2_0: 1676 case RADEON_PP_CUBIC_OFFSET_T2_1: 1677 case RADEON_PP_CUBIC_OFFSET_T2_2: 1678 case RADEON_PP_CUBIC_OFFSET_T2_3: 1679 case RADEON_PP_CUBIC_OFFSET_T2_4: 1680 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; 1681 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1682 if (r) { 1683 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1684 idx, reg); 1685 radeon_cs_dump_packet(p, pkt); 1686 return r; 1687 } 1688 
track->textures[2].cube_info[i].offset = idx_value; 1689 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1690 track->textures[2].cube_info[i].robj = reloc->robj; 1691 track->tex_dirty = true; 1692 break; 1693 case RADEON_RE_WIDTH_HEIGHT: 1694 track->maxy = ((idx_value >> 16) & 0x7FF); 1695 track->cb_dirty = true; 1696 track->zb_dirty = true; 1697 break; 1698 case RADEON_RB3D_COLORPITCH: 1699 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1700 if (r) { 1701 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1702 idx, reg); 1703 radeon_cs_dump_packet(p, pkt); 1704 return r; 1705 } 1706 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1707 if (reloc->tiling_flags & RADEON_TILING_MACRO) 1708 tile_flags |= RADEON_COLOR_TILE_ENABLE; 1709 if (reloc->tiling_flags & RADEON_TILING_MICRO) 1710 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1711 1712 tmp = idx_value & ~(0x7 << 16); 1713 tmp |= tile_flags; 1714 ib[idx] = tmp; 1715 } else 1716 ib[idx] = idx_value; 1717 1718 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1719 track->cb_dirty = true; 1720 break; 1721 case RADEON_RB3D_DEPTHPITCH: 1722 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1723 track->zb_dirty = true; 1724 break; 1725 case RADEON_RB3D_CNTL: 1726 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1727 case 7: 1728 case 8: 1729 case 9: 1730 case 11: 1731 case 12: 1732 track->cb[0].cpp = 1; 1733 break; 1734 case 3: 1735 case 4: 1736 case 15: 1737 track->cb[0].cpp = 2; 1738 break; 1739 case 6: 1740 track->cb[0].cpp = 4; 1741 break; 1742 default: 1743 DRM_ERROR("Invalid color buffer format (%d) !\n", 1744 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1745 return -EINVAL; 1746 } 1747 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 1748 track->cb_dirty = true; 1749 track->zb_dirty = true; 1750 break; 1751 case RADEON_RB3D_ZSTENCILCNTL: 1752 switch (idx_value & 0xf) { 1753 case 0: 1754 track->zb.cpp = 2; 1755 break; 1756 case 2: 1757 case 3: 1758 case 4: 1759 case 5: 1760 case 9: 1761 case 11: 1762 track->zb.cpp = 4; 1763 break; 1764 default: 1765 break; 1766 } 1767 track->zb_dirty = true; 1768 break; 1769 case RADEON_RB3D_ZPASS_ADDR: 1770 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1771 if (r) { 1772 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1773 idx, reg); 1774 radeon_cs_dump_packet(p, pkt); 1775 return r; 1776 } 1777 ib[idx] = idx_value + ((u32)reloc->gpu_offset); 1778 break; 1779 case RADEON_PP_CNTL: 1780 { 1781 uint32_t temp = idx_value >> 4; 1782 for (i = 0; i < track->num_texture; i++) 1783 track->textures[i].enabled = !!(temp & (1 << i)); 1784 track->tex_dirty = true; 1785 } 1786 break; 1787 case RADEON_SE_VF_CNTL: 1788 track->vap_vf_cntl = idx_value; 1789 break; 1790 case RADEON_SE_VTX_FMT: 1791 track->vtx_size = r100_get_vtx_size(idx_value); 1792 break; 1793 case RADEON_PP_TEX_SIZE_0: 1794 case RADEON_PP_TEX_SIZE_1: 1795 case RADEON_PP_TEX_SIZE_2: 1796 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1797 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 1798 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1799 track->tex_dirty = true; 1800 break; 1801 case RADEON_PP_TEX_PITCH_0: 1802 case RADEON_PP_TEX_PITCH_1: 1803 case RADEON_PP_TEX_PITCH_2: 1804 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1805 track->textures[i].pitch = idx_value + 32; 1806 track->tex_dirty = true; 1807 break; 1808 case RADEON_PP_TXFILTER_0: 1809 case RADEON_PP_TXFILTER_1: 1810 case RADEON_PP_TXFILTER_2: 1811 i = (reg - RADEON_PP_TXFILTER_0) / 24; 1812 
track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) 1813 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1814 tmp = (idx_value >> 23) & 0x7; 1815 if (tmp == 2 || tmp == 6) 1816 track->textures[i].roundup_w = false; 1817 tmp = (idx_value >> 27) & 0x7; 1818 if (tmp == 2 || tmp == 6) 1819 track->textures[i].roundup_h = false; 1820 track->tex_dirty = true; 1821 break; 1822 case RADEON_PP_TXFORMAT_0: 1823 case RADEON_PP_TXFORMAT_1: 1824 case RADEON_PP_TXFORMAT_2: 1825 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1826 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1827 track->textures[i].use_pitch = 1; 1828 } else { 1829 track->textures[i].use_pitch = 0; 1830 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1831 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1832 } 1833 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1834 track->textures[i].tex_coord_type = 2; 1835 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { 1836 case RADEON_TXFORMAT_I8: 1837 case RADEON_TXFORMAT_RGB332: 1838 case RADEON_TXFORMAT_Y8: 1839 track->textures[i].cpp = 1; 1840 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1841 break; 1842 case RADEON_TXFORMAT_AI88: 1843 case RADEON_TXFORMAT_ARGB1555: 1844 case RADEON_TXFORMAT_RGB565: 1845 case RADEON_TXFORMAT_ARGB4444: 1846 case RADEON_TXFORMAT_VYUY422: 1847 case RADEON_TXFORMAT_YVYU422: 1848 case RADEON_TXFORMAT_SHADOW16: 1849 case RADEON_TXFORMAT_LDUDV655: 1850 case RADEON_TXFORMAT_DUDV88: 1851 track->textures[i].cpp = 2; 1852 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1853 break; 1854 case RADEON_TXFORMAT_ARGB8888: 1855 case RADEON_TXFORMAT_RGBA8888: 1856 case RADEON_TXFORMAT_SHADOW32: 1857 case RADEON_TXFORMAT_LDUDUV8888: 1858 track->textures[i].cpp = 4; 1859 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1860 break; 1861 case RADEON_TXFORMAT_DXT1: 1862 track->textures[i].cpp = 1; 1863 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 1864 break; 1865 case RADEON_TXFORMAT_DXT23: 1866 case RADEON_TXFORMAT_DXT45: 1867 track->textures[i].cpp = 1; 1868 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 1869 break; 1870 } 1871 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1872 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1873 track->tex_dirty = true; 1874 break; 1875 case RADEON_PP_CUBIC_FACES_0: 1876 case RADEON_PP_CUBIC_FACES_1: 1877 case RADEON_PP_CUBIC_FACES_2: 1878 tmp = idx_value; 1879 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1880 for (face = 0; face < 4; face++) { 1881 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1882 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1883 } 1884 track->tex_dirty = true; 1885 break; 1886 default: 1887 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1888 reg, idx); 1889 return -EINVAL; 1890 } 1891 return 0; 1892 } 1893 1894 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1895 struct radeon_cs_packet *pkt, 1896 struct radeon_bo *robj) 1897 { 1898 unsigned idx; 1899 u32 value; 1900 idx = pkt->idx + 1; 1901 value = radeon_get_ib_value(p, idx + 2); 1902 if ((value + 1) > radeon_bo_size(robj)) { 1903 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1904 "(need %u have %lu) !\n", 1905 value + 1, 1906 radeon_bo_size(robj)); 1907 return -EINVAL; 1908 } 1909 return 0; 1910 } 1911 1912 static int 
r100_packet3_check(struct radeon_cs_parser *p, 1913 struct radeon_cs_packet *pkt) 1914 { 1915 struct radeon_cs_reloc *reloc; 1916 struct r100_cs_track *track; 1917 unsigned idx; 1918 volatile uint32_t *ib; 1919 int r; 1920 1921 ib = p->ib.ptr; 1922 idx = pkt->idx + 1; 1923 track = (struct r100_cs_track *)p->track; 1924 switch (pkt->opcode) { 1925 case PACKET3_3D_LOAD_VBPNTR: 1926 r = r100_packet3_load_vbpntr(p, pkt, idx); 1927 if (r) 1928 return r; 1929 break; 1930 case PACKET3_INDX_BUFFER: 1931 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1932 if (r) { 1933 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1934 radeon_cs_dump_packet(p, pkt); 1935 return r; 1936 } 1937 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); 1938 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1939 if (r) { 1940 return r; 1941 } 1942 break; 1943 case 0x23: 1944 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1945 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1946 if (r) { 1947 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1948 radeon_cs_dump_packet(p, pkt); 1949 return r; 1950 } 1951 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); 1952 track->num_arrays = 1; 1953 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 1954 1955 track->arrays[0].robj = reloc->robj; 1956 track->arrays[0].esize = track->vtx_size; 1957 1958 track->max_indx = radeon_get_ib_value(p, idx+1); 1959 1960 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 1961 track->immd_dwords = pkt->count - 1; 1962 r = r100_cs_track_check(p->rdev, track); 1963 if (r) 1964 return r; 1965 break; 1966 case PACKET3_3D_DRAW_IMMD: 1967 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1968 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1969 return -EINVAL; 1970 } 1971 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 1972 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1973 track->immd_dwords = pkt->count - 1; 1974 r = r100_cs_track_check(p->rdev, track); 1975 if (r) 1976 return r; 1977 break; 1978 /* triggers drawing using in-packet vertex data */ 1979 case PACKET3_3D_DRAW_IMMD_2: 1980 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1981 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1982 return -EINVAL; 1983 } 1984 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1985 track->immd_dwords = pkt->count; 1986 r = r100_cs_track_check(p->rdev, track); 1987 if (r) 1988 return r; 1989 break; 1990 /* triggers drawing using in-packet vertex data */ 1991 case PACKET3_3D_DRAW_VBUF_2: 1992 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1993 r = r100_cs_track_check(p->rdev, track); 1994 if (r) 1995 return r; 1996 break; 1997 /* triggers drawing of vertex buffers setup elsewhere */ 1998 case PACKET3_3D_DRAW_INDX_2: 1999 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2000 r = r100_cs_track_check(p->rdev, track); 2001 if (r) 2002 return r; 2003 break; 2004 /* triggers drawing using indices to vertex buffer */ 2005 case PACKET3_3D_DRAW_VBUF: 2006 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2007 r = r100_cs_track_check(p->rdev, track); 2008 if (r) 2009 return r; 2010 break; 2011 /* triggers drawing of vertex buffers setup elsewhere */ 2012 case PACKET3_3D_DRAW_INDX: 2013 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2014 r = r100_cs_track_check(p->rdev, track); 2015 if (r) 2016 return r; 2017 break; 2018 /* triggers drawing using indices to vertex buffer */ 2019 case PACKET3_3D_CLEAR_HIZ: 2020 case PACKET3_3D_CLEAR_ZMASK: 2021 if 
(p->rdev->hyperz_filp != p->filp) 2022 return -EINVAL; 2023 break; 2024 case PACKET3_NOP: 2025 break; 2026 default: 2027 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2028 return -EINVAL; 2029 } 2030 return 0; 2031 } 2032 2033 int r100_cs_parse(struct radeon_cs_parser *p) 2034 { 2035 struct radeon_cs_packet pkt; 2036 struct r100_cs_track *track; 2037 int r; 2038 2039 track = kzalloc(sizeof(*track), GFP_KERNEL); 2040 if (!track) 2041 return -ENOMEM; 2042 r100_cs_track_clear(p->rdev, track); 2043 p->track = track; 2044 do { 2045 r = radeon_cs_packet_parse(p, &pkt, p->idx); 2046 if (r) { 2047 kfree(p->track); 2048 p->track = NULL; 2049 return r; 2050 } 2051 p->idx += pkt.count + 2; 2052 switch (pkt.type) { 2053 case RADEON_PACKET_TYPE0: 2054 if (p->rdev->family >= CHIP_R200) 2055 r = r100_cs_parse_packet0(p, &pkt, 2056 p->rdev->config.r100.reg_safe_bm, 2057 p->rdev->config.r100.reg_safe_bm_size, 2058 &r200_packet0_check); 2059 else 2060 r = r100_cs_parse_packet0(p, &pkt, 2061 p->rdev->config.r100.reg_safe_bm, 2062 p->rdev->config.r100.reg_safe_bm_size, 2063 &r100_packet0_check); 2064 break; 2065 case RADEON_PACKET_TYPE2: 2066 break; 2067 case RADEON_PACKET_TYPE3: 2068 r = r100_packet3_check(p, &pkt); 2069 break; 2070 default: 2071 DRM_ERROR("Unknown packet type %d !\n", 2072 pkt.type); 2073 kfree(p->track); 2074 p->track = NULL; 2075 return -EINVAL; 2076 } 2077 if (r) { 2078 kfree(p->track); 2079 p->track = NULL; 2080 return r; 2081 } 2082 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2083 kfree(p->track); 2084 p->track = NULL; 2085 return 0; 2086 } 2087 2088 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2089 { 2090 DRM_ERROR("pitch %d\n", t->pitch); 2091 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2092 DRM_ERROR("width %d\n", t->width); 2093 DRM_ERROR("width_11 %d\n", t->width_11); 2094 DRM_ERROR("height %d\n", t->height); 2095 DRM_ERROR("height_11 %d\n", t->height_11); 2096 DRM_ERROR("num levels %d\n", t->num_levels); 2097 DRM_ERROR("depth %d\n", t->txdepth); 2098 DRM_ERROR("bpp %d\n", t->cpp); 2099 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2100 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2101 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2102 DRM_ERROR("compress format %d\n", t->compress_format); 2103 } 2104 2105 static int r100_track_compress_size(int compress_format, int w, int h) 2106 { 2107 int block_width, block_height, block_bytes; 2108 int wblocks, hblocks; 2109 int min_wblocks; 2110 int sz; 2111 2112 block_width = 4; 2113 block_height = 4; 2114 2115 switch (compress_format) { 2116 case R100_TRACK_COMP_DXT1: 2117 block_bytes = 8; 2118 min_wblocks = 4; 2119 break; 2120 default: 2121 case R100_TRACK_COMP_DXT35: 2122 block_bytes = 16; 2123 min_wblocks = 2; 2124 break; 2125 } 2126 2127 hblocks = (h + block_height - 1) / block_height; 2128 wblocks = (w + block_width - 1) / block_width; 2129 if (wblocks < min_wblocks) 2130 wblocks = min_wblocks; 2131 sz = wblocks * hblocks * block_bytes; 2132 return sz; 2133 } 2134 2135 static int r100_cs_track_cube(struct radeon_device *rdev, 2136 struct r100_cs_track *track, unsigned idx) 2137 { 2138 unsigned face, w, h; 2139 struct radeon_bo *cube_robj; 2140 unsigned long size; 2141 unsigned compress_format = track->textures[idx].compress_format; 2142 2143 for (face = 0; face < 5; face++) { 2144 cube_robj = track->textures[idx].cube_info[face].robj; 2145 w = track->textures[idx].cube_info[face].width; 2146 h = track->textures[idx].cube_info[face].height; 2147 
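		/* Each cube face is checked on its own: the face's (possibly
		 * DXT-compressed) image size plus its per-face offset must fit
		 * inside the backing BO. Rough example: a 16x16 DXT1 face is
		 * 4x4 blocks of 8 bytes = 128 bytes before the offset is added.
		 */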
2148 if (compress_format) { 2149 size = r100_track_compress_size(compress_format, w, h); 2150 } else 2151 size = w * h; 2152 size *= track->textures[idx].cpp; 2153 2154 size += track->textures[idx].cube_info[face].offset; 2155 2156 if (size > radeon_bo_size(cube_robj)) { 2157 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2158 size, radeon_bo_size(cube_robj)); 2159 r100_cs_track_texture_print(&track->textures[idx]); 2160 return -1; 2161 } 2162 } 2163 return 0; 2164 } 2165 2166 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2167 struct r100_cs_track *track) 2168 { 2169 struct radeon_bo *robj; 2170 unsigned long size; 2171 unsigned u, i, w, h, d; 2172 int ret; 2173 2174 for (u = 0; u < track->num_texture; u++) { 2175 if (!track->textures[u].enabled) 2176 continue; 2177 if (track->textures[u].lookup_disable) 2178 continue; 2179 robj = track->textures[u].robj; 2180 if (robj == NULL) { 2181 DRM_ERROR("No texture bound to unit %u\n", u); 2182 return -EINVAL; 2183 } 2184 size = 0; 2185 for (i = 0; i <= track->textures[u].num_levels; i++) { 2186 if (track->textures[u].use_pitch) { 2187 if (rdev->family < CHIP_R300) 2188 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2189 else 2190 w = track->textures[u].pitch / (1 << i); 2191 } else { 2192 w = track->textures[u].width; 2193 if (rdev->family >= CHIP_RV515) 2194 w |= track->textures[u].width_11; 2195 w = w / (1 << i); 2196 if (track->textures[u].roundup_w) 2197 w = roundup_pow_of_two(w); 2198 } 2199 h = track->textures[u].height; 2200 if (rdev->family >= CHIP_RV515) 2201 h |= track->textures[u].height_11; 2202 h = h / (1 << i); 2203 if (track->textures[u].roundup_h) 2204 h = roundup_pow_of_two(h); 2205 if (track->textures[u].tex_coord_type == 1) { 2206 d = (1 << track->textures[u].txdepth) / (1 << i); 2207 if (!d) 2208 d = 1; 2209 } else { 2210 d = 1; 2211 } 2212 if (track->textures[u].compress_format) { 2213 2214 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2215 /* compressed textures are block based */ 2216 } else 2217 size += w * h * d; 2218 } 2219 size *= track->textures[u].cpp; 2220 2221 switch (track->textures[u].tex_coord_type) { 2222 case 0: 2223 case 1: 2224 break; 2225 case 2: 2226 if (track->separate_cube) { 2227 ret = r100_cs_track_cube(rdev, track, u); 2228 if (ret) 2229 return ret; 2230 } else 2231 size *= 6; 2232 break; 2233 default: 2234 DRM_ERROR("Invalid texture coordinate type %u for unit " 2235 "%u\n", track->textures[u].tex_coord_type, u); 2236 return -EINVAL; 2237 } 2238 if (size > radeon_bo_size(robj)) { 2239 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2240 "%lu\n", u, size, radeon_bo_size(robj)); 2241 r100_cs_track_texture_print(&track->textures[u]); 2242 return -EINVAL; 2243 } 2244 } 2245 return 0; 2246 } 2247 2248 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2249 { 2250 unsigned i; 2251 unsigned long size; 2252 unsigned prim_walk; 2253 unsigned nverts; 2254 unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; 2255 2256 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2257 !track->blend_read_enable) 2258 num_cb = 0; 2259 2260 for (i = 0; i < num_cb; i++) { 2261 if (track->cb[i].robj == NULL) { 2262 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2263 return -EINVAL; 2264 } 2265 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2266 size += track->cb[i].offset; 2267 if (size > radeon_bo_size(track->cb[i].robj)) { 2268 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2269 "(need %lu have %lu) !\n", i, size, 2270 radeon_bo_size(track->cb[i].robj)); 2271 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2272 i, track->cb[i].pitch, track->cb[i].cpp, 2273 track->cb[i].offset, track->maxy); 2274 return -EINVAL; 2275 } 2276 } 2277 track->cb_dirty = false; 2278 2279 if (track->zb_dirty && track->z_enabled) { 2280 if (track->zb.robj == NULL) { 2281 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2282 return -EINVAL; 2283 } 2284 size = track->zb.pitch * track->zb.cpp * track->maxy; 2285 size += track->zb.offset; 2286 if (size > radeon_bo_size(track->zb.robj)) { 2287 DRM_ERROR("[drm] Buffer too small for z buffer " 2288 "(need %lu have %lu) !\n", size, 2289 radeon_bo_size(track->zb.robj)); 2290 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2291 track->zb.pitch, track->zb.cpp, 2292 track->zb.offset, track->maxy); 2293 return -EINVAL; 2294 } 2295 } 2296 track->zb_dirty = false; 2297 2298 if (track->aa_dirty && track->aaresolve) { 2299 if (track->aa.robj == NULL) { 2300 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2301 return -EINVAL; 2302 } 2303 /* I believe the format comes from colorbuffer0. */ 2304 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2305 size += track->aa.offset; 2306 if (size > radeon_bo_size(track->aa.robj)) { 2307 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2308 "(need %lu have %lu) !\n", i, size, 2309 radeon_bo_size(track->aa.robj)); 2310 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2311 i, track->aa.pitch, track->cb[0].cpp, 2312 track->aa.offset, track->maxy); 2313 return -EINVAL; 2314 } 2315 } 2316 track->aa_dirty = false; 2317 2318 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2319 if (track->vap_vf_cntl & (1 << 14)) { 2320 nverts = track->vap_alt_nverts; 2321 } else { 2322 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2323 } 2324 switch (prim_walk) { 2325 case 1: 2326 for (i = 0; i < track->num_arrays; i++) { 2327 size = track->arrays[i].esize * track->max_indx * 4; 2328 if (track->arrays[i].robj == NULL) { 2329 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2330 "bound\n", prim_walk, i); 2331 return -EINVAL; 2332 } 2333 if (size > radeon_bo_size(track->arrays[i].robj)) { 2334 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2335 "need %lu dwords have %lu dwords\n", 2336 prim_walk, i, size >> 2, 2337 radeon_bo_size(track->arrays[i].robj) 2338 >> 2); 2339 DRM_ERROR("Max indices %u\n", track->max_indx); 2340 return -EINVAL; 2341 } 2342 } 2343 break; 2344 case 2: 2345 for (i = 0; i < track->num_arrays; i++) { 2346 size = track->arrays[i].esize * (nverts - 1) * 4; 2347 if (track->arrays[i].robj == NULL) { 2348 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2349 "bound\n", prim_walk, i); 2350 return -EINVAL; 2351 } 2352 if (size > radeon_bo_size(track->arrays[i].robj)) { 2353 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2354 "need %lu dwords have %lu dwords\n", 2355 prim_walk, i, size >> 2, 2356 radeon_bo_size(track->arrays[i].robj) 2357 >> 2); 2358 return -EINVAL; 2359 } 2360 } 
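		/* Note: like the indexed walk above, this only verifies that each
		 * bound vertex array is big enough for the vertices the draw will
		 * fetch (esize dwords of 4 bytes per vertex); the array contents
		 * themselves are not inspected.
		 */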
		break;
	case 3:
		size = track->vtx_size * nverts;
		if (size != track->immd_dwords) {
			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
				  track->immd_dwords, size);
			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
				  nverts, track->vtx_size);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
			  prim_walk);
		return -EINVAL;
	}

	if (track->tex_dirty) {
		track->tex_dirty = false;
		return r100_cs_track_texture_check(rdev, track);
	}
	return 0;
}

void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
	unsigned i, face;

	track->cb_dirty = true;
	track->zb_dirty = true;
	track->tex_dirty = true;
	track->aa_dirty = true;

	if (rdev->family < CHIP_R300) {
		track->num_cb = 1;
		if (rdev->family <= CHIP_RS200)
			track->num_texture = 3;
		else
			track->num_texture = 6;
		track->maxy = 2048;
		track->separate_cube = 1;
	} else {
		track->num_cb = 4;
		track->num_texture = 16;
		track->maxy = 4096;
		track->separate_cube = 0;
		track->aaresolve = false;
		track->aa.robj = NULL;
	}

	for (i = 0; i < track->num_cb; i++) {
		track->cb[i].robj = NULL;
		track->cb[i].pitch = 8192;
		track->cb[i].cpp = 16;
		track->cb[i].offset = 0;
	}
	track->z_enabled = true;
	track->zb.robj = NULL;
	track->zb.pitch = 8192;
	track->zb.cpp = 4;
	track->zb.offset = 0;
	track->vtx_size = 0x7F;
	track->immd_dwords = 0xFFFFFFFFUL;
	track->num_arrays = 11;
	track->max_indx = 0x00FFFFFFUL;
	for (i = 0; i < track->num_arrays; i++) {
		track->arrays[i].robj = NULL;
		track->arrays[i].esize = 0x7F;
	}
	for (i = 0; i < track->num_texture; i++) {
		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		track->textures[i].pitch = 16536;
		track->textures[i].width = 16536;
		track->textures[i].height = 16536;
		track->textures[i].width_11 = 1 << 11;
		track->textures[i].height_11 = 1 << 11;
		track->textures[i].num_levels = 12;
		if (rdev->family <= CHIP_RS200) {
			track->textures[i].tex_coord_type = 0;
			track->textures[i].txdepth = 0;
		} else {
			track->textures[i].txdepth = 16;
			track->textures[i].tex_coord_type = 1;
		}
		track->textures[i].cpp = 64;
		track->textures[i].robj = NULL;
		/* CS IB emission code makes sure texture units are disabled */
		track->textures[i].enabled = false;
		track->textures[i].lookup_disable = false;
		track->textures[i].roundup_w = true;
		track->textures[i].roundup_h = true;
		if (track->separate_cube)
			for (face = 0; face < 5; face++) {
				track->textures[i].cube_info[face].robj = NULL;
				track->textures[i].cube_info[face].width = 16536;
				track->textures[i].cube_info[face].height = 16536;
				track->textures[i].cube_info[face].offset = 0;
			}
	}
}

/*
 * Global GPU functions
 */
static void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

static int r100_rbbm_fifo_wait_for_entry(struct
radeon_device *rdev, unsigned n) 2481 { 2482 unsigned i; 2483 uint32_t tmp; 2484 2485 for (i = 0; i < rdev->usec_timeout; i++) { 2486 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2487 if (tmp >= n) { 2488 return 0; 2489 } 2490 DRM_UDELAY(1); 2491 } 2492 return -1; 2493 } 2494 2495 int r100_gui_wait_for_idle(struct radeon_device *rdev) 2496 { 2497 unsigned i; 2498 uint32_t tmp; 2499 2500 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2501 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" 2502 " Bad things might happen.\n"); 2503 } 2504 for (i = 0; i < rdev->usec_timeout; i++) { 2505 tmp = RREG32(RADEON_RBBM_STATUS); 2506 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2507 return 0; 2508 } 2509 DRM_UDELAY(1); 2510 } 2511 return -1; 2512 } 2513 2514 int r100_mc_wait_for_idle(struct radeon_device *rdev) 2515 { 2516 unsigned i; 2517 uint32_t tmp; 2518 2519 for (i = 0; i < rdev->usec_timeout; i++) { 2520 /* read MC_STATUS */ 2521 tmp = RREG32(RADEON_MC_STATUS); 2522 if (tmp & RADEON_MC_IDLE) { 2523 return 0; 2524 } 2525 DRM_UDELAY(1); 2526 } 2527 return -1; 2528 } 2529 2530 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2531 { 2532 u32 rbbm_status; 2533 2534 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2535 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2536 radeon_ring_lockup_update(rdev, ring); 2537 return false; 2538 } 2539 return radeon_ring_test_lockup(rdev, ring); 2540 } 2541 2542 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2543 void r100_enable_bm(struct radeon_device *rdev) 2544 { 2545 uint32_t tmp; 2546 /* Enable bus mastering */ 2547 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2548 WREG32(RADEON_BUS_CNTL, tmp); 2549 } 2550 2551 void r100_bm_disable(struct radeon_device *rdev) 2552 { 2553 u32 tmp; 2554 2555 /* disable bus mastering */ 2556 tmp = RREG32(R_000030_BUS_CNTL); 2557 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2558 mdelay(1); 2559 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2560 mdelay(1); 2561 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2562 tmp = RREG32(RADEON_BUS_CNTL); 2563 mdelay(1); 2564 pci_disable_busmaster(rdev->dev); 2565 mdelay(1); 2566 } 2567 2568 int r100_asic_reset(struct radeon_device *rdev) 2569 { 2570 struct r100_mc_save save; 2571 u32 status, tmp; 2572 int ret = 0; 2573 2574 status = RREG32(R_000E40_RBBM_STATUS); 2575 if (!G_000E40_GUI_ACTIVE(status)) { 2576 return 0; 2577 } 2578 r100_mc_stop(rdev, &save); 2579 status = RREG32(R_000E40_RBBM_STATUS); 2580 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2581 /* stop CP */ 2582 WREG32(RADEON_CP_CSQ_CNTL, 0); 2583 tmp = RREG32(RADEON_CP_RB_CNTL); 2584 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2585 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2586 WREG32(RADEON_CP_RB_WPTR, 0); 2587 WREG32(RADEON_CP_RB_CNTL, tmp); 2588 /* save PCI state */ 2589 pci_save_state(device_get_parent(rdev->dev)); 2590 /* disable bus mastering */ 2591 r100_bm_disable(rdev); 2592 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2593 S_0000F0_SOFT_RESET_RE(1) | 2594 S_0000F0_SOFT_RESET_PP(1) | 2595 S_0000F0_SOFT_RESET_RB(1)); 2596 RREG32(R_0000F0_RBBM_SOFT_RESET); 2597 mdelay(500); 2598 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2599 mdelay(1); 2600 status = RREG32(R_000E40_RBBM_STATUS); 2601 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2602 /* reset CP */ 2603 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); 2604 
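	/* Read the soft-reset register straight back so the write is posted
	 * to the chip before the delay below (same pattern as the engine
	 * reset above).
	 */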
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(device_get_parent(rdev->dev));
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
	    G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

void r100_set_common_regs(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	bool force_dac2 = false;
	u32 tmp;

	/* set these so they don't interfere with anything */
	WREG32(RADEON_OV0_SCALE_CNTL, 0);
	WREG32(RADEON_SUBPIC_CNTL, 0);
	WREG32(RADEON_VIPH_CONTROL, 0);
	WREG32(RADEON_I2C_CNTL_1, 0);
	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
	WREG32(RADEON_CAP1_TRIG_CNTL, 0);

	/* always set up dac2 on rn50 and some rv100 as lots
	 * of servers seem to wire it up to a VGA port but
	 * don't report it in the bios connector
	 * table.
	 */
	switch (dev->pdev->device) {
	/* RN50 */
	case 0x515e:
	case 0x5969:
		force_dac2 = true;
		break;
	/* RV100 */
	case 0x5159:
	case 0x515a:
		/* DELL triple head servers */
		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
		    ((dev->pdev->subsystem_device == 0x016c) ||
		     (dev->pdev->subsystem_device == 0x016d) ||
		     (dev->pdev->subsystem_device == 0x016e) ||
		     (dev->pdev->subsystem_device == 0x016f) ||
		     (dev->pdev->subsystem_device == 0x0170) ||
		     (dev->pdev->subsystem_device == 0x017d) ||
		     (dev->pdev->subsystem_device == 0x017e) ||
		     (dev->pdev->subsystem_device == 0x0183) ||
		     (dev->pdev->subsystem_device == 0x018a) ||
		     (dev->pdev->subsystem_device == 0x019a)))
			force_dac2 = true;
		break;
	}

	if (force_dac2) {
		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);

		/* For CRT on DAC2, don't turn it on if BIOS didn't
		   enable it, even if it's detected.
2676 */ 2677 2678 /* force it to crtc0 */ 2679 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 2680 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 2681 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 2682 2683 /* set up the TV DAC */ 2684 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 2685 RADEON_TV_DAC_STD_MASK | 2686 RADEON_TV_DAC_RDACPD | 2687 RADEON_TV_DAC_GDACPD | 2688 RADEON_TV_DAC_BDACPD | 2689 RADEON_TV_DAC_BGADJ_MASK | 2690 RADEON_TV_DAC_DACADJ_MASK); 2691 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 2692 RADEON_TV_DAC_NHOLD | 2693 RADEON_TV_DAC_STD_PS2 | 2694 (0x58 << 16)); 2695 2696 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 2697 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 2698 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 2699 } 2700 2701 /* switch PM block to ACPI mode */ 2702 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); 2703 tmp &= ~RADEON_PM_MODE_SEL; 2704 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); 2705 2706 } 2707 2708 /* 2709 * VRAM info 2710 */ 2711 static void r100_vram_get_type(struct radeon_device *rdev) 2712 { 2713 uint32_t tmp; 2714 2715 rdev->mc.vram_is_ddr = false; 2716 if (rdev->flags & RADEON_IS_IGP) 2717 rdev->mc.vram_is_ddr = true; 2718 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 2719 rdev->mc.vram_is_ddr = true; 2720 if ((rdev->family == CHIP_RV100) || 2721 (rdev->family == CHIP_RS100) || 2722 (rdev->family == CHIP_RS200)) { 2723 tmp = RREG32(RADEON_MEM_CNTL); 2724 if (tmp & RV100_HALF_MODE) { 2725 rdev->mc.vram_width = 32; 2726 } else { 2727 rdev->mc.vram_width = 64; 2728 } 2729 if (rdev->flags & RADEON_SINGLE_CRTC) { 2730 rdev->mc.vram_width /= 4; 2731 rdev->mc.vram_is_ddr = true; 2732 } 2733 } else if (rdev->family <= CHIP_RV280) { 2734 tmp = RREG32(RADEON_MEM_CNTL); 2735 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 2736 rdev->mc.vram_width = 128; 2737 } else { 2738 rdev->mc.vram_width = 64; 2739 } 2740 } else { 2741 /* newer IGPs */ 2742 rdev->mc.vram_width = 128; 2743 } 2744 } 2745 2746 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 2747 { 2748 u32 aper_size; 2749 u8 byte; 2750 2751 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2752 2753 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 2754 * that is has the 2nd generation multifunction PCI interface 2755 */ 2756 if (rdev->family == CHIP_RV280 || 2757 rdev->family >= CHIP_RV350) { 2758 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 2759 ~RADEON_HDP_APER_CNTL); 2760 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 2761 return aper_size * 2; 2762 } 2763 2764 /* Older cards have all sorts of funny issues to deal with. First 2765 * check if it's a multifunction card by reading the PCI config 2766 * header type... Limit those to one aperture size 2767 */ 2768 byte = pci_read_config(rdev->dev, 0xe, 1); 2769 if (byte & 0x80) { 2770 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 2771 DRM_INFO("Limiting VRAM to one aperture\n"); 2772 return aper_size; 2773 } 2774 2775 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 2776 * have set it up. We don't write this as it's broken on some ASICs but 2777 * we expect the BIOS to have done the right thing (might be too optimistic...) 
2778 */ 2779 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 2780 return aper_size * 2; 2781 return aper_size; 2782 } 2783 2784 void r100_vram_init_sizes(struct radeon_device *rdev) 2785 { 2786 u64 config_aper_size; 2787 2788 /* work out accessible VRAM */ 2789 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 2790 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 2791 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); 2792 /* FIXME we don't use the second aperture yet when we could use it */ 2793 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2794 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2795 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2796 if (rdev->flags & RADEON_IS_IGP) { 2797 uint32_t tom; 2798 /* read NB_TOM to get the amount of ram stolen for the GPU */ 2799 tom = RREG32(RADEON_NB_TOM); 2800 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 2801 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2802 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2803 } else { 2804 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 2805 /* Some production boards of m6 will report 0 2806 * if it's 8 MB 2807 */ 2808 if (rdev->mc.real_vram_size == 0) { 2809 rdev->mc.real_vram_size = 8192 * 1024; 2810 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2811 } 2812 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 2813 * Novell bug 204882 + along with lots of ubuntu ones 2814 */ 2815 if (rdev->mc.aper_size > config_aper_size) 2816 config_aper_size = rdev->mc.aper_size; 2817 2818 if (config_aper_size > rdev->mc.real_vram_size) 2819 rdev->mc.mc_vram_size = config_aper_size; 2820 else 2821 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2822 } 2823 } 2824 2825 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2826 { 2827 uint32_t temp; 2828 2829 temp = RREG32(RADEON_CONFIG_CNTL); 2830 if (state == false) { 2831 temp &= ~RADEON_CFG_VGA_RAM_EN; 2832 temp |= RADEON_CFG_VGA_IO_DIS; 2833 } else { 2834 temp &= ~RADEON_CFG_VGA_IO_DIS; 2835 } 2836 WREG32(RADEON_CONFIG_CNTL, temp); 2837 } 2838 2839 static void r100_mc_init(struct radeon_device *rdev) 2840 { 2841 u64 base; 2842 2843 r100_vram_get_type(rdev); 2844 r100_vram_init_sizes(rdev); 2845 base = rdev->mc.aper_base; 2846 if (rdev->flags & RADEON_IS_IGP) 2847 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2848 radeon_vram_location(rdev, &rdev->mc, base); 2849 rdev->mc.gtt_base_align = 0; 2850 if (!(rdev->flags & RADEON_IS_AGP)) 2851 radeon_gtt_location(rdev, &rdev->mc); 2852 radeon_update_bandwidth_info(rdev); 2853 } 2854 2855 2856 /* 2857 * Indirect registers accessor 2858 */ 2859 void r100_pll_errata_after_index(struct radeon_device *rdev) 2860 { 2861 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { 2862 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2863 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2864 } 2865 } 2866 2867 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2868 { 2869 /* This workarounds is necessary on RV100, RS100 and RS200 chips 2870 * or the chip could hang on a subsequent access 2871 */ 2872 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2873 mdelay(5); 2874 } 2875 2876 /* This function is required to workaround a hardware bug in some (all?) 2877 * revisions of the R300. This workaround should be called after every 2878 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2879 * may not be correct. 
2880 */ 2881 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2882 uint32_t save, tmp; 2883 2884 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2885 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2886 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2887 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2888 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2889 } 2890 } 2891 2892 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2893 { 2894 uint32_t data; 2895 2896 spin_lock(&rdev->pll_idx_lock); 2897 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2898 r100_pll_errata_after_index(rdev); 2899 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2900 r100_pll_errata_after_data(rdev); 2901 spin_unlock(&rdev->pll_idx_lock); 2902 return data; 2903 } 2904 2905 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2906 { 2907 spin_lock(&rdev->pll_idx_lock); 2908 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2909 r100_pll_errata_after_index(rdev); 2910 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2911 r100_pll_errata_after_data(rdev); 2912 spin_unlock(&rdev->pll_idx_lock); 2913 } 2914 2915 static void r100_set_safe_registers(struct radeon_device *rdev) 2916 { 2917 if (ASIC_IS_RN50(rdev)) { 2918 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2919 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); 2920 } else if (rdev->family < CHIP_R200) { 2921 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2922 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 2923 } else { 2924 r200_set_safe_registers(rdev); 2925 } 2926 } 2927 2928 /* 2929 * Debugfs info 2930 */ 2931 #if defined(CONFIG_DEBUG_FS) 2932 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2933 { 2934 struct drm_info_node *node = (struct drm_info_node *) m->private; 2935 struct drm_device *dev = node->minor->dev; 2936 struct radeon_device *rdev = dev->dev_private; 2937 uint32_t reg, value; 2938 unsigned i; 2939 2940 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2941 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2942 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2943 for (i = 0; i < 64; i++) { 2944 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 2945 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 2946 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 2947 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 2948 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 2949 } 2950 return 0; 2951 } 2952 2953 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 2954 { 2955 struct drm_info_node *node = (struct drm_info_node *) m->private; 2956 struct drm_device *dev = node->minor->dev; 2957 struct radeon_device *rdev = dev->dev_private; 2958 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2959 uint32_t rdp, wdp; 2960 unsigned count, i, j; 2961 2962 radeon_ring_free_size(rdev, ring); 2963 rdp = RREG32(RADEON_CP_RB_RPTR); 2964 wdp = RREG32(RADEON_CP_RB_WPTR); 2965 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 2966 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2967 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2968 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2969 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2970 seq_printf(m, "%u dwords in ring\n", count); 2971 if (ring->ready) { 2972 for (j = 0; j <= count; j++) { 2973 i = (rdp + j) & ring->ptr_mask; 2974 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2975 } 2976 } 2977 return 0; 2978 } 2979 2980 2981 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 2982 
{ 2983 struct drm_info_node *node = (struct drm_info_node *) m->private; 2984 struct drm_device *dev = node->minor->dev; 2985 struct radeon_device *rdev = dev->dev_private; 2986 uint32_t csq_stat, csq2_stat, tmp; 2987 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 2988 unsigned i; 2989 2990 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2991 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 2992 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 2993 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 2994 r_rptr = (csq_stat >> 0) & 0x3ff; 2995 r_wptr = (csq_stat >> 10) & 0x3ff; 2996 ib1_rptr = (csq_stat >> 20) & 0x3ff; 2997 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 2998 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 2999 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 3000 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 3001 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 3002 seq_printf(m, "Ring rptr %u\n", r_rptr); 3003 seq_printf(m, "Ring wptr %u\n", r_wptr); 3004 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 3005 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 3006 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 3007 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 3008 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 3009 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 3010 seq_printf(m, "Ring fifo:\n"); 3011 for (i = 0; i < 256; i++) { 3012 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3013 tmp = RREG32(RADEON_CP_CSQ_DATA); 3014 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 3015 } 3016 seq_printf(m, "Indirect1 fifo:\n"); 3017 for (i = 256; i <= 512; i++) { 3018 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3019 tmp = RREG32(RADEON_CP_CSQ_DATA); 3020 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3021 } 3022 seq_printf(m, "Indirect2 fifo:\n"); 3023 for (i = 640; i < ib1_wptr; i++) { 3024 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3025 tmp = RREG32(RADEON_CP_CSQ_DATA); 3026 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3027 } 3028 return 0; 3029 } 3030 3031 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3032 { 3033 struct drm_info_node *node = (struct drm_info_node *) m->private; 3034 struct drm_device *dev = node->minor->dev; 3035 struct radeon_device *rdev = dev->dev_private; 3036 uint32_t tmp; 3037 3038 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3039 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3040 tmp = RREG32(RADEON_MC_FB_LOCATION); 3041 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3042 tmp = RREG32(RADEON_BUS_CNTL); 3043 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3044 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3045 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3046 tmp = RREG32(RADEON_AGP_BASE); 3047 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3048 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3049 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3050 tmp = RREG32(0x01D0); 3051 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3052 tmp = RREG32(RADEON_AIC_LO_ADDR); 3053 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3054 tmp = RREG32(RADEON_AIC_HI_ADDR); 3055 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3056 tmp = RREG32(0x01E4); 3057 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3058 return 0; 3059 } 3060 3061 static struct drm_info_list r100_debugfs_rbbm_list[] = { 3062 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3063 }; 3064 3065 static struct drm_info_list r100_debugfs_cp_list[] = { 3066 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3067 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3068 }; 3069 3070 static struct drm_info_list 
r100_debugfs_mc_info_list[] = { 3071 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3072 }; 3073 #endif 3074 3075 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3076 { 3077 #if defined(CONFIG_DEBUG_FS) 3078 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 3079 #else 3080 return 0; 3081 #endif 3082 } 3083 3084 int r100_debugfs_cp_init(struct radeon_device *rdev) 3085 { 3086 #if defined(CONFIG_DEBUG_FS) 3087 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 3088 #else 3089 return 0; 3090 #endif 3091 } 3092 3093 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 3094 { 3095 #if defined(CONFIG_DEBUG_FS) 3096 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 3097 #else 3098 return 0; 3099 #endif 3100 } 3101 3102 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 3103 uint32_t tiling_flags, uint32_t pitch, 3104 uint32_t offset, uint32_t obj_size) 3105 { 3106 int surf_index = reg * 16; 3107 int flags = 0; 3108 3109 if (rdev->family <= CHIP_RS200) { 3110 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3111 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3112 flags |= RADEON_SURF_TILE_COLOR_BOTH; 3113 if (tiling_flags & RADEON_TILING_MACRO) 3114 flags |= RADEON_SURF_TILE_COLOR_MACRO; 3115 /* setting pitch to 0 disables tiling */ 3116 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3117 == 0) 3118 pitch = 0; 3119 } else if (rdev->family <= CHIP_RV280) { 3120 if (tiling_flags & (RADEON_TILING_MACRO)) 3121 flags |= R200_SURF_TILE_COLOR_MACRO; 3122 if (tiling_flags & RADEON_TILING_MICRO) 3123 flags |= R200_SURF_TILE_COLOR_MICRO; 3124 } else { 3125 if (tiling_flags & RADEON_TILING_MACRO) 3126 flags |= R300_SURF_TILE_MACRO; 3127 if (tiling_flags & RADEON_TILING_MICRO) 3128 flags |= R300_SURF_TILE_MICRO; 3129 } 3130 3131 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 3132 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 3133 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 3134 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 3135 3136 /* r100/r200 divide by 16 */ 3137 if (rdev->family < CHIP_R300) 3138 flags |= pitch / 16; 3139 else 3140 flags |= pitch / 8; 3141 3142 3143 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3144 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3145 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3146 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3147 return 0; 3148 } 3149 3150 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3151 { 3152 int surf_index = reg * 16; 3153 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3154 } 3155 3156 void r100_bandwidth_update(struct radeon_device *rdev) 3157 { 3158 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3159 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3160 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 3161 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3162 fixed20_12 memtcas_ff[8] = { 3163 dfixed_init(1), 3164 dfixed_init(2), 3165 dfixed_init(3), 3166 dfixed_init(0), 3167 dfixed_init_half(1), 3168 dfixed_init_half(2), 3169 dfixed_init(0), 3170 }; 3171 fixed20_12 memtcas_rs480_ff[8] = { 3172 dfixed_init(0), 3173 dfixed_init(1), 3174 dfixed_init(2), 3175 dfixed_init(3), 3176 dfixed_init(0), 3177 dfixed_init_half(1), 3178 dfixed_init_half(2), 3179 dfixed_init_half(3), 3180 }; 3181 fixed20_12 memtcas2_ff[8] = { 3182 dfixed_init(0), 3183 dfixed_init(1), 3184 
		dfixed_init(2),
		dfixed_init(3),
		dfixed_init(4),
		dfixed_init(5),
		dfixed_init(6),
		dfixed_init(7),
	};
	fixed20_12 memtrbs[8] = {
		dfixed_init(1),
		dfixed_init_half(1),
		dfixed_init(2),
		dfixed_init_half(2),
		dfixed_init(3),
		dfixed_init_half(3),
		dfixed_init(4),
		dfixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		dfixed_init(4),
		dfixed_init(5),
		dfixed_init(6),
		dfixed_init(7),
		dfixed_init(8),
		dfixed_init(9),
		dfixed_init(10),
		dfixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16;	/* in octawords */
	int critical_point = 0, critical_point2;
	/* uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;
	struct drm_display_mode *mode1 = NULL;
	struct drm_display_mode *mode2 = NULL;
	uint32_t pixel_bytes1 = 0;
	uint32_t pixel_bytes2 = 0;

	if (!rdev->mode_info.mode_config_initialized)
		return;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled) {
		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
	}
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		if (rdev->mode_info.crtcs[1]->base.enabled) {
			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
		}
	}

	min_mem_eff.full = dfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}

	/*
	 * determine if there is enough bandwidth for the current mode
	 */
	sclk_ff = rdev->pm.sclk;
	mclk_ff = rdev->pm.mclk;

	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ?
2 : 1); 3264 temp_ff.full = dfixed_const(temp); 3265 mem_bw.full = dfixed_mul(mclk_ff, temp_ff); 3266 3267 pix_clk.full = 0; 3268 pix_clk2.full = 0; 3269 peak_disp_bw.full = 0; 3270 if (mode1) { 3271 temp_ff.full = dfixed_const(1000); 3272 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ 3273 pix_clk.full = dfixed_div(pix_clk, temp_ff); 3274 temp_ff.full = dfixed_const(pixel_bytes1); 3275 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); 3276 } 3277 if (mode2) { 3278 temp_ff.full = dfixed_const(1000); 3279 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ 3280 pix_clk2.full = dfixed_div(pix_clk2, temp_ff); 3281 temp_ff.full = dfixed_const(pixel_bytes2); 3282 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); 3283 } 3284 3285 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); 3286 if (peak_disp_bw.full >= mem_bw.full) { 3287 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 3288 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 3289 } 3290 3291 /* Get values from the EXT_MEM_CNTL register...converting its contents. */ 3292 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3293 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3294 mem_trcd = ((temp >> 2) & 0x3) + 1; 3295 mem_trp = ((temp & 0x3)) + 1; 3296 mem_tras = ((temp & 0x70) >> 4) + 1; 3297 } else if (rdev->family == CHIP_R300 || 3298 rdev->family == CHIP_R350) { /* r300, r350 */ 3299 mem_trcd = (temp & 0x7) + 1; 3300 mem_trp = ((temp >> 8) & 0x7) + 1; 3301 mem_tras = ((temp >> 11) & 0xf) + 4; 3302 } else if (rdev->family == CHIP_RV350 || 3303 rdev->family <= CHIP_RV380) { 3304 /* rv3x0 */ 3305 mem_trcd = (temp & 0x7) + 3; 3306 mem_trp = ((temp >> 8) & 0x7) + 3; 3307 mem_tras = ((temp >> 11) & 0xf) + 6; 3308 } else if (rdev->family == CHIP_R420 || 3309 rdev->family == CHIP_R423 || 3310 rdev->family == CHIP_RV410) { 3311 /* r4xx */ 3312 mem_trcd = (temp & 0xf) + 3; 3313 if (mem_trcd > 15) 3314 mem_trcd = 15; 3315 mem_trp = ((temp >> 8) & 0xf) + 3; 3316 if (mem_trp > 15) 3317 mem_trp = 15; 3318 mem_tras = ((temp >> 12) & 0x1f) + 6; 3319 if (mem_tras > 31) 3320 mem_tras = 31; 3321 } else { /* RV200, R200 */ 3322 mem_trcd = (temp & 0x7) + 1; 3323 mem_trp = ((temp >> 8) & 0x7) + 1; 3324 mem_tras = ((temp >> 12) & 0xf) + 4; 3325 } 3326 /* convert to FF */ 3327 trcd_ff.full = dfixed_const(mem_trcd); 3328 trp_ff.full = dfixed_const(mem_trp); 3329 tras_ff.full = dfixed_const(mem_tras); 3330 3331 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 3332 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3333 data = (temp & (7 << 20)) >> 20; 3334 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3335 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3336 tcas_ff = memtcas_rs480_ff[data]; 3337 else 3338 tcas_ff = memtcas_ff[data]; 3339 } else 3340 tcas_ff = memtcas2_ff[data]; 3341 3342 if (rdev->family == CHIP_RS400 || 3343 rdev->family == CHIP_RS480) { 3344 /* extra cas latency stored in bits 23-25 0-4 clocks */ 3345 data = (temp >> 23) & 0x7; 3346 if (data < 5) 3347 tcas_ff.full += dfixed_const(data); 3348 } 3349 3350 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3351 /* on the R300, Tcas is included in Trbs. 
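		   The extra Trbs term is looked up from the RBS position reported
		   in MC_READ_CNTL_AB (or via the MC indirect registers when only
		   the C/D channel is populated) and folded into tcas_ff below.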
3352 */ 3353 temp = RREG32(RADEON_MEM_CNTL); 3354 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3355 if (data == 1) { 3356 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3357 temp = RREG32(R300_MC_IND_INDEX); 3358 temp &= ~R300_MC_IND_ADDR_MASK; 3359 temp |= R300_MC_READ_CNTL_CD_mcind; 3360 WREG32(R300_MC_IND_INDEX, temp); 3361 temp = RREG32(R300_MC_IND_DATA); 3362 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3363 } else { 3364 temp = RREG32(R300_MC_READ_CNTL_AB); 3365 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3366 } 3367 } else { 3368 temp = RREG32(R300_MC_READ_CNTL_AB); 3369 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3370 } 3371 if (rdev->family == CHIP_RV410 || 3372 rdev->family == CHIP_R420 || 3373 rdev->family == CHIP_R423) 3374 trbs_ff = memtrbs_r4xx[data]; 3375 else 3376 trbs_ff = memtrbs[data]; 3377 tcas_ff.full += trbs_ff.full; 3378 } 3379 3380 sclk_eff_ff.full = sclk_ff.full; 3381 3382 if (rdev->flags & RADEON_IS_AGP) { 3383 fixed20_12 agpmode_ff; 3384 agpmode_ff.full = dfixed_const(radeon_agpmode); 3385 temp_ff.full = dfixed_const_666(16); 3386 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3387 } 3388 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 3389 3390 if (ASIC_IS_R300(rdev)) { 3391 sclk_delay_ff.full = dfixed_const(250); 3392 } else { 3393 if ((rdev->family == CHIP_RV100) || 3394 rdev->flags & RADEON_IS_IGP) { 3395 if (rdev->mc.vram_is_ddr) 3396 sclk_delay_ff.full = dfixed_const(41); 3397 else 3398 sclk_delay_ff.full = dfixed_const(33); 3399 } else { 3400 if (rdev->mc.vram_width == 128) 3401 sclk_delay_ff.full = dfixed_const(57); 3402 else 3403 sclk_delay_ff.full = dfixed_const(41); 3404 } 3405 } 3406 3407 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3408 3409 if (rdev->mc.vram_is_ddr) { 3410 if (rdev->mc.vram_width == 32) { 3411 k1.full = dfixed_const(40); 3412 c = 3; 3413 } else { 3414 k1.full = dfixed_const(20); 3415 c = 1; 3416 } 3417 } else { 3418 k1.full = dfixed_const(40); 3419 c = 3; 3420 } 3421 3422 temp_ff.full = dfixed_const(2); 3423 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3424 temp_ff.full = dfixed_const(c); 3425 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3426 temp_ff.full = dfixed_const(4); 3427 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3428 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3429 mc_latency_mclk.full += k1.full; 3430 3431 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3432 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3433 3434 /* 3435 HW cursor time assuming worst case of full size colour cursor. 3436 */ 3437 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3438 temp_ff.full += trcd_ff.full; 3439 if (temp_ff.full < tras_ff.full) 3440 temp_ff.full = tras_ff.full; 3441 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3442 3443 temp_ff.full = dfixed_const(cur_size); 3444 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3445 /* 3446 Find the total latency for the display data. 
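	  Roughly: disp_latency = max(MCLK path, SCLK path), where each path
	  gets the fixed 8-sclk overhead added below plus the worst-case cursor
	  latency computed above.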
3447 */ 3448 disp_latency_overhead.full = dfixed_const(8); 3449 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3450 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3451 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3452 3453 if (mc_latency_mclk.full > mc_latency_sclk.full) 3454 disp_latency.full = mc_latency_mclk.full; 3455 else 3456 disp_latency.full = mc_latency_sclk.full; 3457 3458 /* setup Max GRPH_STOP_REQ default value */ 3459 if (ASIC_IS_RV100(rdev)) 3460 max_stop_req = 0x5c; 3461 else 3462 max_stop_req = 0x7c; 3463 3464 if (mode1) { 3465 /* CRTC1 3466 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3467 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3468 */ 3469 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3470 3471 if (stop_req > max_stop_req) 3472 stop_req = max_stop_req; 3473 3474 /* 3475 Find the drain rate of the display buffer. 3476 */ 3477 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3478 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3479 3480 /* 3481 Find the critical point of the display buffer. 3482 */ 3483 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3484 crit_point_ff.full += dfixed_const_half(0); 3485 3486 critical_point = dfixed_trunc(crit_point_ff); 3487 3488 if (rdev->disp_priority == 2) { 3489 critical_point = 0; 3490 } 3491 3492 /* 3493 The critical point should never be above max_stop_req-4. Setting 3494 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 3495 */ 3496 if (max_stop_req - critical_point < 4) 3497 critical_point = 0; 3498 3499 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3500 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 3501 critical_point = 0x10; 3502 } 3503 3504 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3505 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3506 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3507 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3508 if ((rdev->family == CHIP_R350) && 3509 (stop_req > 0x15)) { 3510 stop_req -= 0x10; 3511 } 3512 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3513 temp |= RADEON_GRPH_BUFFER_SIZE; 3514 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3515 RADEON_GRPH_CRITICAL_AT_SOF | 3516 RADEON_GRPH_STOP_CNTL); 3517 /* 3518 Write the result into the register. 3519 */ 3520 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3521 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3522 3523 #if 0 3524 if ((rdev->family == CHIP_RS400) || 3525 (rdev->family == CHIP_RS480)) { 3526 /* attempt to program RS400 disp regs correctly ??? 
*/ 3527 temp = RREG32(RS400_DISP1_REG_CNTL); 3528 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3529 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3530 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3531 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3532 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3533 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3534 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3535 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3536 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3537 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3538 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3539 } 3540 #endif 3541 3542 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", 3543 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3544 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3545 } 3546 3547 if (mode2) { 3548 u32 grph2_cntl; 3549 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3550 3551 if (stop_req > max_stop_req) 3552 stop_req = max_stop_req; 3553 3554 /* 3555 Find the drain rate of the display buffer. 3556 */ 3557 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3558 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3559 3560 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3561 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3562 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3563 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3564 if ((rdev->family == CHIP_R350) && 3565 (stop_req > 0x15)) { 3566 stop_req -= 0x10; 3567 } 3568 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3569 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3570 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3571 RADEON_GRPH_CRITICAL_AT_SOF | 3572 RADEON_GRPH_STOP_CNTL); 3573 3574 if ((rdev->family == CHIP_RS100) || 3575 (rdev->family == CHIP_RS200)) 3576 critical_point2 = 0; 3577 else { 3578 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3579 temp_ff.full = dfixed_const(temp); 3580 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3581 if (sclk_ff.full < temp_ff.full) 3582 temp_ff.full = sclk_ff.full; 3583 3584 read_return_rate.full = temp_ff.full; 3585 3586 if (mode1) { 3587 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3588 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3589 } else { 3590 time_disp1_drop_priority.full = 0; 3591 } 3592 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3593 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3594 crit_point_ff.full += dfixed_const_half(0); 3595 3596 critical_point2 = dfixed_trunc(crit_point_ff); 3597 3598 if (rdev->disp_priority == 2) { 3599 critical_point2 = 0; 3600 } 3601 3602 if (max_stop_req - critical_point2 < 4) 3603 critical_point2 = 0; 3604 3605 } 3606 3607 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3608 /* some R300 cards have problem with this set to 0 */ 3609 critical_point2 = 0x10; 3610 } 3611 3612 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3613 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3614 3615 if ((rdev->family == CHIP_RS400) || 3616 (rdev->family == CHIP_RS480)) { 3617 #if 0 3618 /* attempt to program RS400 disp2 regs correctly ??? 
*/ 3619 temp = RREG32(RS400_DISP2_REQ_CNTL1); 3620 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | 3621 RS400_DISP2_STOP_REQ_LEVEL_MASK); 3622 WREG32(RS400_DISP2_REQ_CNTL1, (temp | 3623 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3624 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3625 temp = RREG32(RS400_DISP2_REQ_CNTL2); 3626 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | 3627 RS400_DISP2_CRITICAL_POINT_STOP_MASK); 3628 WREG32(RS400_DISP2_REQ_CNTL2, (temp | 3629 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | 3630 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); 3631 #endif 3632 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); 3633 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); 3634 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); 3635 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); 3636 } 3637 3638 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", 3639 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 3640 } 3641 } 3642 3643 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 3644 { 3645 uint32_t scratch; 3646 uint32_t tmp = 0; 3647 unsigned i; 3648 int r; 3649 3650 r = radeon_scratch_get(rdev, &scratch); 3651 if (r) { 3652 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 3653 return r; 3654 } 3655 WREG32(scratch, 0xCAFEDEAD); 3656 r = radeon_ring_lock(rdev, ring, 2); 3657 if (r) { 3658 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3659 radeon_scratch_free(rdev, scratch); 3660 return r; 3661 } 3662 radeon_ring_write(ring, PACKET0(scratch, 0)); 3663 radeon_ring_write(ring, 0xDEADBEEF); 3664 radeon_ring_unlock_commit(rdev, ring, false); 3665 for (i = 0; i < rdev->usec_timeout; i++) { 3666 tmp = RREG32(scratch); 3667 if (tmp == 0xDEADBEEF) { 3668 break; 3669 } 3670 DRM_UDELAY(1); 3671 } 3672 if (i < rdev->usec_timeout) { 3673 DRM_INFO("ring test succeeded in %d usecs\n", i); 3674 } else { 3675 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", 3676 scratch, tmp); 3677 r = -EINVAL; 3678 } 3679 radeon_scratch_free(rdev, scratch); 3680 return r; 3681 } 3682 3683 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3684 { 3685 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3686 3687 if (ring->rptr_save_reg) { 3688 u32 next_rptr = ring->wptr + 2 + 3; 3689 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); 3690 radeon_ring_write(ring, next_rptr); 3691 } 3692 3693 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); 3694 radeon_ring_write(ring, ib->gpu_addr); 3695 radeon_ring_write(ring, ib->length_dw); 3696 } 3697 3698 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3699 { 3700 struct radeon_ib ib; 3701 uint32_t scratch; 3702 uint32_t tmp = 0; 3703 unsigned i; 3704 int r; 3705 3706 r = radeon_scratch_get(rdev, &scratch); 3707 if (r) { 3708 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3709 return r; 3710 } 3711 WREG32(scratch, 0xCAFEDEAD); 3712 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); 3713 if (r) { 3714 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3715 goto free_scratch; 3716 } 3717 ib.ptr[0] = PACKET0(scratch, 0); 3718 ib.ptr[1] = 0xDEADBEEF; 3719 ib.ptr[2] = PACKET2(0); 3720 ib.ptr[3] = PACKET2(0); 3721 ib.ptr[4] = PACKET2(0); 3722 ib.ptr[5] = PACKET2(0); 3723 ib.ptr[6] = PACKET2(0); 3724 ib.ptr[7] = PACKET2(0); 3725 ib.length_dw = 8; 3726 r = radeon_ib_schedule(rdev, &ib, NULL, false); 3727 if (r) { 3728 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3729 goto free_ib; 3730 } 3731 r 
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP. We shouldn't need to do this, but better safe
	 * than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
				       S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
	       (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
	       S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
	       C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
					     S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
		       (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
		       S_0003F8_CRTC2_DISPLAY_DIS(1) |
		       S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
		       C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}
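
/*
 * Illustrative sketch (not functional driver code): r100_mc_stop() and
 * r100_mc_resume() are meant to bracket memory-controller reprogramming so
 * that neither the CP nor the CRTCs issue memory requests while the
 * apertures move, e.g.:
 *
 *	struct r100_mc_save save;
 *
 *	r100_mc_stop(rdev, &save);
 *	... reprogram MC_FB_LOCATION / MC_AGP_LOCATION ...
 *	r100_mc_resume(rdev, &save);
 *
 * r100_mc_program() below follows exactly this pattern.
 */
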
static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
			       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for MC idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program the MC; the address space is limited to 32 bits */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
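
/*
 * Resume ordering used below: the GART is disabled and the clocks forced
 * back on first, the GPU is reset *before* the BIOS is replayed (posting a
 * wedged chip can otherwise loop forever, see the comment at the reset
 * call), and only after posting is the regular r100_startup() sequence
 * re-run.
 */
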
int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r100_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However doing our init sequence with the CP and
 * WB stuff set up causes GPU hangs on the RN50 at least. So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	}
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_RB_CNTL, 0);
	}
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp) {
		WREG32(RADEON_SCRATCH_UMSK, 0);
	}
}
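
/*
 * One-time init for r1xx-class parts: debugfs, VGA render disable, scratch
 * and surface registers, kexec sanity restore, combios parsing, a pre-POST
 * reset, then clock/AGP/VRAM/GART/fence/TTM setup and finally
 * r100_startup().  If startup fails, acceleration is torn down again but
 * init still returns 0 so modesetting stays usable.
 */
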
int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		return bus_read_4(rdev->rmmio, reg);
	else {
		uint32_t ret;

		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
		spin_unlock(&rdev->mmio_idx_lock);

		return ret;
	}
}

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		bus_write_4(rdev->rmmio, reg, v);
	else {
		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
		spin_unlock(&rdev->mmio_idx_lock);
	}
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size)
		return bus_read_4(rdev->rio_mem, reg);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size)
		bus_write_4(rdev->rio_mem, reg, v);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
	}
}
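
/*
 * Note on the MMIO helpers above: registers outside the mapped aperture
 * (or any register when always_indirect is requested) are reached through
 * the index/data pair, i.e. the offset is first written to RADEON_MM_INDEX
 * and the value then moves through RADEON_MM_DATA.  A sketch of an indirect
 * read, assuming the caller already has the rdev pointer:
 *
 *	spin_lock(&rdev->mmio_idx_lock);
 *	bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
 *	val = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
 *	spin_unlock(&rdev->mmio_idx_lock);
 *
 * The mmio_idx_lock keeps concurrent index/data sequences from interleaving;
 * the I/O-port variants above do not take it (see the XXX notes).
 */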