1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 * 28 * $FreeBSD: head/sys/dev/drm2/radeon/r100.c 255573 2013-09-14 17:24:41Z dumbbell $ 29 */ 30 #include <drm/drmP.h> 31 #include <uapi_drm/radeon_drm.h> 32 #include "radeon_reg.h" 33 #include "radeon.h" 34 #include "radeon_asic.h" 35 #include "r100d.h" 36 #include "rs100d.h" 37 #include "rv200d.h" 38 #include "rv250d.h" 39 #include "atom.h" 40 41 #include <linux/firmware.h> 42 #include <linux/module.h> 43 44 #include "r100_reg_safe.h" 45 #include "rn50_reg_safe.h" 46 47 /* Firmware Names */ 48 #define FIRMWARE_R100 "radeonkmsfw_R100_cp" 49 #define FIRMWARE_R200 "radeonkmsfw_R200_cp" 50 #define FIRMWARE_R300 "radeonkmsfw_R300_cp" 51 #define FIRMWARE_R420 "radeonkmsfw_R420_cp" 52 #define FIRMWARE_RS690 "radeonkmsfw_RS690_cp" 53 #define FIRMWARE_RS600 "radeonkmsfw_RS600_cp" 54 #define FIRMWARE_R520 "radeonkmsfw_R520_cp" 55 56 MODULE_FIRMWARE(FIRMWARE_R100); 57 MODULE_FIRMWARE(FIRMWARE_R200); 58 MODULE_FIRMWARE(FIRMWARE_R300); 59 MODULE_FIRMWARE(FIRMWARE_R420); 60 MODULE_FIRMWARE(FIRMWARE_RS690); 61 MODULE_FIRMWARE(FIRMWARE_RS600); 62 MODULE_FIRMWARE(FIRMWARE_R520); 63 64 #include "r100_track.h" 65 66 /* This files gather functions specifics to: 67 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 68 * and others in some cases. 69 */ 70 71 static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc) 72 { 73 if (crtc == 0) { 74 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) 75 return true; 76 else 77 return false; 78 } else { 79 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) 80 return true; 81 else 82 return false; 83 } 84 } 85 86 static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc) 87 { 88 u32 vline1, vline2; 89 90 if (crtc == 0) { 91 vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 92 vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 93 } else { 94 vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 95 vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 96 } 97 if (vline1 != vline2) 98 return true; 99 else 100 return false; 101 } 102 103 /** 104 * r100_wait_for_vblank - vblank wait asic callback. 
105 * 106 * @rdev: radeon_device pointer 107 * @crtc: crtc to wait for vblank on 108 * 109 * Wait for vblank on the requested crtc (r1xx-r4xx). 110 */ 111 void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) 112 { 113 unsigned i = 0; 114 115 if (crtc >= rdev->num_crtc) 116 return; 117 118 if (crtc == 0) { 119 if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN)) 120 return; 121 } else { 122 if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN)) 123 return; 124 } 125 126 /* depending on when we hit vblank, we may be close to active; if so, 127 * wait for another frame. 128 */ 129 while (r100_is_in_vblank(rdev, crtc)) { 130 if (i++ % 100 == 0) { 131 if (!r100_is_counter_moving(rdev, crtc)) 132 break; 133 } 134 } 135 136 while (!r100_is_in_vblank(rdev, crtc)) { 137 if (i++ % 100 == 0) { 138 if (!r100_is_counter_moving(rdev, crtc)) 139 break; 140 } 141 } 142 } 143 144 /** 145 * r100_pre_page_flip - pre-pageflip callback. 146 * 147 * @rdev: radeon_device pointer 148 * @crtc: crtc to prepare for pageflip on 149 * 150 * Pre-pageflip callback (r1xx-r4xx). 151 * Enables the pageflip irq (vblank irq). 152 */ 153 void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 154 { 155 /* enable the pflip int */ 156 radeon_irq_kms_pflip_irq_get(rdev, crtc); 157 } 158 159 /** 160 * r100_post_page_flip - pos-pageflip callback. 161 * 162 * @rdev: radeon_device pointer 163 * @crtc: crtc to cleanup pageflip on 164 * 165 * Post-pageflip callback (r1xx-r4xx). 166 * Disables the pageflip irq (vblank irq). 167 */ 168 void r100_post_page_flip(struct radeon_device *rdev, int crtc) 169 { 170 /* disable the pflip int */ 171 radeon_irq_kms_pflip_irq_put(rdev, crtc); 172 } 173 174 /** 175 * r100_page_flip - pageflip callback. 176 * 177 * @rdev: radeon_device pointer 178 * @crtc_id: crtc to cleanup pageflip on 179 * @crtc_base: new address of the crtc (GPU MC address) 180 * 181 * Does the actual pageflip (r1xx-r4xx). 182 * During vblank we take the crtc lock and wait for the update_pending 183 * bit to go high, when it does, we release the lock, and allow the 184 * double buffered update to take place. 185 * Returns the current update pending status. 186 */ 187 u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 188 { 189 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 190 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 191 int i; 192 193 /* Lock the graphics update lock */ 194 /* update the scanout addresses */ 195 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 196 197 /* Wait for update_pending to go high. */ 198 for (i = 0; i < rdev->usec_timeout; i++) { 199 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) 200 break; 201 udelay(1); 202 } 203 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 204 205 /* Unlock the lock, so double-buffering can take place inside vblank */ 206 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; 207 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 208 209 /* Return current update_pending status: */ 210 return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; 211 } 212 213 /** 214 * r100_pm_get_dynpm_state - look up dynpm power state callback. 215 * 216 * @rdev: radeon_device pointer 217 * 218 * Look up the optimal power state based on the 219 * current state of the GPU (r1xx-r5xx). 220 * Used for dynpm only. 
221 */ 222 void r100_pm_get_dynpm_state(struct radeon_device *rdev) 223 { 224 int i; 225 rdev->pm.dynpm_can_upclock = true; 226 rdev->pm.dynpm_can_downclock = true; 227 228 switch (rdev->pm.dynpm_planned_action) { 229 case DYNPM_ACTION_MINIMUM: 230 rdev->pm.requested_power_state_index = 0; 231 rdev->pm.dynpm_can_downclock = false; 232 break; 233 case DYNPM_ACTION_DOWNCLOCK: 234 if (rdev->pm.current_power_state_index == 0) { 235 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 236 rdev->pm.dynpm_can_downclock = false; 237 } else { 238 if (rdev->pm.active_crtc_count > 1) { 239 for (i = 0; i < rdev->pm.num_power_states; i++) { 240 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 241 continue; 242 else if (i >= rdev->pm.current_power_state_index) { 243 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 244 break; 245 } else { 246 rdev->pm.requested_power_state_index = i; 247 break; 248 } 249 } 250 } else 251 rdev->pm.requested_power_state_index = 252 rdev->pm.current_power_state_index - 1; 253 } 254 /* don't use the power state if crtcs are active and no display flag is set */ 255 if ((rdev->pm.active_crtc_count > 0) && 256 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & 257 RADEON_PM_MODE_NO_DISPLAY)) { 258 rdev->pm.requested_power_state_index++; 259 } 260 break; 261 case DYNPM_ACTION_UPCLOCK: 262 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { 263 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 264 rdev->pm.dynpm_can_upclock = false; 265 } else { 266 if (rdev->pm.active_crtc_count > 1) { 267 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { 268 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 269 continue; 270 else if (i <= rdev->pm.current_power_state_index) { 271 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 272 break; 273 } else { 274 rdev->pm.requested_power_state_index = i; 275 break; 276 } 277 } 278 } else 279 rdev->pm.requested_power_state_index = 280 rdev->pm.current_power_state_index + 1; 281 } 282 break; 283 case DYNPM_ACTION_DEFAULT: 284 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 285 rdev->pm.dynpm_can_upclock = false; 286 break; 287 case DYNPM_ACTION_NONE: 288 default: 289 DRM_ERROR("Requested mode for not defined action\n"); 290 return; 291 } 292 /* only one clock mode per power state */ 293 rdev->pm.requested_clock_mode_index = 0; 294 295 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", 296 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 297 clock_info[rdev->pm.requested_clock_mode_index].sclk, 298 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 299 clock_info[rdev->pm.requested_clock_mode_index].mclk, 300 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 301 pcie_lanes); 302 } 303 304 /** 305 * r100_pm_init_profile - Initialize power profiles callback. 306 * 307 * @rdev: radeon_device pointer 308 * 309 * Initialize the power states used in profile mode 310 * (r1xx-r3xx). 311 * Used for profile mode only. 
312 */ 313 void r100_pm_init_profile(struct radeon_device *rdev) 314 { 315 /* default */ 316 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 317 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 318 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 319 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 320 /* low sh */ 321 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; 322 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; 323 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 324 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 325 /* mid sh */ 326 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; 327 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; 328 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 329 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 330 /* high sh */ 331 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; 332 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 333 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 334 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 335 /* low mh */ 336 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; 337 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 338 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 339 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 340 /* mid mh */ 341 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; 342 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 343 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 344 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 345 /* high mh */ 346 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; 347 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 348 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 349 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 350 } 351 352 /** 353 * r100_pm_misc - set additional pm hw parameters callback. 354 * 355 * @rdev: radeon_device pointer 356 * 357 * Set non-clock parameters associated with a power state 358 * (voltage, pcie lanes, etc.) (r1xx-r4xx). 
359 */ 360 void r100_pm_misc(struct radeon_device *rdev) 361 { 362 int requested_index = rdev->pm.requested_power_state_index; 363 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; 364 struct radeon_voltage *voltage = &ps->clock_info[0].voltage; 365 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; 366 367 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { 368 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { 369 tmp = RREG32(voltage->gpio.reg); 370 if (voltage->active_high) 371 tmp |= voltage->gpio.mask; 372 else 373 tmp &= ~(voltage->gpio.mask); 374 WREG32(voltage->gpio.reg, tmp); 375 if (voltage->delay) 376 udelay(voltage->delay); 377 } else { 378 tmp = RREG32(voltage->gpio.reg); 379 if (voltage->active_high) 380 tmp &= ~voltage->gpio.mask; 381 else 382 tmp |= voltage->gpio.mask; 383 WREG32(voltage->gpio.reg, tmp); 384 if (voltage->delay) 385 udelay(voltage->delay); 386 } 387 } 388 389 sclk_cntl = RREG32_PLL(SCLK_CNTL); 390 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); 391 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); 392 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); 393 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); 394 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { 395 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; 396 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) 397 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; 398 else 399 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; 400 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) 401 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); 402 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) 403 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); 404 } else 405 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; 406 407 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { 408 sclk_more_cntl |= IO_CG_VOLTAGE_DROP; 409 if (voltage->delay) { 410 sclk_more_cntl |= VOLTAGE_DROP_SYNC; 411 switch (voltage->delay) { 412 case 33: 413 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); 414 break; 415 case 66: 416 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); 417 break; 418 case 99: 419 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); 420 break; 421 case 132: 422 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); 423 break; 424 } 425 } else 426 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; 427 } else 428 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; 429 430 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) 431 sclk_cntl &= ~FORCE_HDP; 432 else 433 sclk_cntl |= FORCE_HDP; 434 435 WREG32_PLL(SCLK_CNTL, sclk_cntl); 436 WREG32_PLL(SCLK_CNTL2, sclk_cntl2); 437 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); 438 439 /* set pcie lanes */ 440 if ((rdev->flags & RADEON_IS_PCIE) && 441 !(rdev->flags & RADEON_IS_IGP) && 442 rdev->asic->pm.set_pcie_lanes && 443 (ps->pcie_lanes != 444 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { 445 radeon_set_pcie_lanes(rdev, 446 ps->pcie_lanes); 447 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes); 448 } 449 } 450 451 /** 452 * r100_pm_prepare - pre-power state change callback. 453 * 454 * @rdev: radeon_device pointer 455 * 456 * Prepare for a power state change (r1xx-r4xx). 
457 */ 458 void r100_pm_prepare(struct radeon_device *rdev) 459 { 460 struct drm_device *ddev = rdev->ddev; 461 struct drm_crtc *crtc; 462 struct radeon_crtc *radeon_crtc; 463 u32 tmp; 464 465 /* disable any active CRTCs */ 466 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 467 radeon_crtc = to_radeon_crtc(crtc); 468 if (radeon_crtc->enabled) { 469 if (radeon_crtc->crtc_id) { 470 tmp = RREG32(RADEON_CRTC2_GEN_CNTL); 471 tmp |= RADEON_CRTC2_DISP_REQ_EN_B; 472 WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 473 } else { 474 tmp = RREG32(RADEON_CRTC_GEN_CNTL); 475 tmp |= RADEON_CRTC_DISP_REQ_EN_B; 476 WREG32(RADEON_CRTC_GEN_CNTL, tmp); 477 } 478 } 479 } 480 } 481 482 /** 483 * r100_pm_finish - post-power state change callback. 484 * 485 * @rdev: radeon_device pointer 486 * 487 * Clean up after a power state change (r1xx-r4xx). 488 */ 489 void r100_pm_finish(struct radeon_device *rdev) 490 { 491 struct drm_device *ddev = rdev->ddev; 492 struct drm_crtc *crtc; 493 struct radeon_crtc *radeon_crtc; 494 u32 tmp; 495 496 /* enable any active CRTCs */ 497 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 498 radeon_crtc = to_radeon_crtc(crtc); 499 if (radeon_crtc->enabled) { 500 if (radeon_crtc->crtc_id) { 501 tmp = RREG32(RADEON_CRTC2_GEN_CNTL); 502 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; 503 WREG32(RADEON_CRTC2_GEN_CNTL, tmp); 504 } else { 505 tmp = RREG32(RADEON_CRTC_GEN_CNTL); 506 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; 507 WREG32(RADEON_CRTC_GEN_CNTL, tmp); 508 } 509 } 510 } 511 } 512 513 /** 514 * r100_gui_idle - gui idle callback. 515 * 516 * @rdev: radeon_device pointer 517 * 518 * Check of the GUI (2D/3D engines) are idle (r1xx-r5xx). 519 * Returns true if idle, false if not. 520 */ 521 bool r100_gui_idle(struct radeon_device *rdev) 522 { 523 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) 524 return false; 525 else 526 return true; 527 } 528 529 /* hpd for digital panel detect/disconnect */ 530 /** 531 * r100_hpd_sense - hpd sense callback. 532 * 533 * @rdev: radeon_device pointer 534 * @hpd: hpd (hotplug detect) pin 535 * 536 * Checks if a digital monitor is connected (r1xx-r4xx). 537 * Returns true if connected, false if not connected. 538 */ 539 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 540 { 541 bool connected = false; 542 543 switch (hpd) { 544 case RADEON_HPD_1: 545 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) 546 connected = true; 547 break; 548 case RADEON_HPD_2: 549 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) 550 connected = true; 551 break; 552 default: 553 break; 554 } 555 return connected; 556 } 557 558 /** 559 * r100_hpd_set_polarity - hpd set polarity callback. 560 * 561 * @rdev: radeon_device pointer 562 * @hpd: hpd (hotplug detect) pin 563 * 564 * Set the polarity of the hpd pin (r1xx-r4xx). 565 */ 566 void r100_hpd_set_polarity(struct radeon_device *rdev, 567 enum radeon_hpd_id hpd) 568 { 569 u32 tmp; 570 bool connected = r100_hpd_sense(rdev, hpd); 571 572 switch (hpd) { 573 case RADEON_HPD_1: 574 tmp = RREG32(RADEON_FP_GEN_CNTL); 575 if (connected) 576 tmp &= ~RADEON_FP_DETECT_INT_POL; 577 else 578 tmp |= RADEON_FP_DETECT_INT_POL; 579 WREG32(RADEON_FP_GEN_CNTL, tmp); 580 break; 581 case RADEON_HPD_2: 582 tmp = RREG32(RADEON_FP2_GEN_CNTL); 583 if (connected) 584 tmp &= ~RADEON_FP2_DETECT_INT_POL; 585 else 586 tmp |= RADEON_FP2_DETECT_INT_POL; 587 WREG32(RADEON_FP2_GEN_CNTL, tmp); 588 break; 589 default: 590 break; 591 } 592 } 593 594 /** 595 * r100_hpd_init - hpd setup callback. 
596 * 597 * @rdev: radeon_device pointer 598 * 599 * Setup the hpd pins used by the card (r1xx-r4xx). 600 * Set the polarity, and enable the hpd interrupts. 601 */ 602 void r100_hpd_init(struct radeon_device *rdev) 603 { 604 struct drm_device *dev = rdev->ddev; 605 struct drm_connector *connector; 606 unsigned enable = 0; 607 608 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 609 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 610 enable |= 1 << radeon_connector->hpd.hpd; 611 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 612 } 613 radeon_irq_kms_enable_hpd(rdev, enable); 614 } 615 616 /** 617 * r100_hpd_fini - hpd tear down callback. 618 * 619 * @rdev: radeon_device pointer 620 * 621 * Tear down the hpd pins used by the card (r1xx-r4xx). 622 * Disable the hpd interrupts. 623 */ 624 void r100_hpd_fini(struct radeon_device *rdev) 625 { 626 struct drm_device *dev = rdev->ddev; 627 struct drm_connector *connector; 628 unsigned disable = 0; 629 630 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 631 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 632 disable |= 1 << radeon_connector->hpd.hpd; 633 } 634 radeon_irq_kms_disable_hpd(rdev, disable); 635 } 636 637 /* 638 * PCI GART 639 */ 640 void r100_pci_gart_tlb_flush(struct radeon_device *rdev) 641 { 642 /* TODO: can we do somethings here ? */ 643 /* It seems hw only cache one entry so we should discard this 644 * entry otherwise if first GPU GART read hit this entry it 645 * could end up in wrong address. */ 646 } 647 648 int r100_pci_gart_init(struct radeon_device *rdev) 649 { 650 int r; 651 652 if (rdev->gart.ptr) { 653 WARN(1, "R100 PCI GART already initialized\n"); 654 return 0; 655 } 656 /* Initialize common gart structure */ 657 r = radeon_gart_init(rdev); 658 if (r) 659 return r; 660 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 661 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 662 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 663 return radeon_gart_table_ram_alloc(rdev); 664 } 665 666 int r100_pci_gart_enable(struct radeon_device *rdev) 667 { 668 uint32_t tmp; 669 670 radeon_gart_restore(rdev); 671 /* discard memory request outside of configured range */ 672 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 673 WREG32(RADEON_AIC_CNTL, tmp); 674 /* set address range for PCI address translate */ 675 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start); 676 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end); 677 /* set PCI GART page-table base address */ 678 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 679 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 680 WREG32(RADEON_AIC_CNTL, tmp); 681 r100_pci_gart_tlb_flush(rdev); 682 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n", 683 (unsigned)(rdev->mc.gtt_size >> 20), 684 (unsigned long long)rdev->gart.table_addr); 685 rdev->gart.ready = true; 686 return 0; 687 } 688 689 void r100_pci_gart_disable(struct radeon_device *rdev) 690 { 691 uint32_t tmp; 692 693 /* discard memory request outside of configured range */ 694 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 695 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); 696 WREG32(RADEON_AIC_LO_ADDR, 0); 697 WREG32(RADEON_AIC_HI_ADDR, 0); 698 } 699 700 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 701 { 702 u32 *gtt = rdev->gart.ptr; 703 704 if (i < 0 || i > rdev->gart.num_gpu_pages) { 705 return -EINVAL; 706 } 707 
gtt[i] = cpu_to_le32(lower_32_bits(addr)); 708 return 0; 709 } 710 711 void r100_pci_gart_fini(struct radeon_device *rdev) 712 { 713 radeon_gart_fini(rdev); 714 r100_pci_gart_disable(rdev); 715 radeon_gart_table_ram_free(rdev); 716 } 717 718 int r100_irq_set(struct radeon_device *rdev) 719 { 720 uint32_t tmp = 0; 721 722 if (!rdev->irq.installed) { 723 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 724 WREG32(R_000040_GEN_INT_CNTL, 0); 725 return -EINVAL; 726 } 727 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 728 tmp |= RADEON_SW_INT_ENABLE; 729 } 730 if (rdev->irq.crtc_vblank_int[0] || 731 atomic_read(&rdev->irq.pflip[0])) { 732 tmp |= RADEON_CRTC_VBLANK_MASK; 733 } 734 if (rdev->irq.crtc_vblank_int[1] || 735 atomic_read(&rdev->irq.pflip[1])) { 736 tmp |= RADEON_CRTC2_VBLANK_MASK; 737 } 738 if (rdev->irq.hpd[0]) { 739 tmp |= RADEON_FP_DETECT_MASK; 740 } 741 if (rdev->irq.hpd[1]) { 742 tmp |= RADEON_FP2_DETECT_MASK; 743 } 744 WREG32(RADEON_GEN_INT_CNTL, tmp); 745 return 0; 746 } 747 748 void r100_irq_disable(struct radeon_device *rdev) 749 { 750 u32 tmp; 751 752 WREG32(R_000040_GEN_INT_CNTL, 0); 753 /* Wait and acknowledge irq */ 754 mdelay(1); 755 tmp = RREG32(R_000044_GEN_INT_STATUS); 756 WREG32(R_000044_GEN_INT_STATUS, tmp); 757 } 758 759 static uint32_t r100_irq_ack(struct radeon_device *rdev) 760 { 761 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 762 uint32_t irq_mask = RADEON_SW_INT_TEST | 763 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 764 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 765 766 if (irqs) { 767 WREG32(RADEON_GEN_INT_STATUS, irqs); 768 } 769 return irqs & irq_mask; 770 } 771 772 irqreturn_t r100_irq_process(struct radeon_device *rdev) 773 { 774 uint32_t status, msi_rearm; 775 bool queue_hotplug = false; 776 777 status = r100_irq_ack(rdev); 778 if (!status) { 779 return IRQ_NONE; 780 } 781 if (rdev->shutdown) { 782 return IRQ_NONE; 783 } 784 while (status) { 785 /* SW interrupt */ 786 if (status & RADEON_SW_INT_TEST) { 787 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 788 } 789 /* Vertical blank interrupts */ 790 if (status & RADEON_CRTC_VBLANK_STAT) { 791 if (rdev->irq.crtc_vblank_int[0]) { 792 drm_handle_vblank(rdev->ddev, 0); 793 rdev->pm.vblank_sync = true; 794 wake_up(&rdev->irq.vblank_queue); 795 } 796 if (atomic_read(&rdev->irq.pflip[0])) 797 radeon_crtc_handle_flip(rdev, 0); 798 } 799 if (status & RADEON_CRTC2_VBLANK_STAT) { 800 if (rdev->irq.crtc_vblank_int[1]) { 801 drm_handle_vblank(rdev->ddev, 1); 802 rdev->pm.vblank_sync = true; 803 wake_up(&rdev->irq.vblank_queue); 804 } 805 if (atomic_read(&rdev->irq.pflip[1])) 806 radeon_crtc_handle_flip(rdev, 1); 807 } 808 if (status & RADEON_FP_DETECT_STAT) { 809 queue_hotplug = true; 810 DRM_DEBUG("HPD1\n"); 811 } 812 if (status & RADEON_FP2_DETECT_STAT) { 813 queue_hotplug = true; 814 DRM_DEBUG("HPD2\n"); 815 } 816 status = r100_irq_ack(rdev); 817 } 818 if (queue_hotplug) 819 taskqueue_enqueue(rdev->tq, &rdev->hotplug_work); 820 if (rdev->msi_enabled) { 821 switch (rdev->family) { 822 case CHIP_RS400: 823 case CHIP_RS480: 824 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM; 825 WREG32(RADEON_AIC_CNTL, msi_rearm); 826 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); 827 break; 828 default: 829 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); 830 break; 831 } 832 } 833 return IRQ_HANDLED; 834 } 835 836 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) 837 { 838 if (crtc == 0) 839 return RREG32(RADEON_CRTC_CRNT_FRAME); 840 else 
841 return RREG32(RADEON_CRTC2_CRNT_FRAME); 842 } 843 844 /* Who ever call radeon_fence_emit should call ring_lock and ask 845 * for enough space (today caller are ib schedule and buffer move) */ 846 void r100_fence_ring_emit(struct radeon_device *rdev, 847 struct radeon_fence *fence) 848 { 849 struct radeon_ring *ring = &rdev->ring[fence->ring]; 850 851 /* We have to make sure that caches are flushed before 852 * CPU might read something from VRAM. */ 853 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); 854 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL); 855 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 856 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL); 857 /* Wait until IDLE & CLEAN */ 858 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 859 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); 860 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 861 radeon_ring_write(ring, rdev->config.r100.hdp_cntl | 862 RADEON_HDP_READ_BUFFER_INVALIDATE); 863 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 864 radeon_ring_write(ring, rdev->config.r100.hdp_cntl); 865 /* Emit fence sequence & fire IRQ */ 866 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); 867 radeon_ring_write(ring, fence->seq); 868 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); 869 radeon_ring_write(ring, RADEON_SW_INT_FIRE); 870 } 871 872 void r100_semaphore_ring_emit(struct radeon_device *rdev, 873 struct radeon_ring *ring, 874 struct radeon_semaphore *semaphore, 875 bool emit_wait) 876 { 877 /* Unused on older asics, since we don't have semaphores or multiple rings */ 878 BUG(); 879 } 880 881 int r100_copy_blit(struct radeon_device *rdev, 882 uint64_t src_offset, 883 uint64_t dst_offset, 884 unsigned num_gpu_pages, 885 struct radeon_fence **fence) 886 { 887 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 888 uint32_t cur_pages; 889 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; 890 uint32_t pitch; 891 uint32_t stride_pixels; 892 unsigned ndw; 893 int num_loops; 894 int r = 0; 895 896 /* radeon limited to 16k stride */ 897 stride_bytes &= 0x3fff; 898 /* radeon pitch is /64 */ 899 pitch = stride_bytes / 64; 900 stride_pixels = stride_bytes / 4; 901 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); 902 903 /* Ask for enough room for blit + flush + fence */ 904 ndw = 64 + (10 * num_loops); 905 r = radeon_ring_lock(rdev, ring, ndw); 906 if (r) { 907 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 908 return -EINVAL; 909 } 910 while (num_gpu_pages > 0) { 911 cur_pages = num_gpu_pages; 912 if (cur_pages > 8191) { 913 cur_pages = 8191; 914 } 915 num_gpu_pages -= cur_pages; 916 917 /* pages are in Y direction - height 918 page width in X direction - width */ 919 radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8)); 920 radeon_ring_write(ring, 921 RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 922 RADEON_GMC_DST_PITCH_OFFSET_CNTL | 923 RADEON_GMC_SRC_CLIPPING | 924 RADEON_GMC_DST_CLIPPING | 925 RADEON_GMC_BRUSH_NONE | 926 (RADEON_COLOR_FORMAT_ARGB8888 << 8) | 927 RADEON_GMC_SRC_DATATYPE_COLOR | 928 RADEON_ROP3_S | 929 RADEON_DP_SRC_SOURCE_MEMORY | 930 RADEON_GMC_CLR_CMP_CNTL_DIS | 931 RADEON_GMC_WR_MSK_DIS); 932 radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10)); 933 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10)); 934 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); 935 radeon_ring_write(ring, 0); 936 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); 
937 radeon_ring_write(ring, num_gpu_pages); 938 radeon_ring_write(ring, num_gpu_pages); 939 radeon_ring_write(ring, cur_pages | (stride_pixels << 16)); 940 } 941 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 942 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL); 943 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 944 radeon_ring_write(ring, 945 RADEON_WAIT_2D_IDLECLEAN | 946 RADEON_WAIT_HOST_IDLECLEAN | 947 RADEON_WAIT_DMA_GUI_IDLE); 948 if (fence) { 949 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 950 } 951 radeon_ring_unlock_commit(rdev, ring); 952 return r; 953 } 954 955 static int r100_cp_wait_for_idle(struct radeon_device *rdev) 956 { 957 unsigned i; 958 u32 tmp; 959 960 for (i = 0; i < rdev->usec_timeout; i++) { 961 tmp = RREG32(R_000E40_RBBM_STATUS); 962 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { 963 return 0; 964 } 965 udelay(1); 966 } 967 return -1; 968 } 969 970 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) 971 { 972 int r; 973 974 r = radeon_ring_lock(rdev, ring, 2); 975 if (r) { 976 return; 977 } 978 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); 979 radeon_ring_write(ring, 980 RADEON_ISYNC_ANY2D_IDLE3D | 981 RADEON_ISYNC_ANY3D_IDLE2D | 982 RADEON_ISYNC_WAIT_IDLEGUI | 983 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 984 radeon_ring_unlock_commit(rdev, ring); 985 } 986 987 988 /* Load the microcode for the CP */ 989 static int r100_cp_init_microcode(struct radeon_device *rdev) 990 { 991 const char *fw_name = NULL; 992 int err; 993 994 DRM_DEBUG_KMS("\n"); 995 996 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || 997 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || 998 (rdev->family == CHIP_RS200)) { 999 DRM_INFO("Loading R100 Microcode\n"); 1000 fw_name = FIRMWARE_R100; 1001 } else if ((rdev->family == CHIP_R200) || 1002 (rdev->family == CHIP_RV250) || 1003 (rdev->family == CHIP_RV280) || 1004 (rdev->family == CHIP_RS300)) { 1005 DRM_INFO("Loading R200 Microcode\n"); 1006 fw_name = FIRMWARE_R200; 1007 } else if ((rdev->family == CHIP_R300) || 1008 (rdev->family == CHIP_R350) || 1009 (rdev->family == CHIP_RV350) || 1010 (rdev->family == CHIP_RV380) || 1011 (rdev->family == CHIP_RS400) || 1012 (rdev->family == CHIP_RS480)) { 1013 DRM_INFO("Loading R300 Microcode\n"); 1014 fw_name = FIRMWARE_R300; 1015 } else if ((rdev->family == CHIP_R420) || 1016 (rdev->family == CHIP_R423) || 1017 (rdev->family == CHIP_RV410)) { 1018 DRM_INFO("Loading R400 Microcode\n"); 1019 fw_name = FIRMWARE_R420; 1020 } else if ((rdev->family == CHIP_RS690) || 1021 (rdev->family == CHIP_RS740)) { 1022 DRM_INFO("Loading RS690/RS740 Microcode\n"); 1023 fw_name = FIRMWARE_RS690; 1024 } else if (rdev->family == CHIP_RS600) { 1025 DRM_INFO("Loading RS600 Microcode\n"); 1026 fw_name = FIRMWARE_RS600; 1027 } else if ((rdev->family == CHIP_RV515) || 1028 (rdev->family == CHIP_R520) || 1029 (rdev->family == CHIP_RV530) || 1030 (rdev->family == CHIP_R580) || 1031 (rdev->family == CHIP_RV560) || 1032 (rdev->family == CHIP_RV570)) { 1033 DRM_INFO("Loading R500 Microcode\n"); 1034 fw_name = FIRMWARE_R520; 1035 } 1036 1037 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 1038 if (err) { 1039 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", 1040 fw_name); 1041 } else if (rdev->me_fw->datasize % 8) { 1042 printk(KERN_ERR 1043 "radeon_cp: Bogus length %zu in firmware \"%s\"\n", 1044 rdev->me_fw->datasize, fw_name); 1045 err = -EINVAL; 1046 release_firmware(rdev->me_fw); 1047 rdev->me_fw = NULL; 1048 } 1049 
return err; 1050 } 1051 1052 /** 1053 * r100_cp_fini_microcode - drop the firmware image reference 1054 * 1055 * @rdev: radeon_device pointer 1056 * 1057 * Drop the me firmware image reference. 1058 * Called at driver shutdown. 1059 */ 1060 static void r100_cp_fini_microcode (struct radeon_device *rdev) 1061 { 1062 release_firmware(rdev->me_fw); 1063 rdev->me_fw = NULL; 1064 } 1065 1066 static void r100_cp_load_microcode(struct radeon_device *rdev) 1067 { 1068 const __be32 *fw_data; 1069 int i, size; 1070 1071 if (r100_gui_wait_for_idle(rdev)) { 1072 printk(KERN_WARNING "Failed to wait GUI idle while " 1073 "programming pipes. Bad things might happen.\n"); 1074 } 1075 1076 if (rdev->me_fw) { 1077 size = rdev->me_fw->datasize / 4; 1078 fw_data = (const __be32 *)rdev->me_fw->data; 1079 WREG32(RADEON_CP_ME_RAM_ADDR, 0); 1080 for (i = 0; i < size; i += 2) { 1081 WREG32(RADEON_CP_ME_RAM_DATAH, 1082 be32_to_cpup(&fw_data[i])); 1083 WREG32(RADEON_CP_ME_RAM_DATAL, 1084 be32_to_cpup(&fw_data[i + 1])); 1085 } 1086 } 1087 } 1088 1089 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) 1090 { 1091 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1092 unsigned rb_bufsz; 1093 unsigned rb_blksz; 1094 unsigned max_fetch; 1095 unsigned pre_write_timer; 1096 unsigned pre_write_limit; 1097 unsigned indirect2_start; 1098 unsigned indirect1_start; 1099 uint32_t tmp; 1100 int r; 1101 1102 if (r100_debugfs_cp_init(rdev)) { 1103 DRM_ERROR("Failed to register debugfs file for CP !\n"); 1104 } 1105 if (!rdev->me_fw) { 1106 r = r100_cp_init_microcode(rdev); 1107 if (r) { 1108 DRM_ERROR("Failed to load firmware!\n"); 1109 return r; 1110 } 1111 } 1112 1113 /* Align ring size */ 1114 rb_bufsz = drm_order(ring_size / 8); 1115 ring_size = (1 << (rb_bufsz + 1)) * 4; 1116 r100_cp_load_microcode(rdev); 1117 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, 1118 RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR, 1119 0, 0x7fffff, RADEON_CP_PACKET2); 1120 if (r) { 1121 return r; 1122 } 1123 /* Each time the cp read 1024 bytes (16 dword/quadword) update 1124 * the rptr copy in system ram */ 1125 rb_blksz = 9; 1126 /* cp will read 128bytes at a time (4 dwords) */ 1127 max_fetch = 1; 1128 ring->align_mask = 16 - 1; 1129 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ 1130 pre_write_timer = 64; 1131 /* Force CP_RB_WPTR write if written more than one time before the 1132 * delay expire 1133 */ 1134 pre_write_limit = 0; 1135 /* Setup the cp cache like this (cache size is 96 dwords) : 1136 * RING 0 to 15 1137 * INDIRECT1 16 to 79 1138 * INDIRECT2 80 to 95 1139 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 1140 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords)) 1141 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) 1142 * Idea being that most of the gpu cmd will be through indirect1 buffer 1143 * so it gets the bigger cache. 
1144 */ 1145 indirect2_start = 80; 1146 indirect1_start = 16; 1147 /* cp setup */ 1148 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 1149 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | 1150 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 1151 REG_SET(RADEON_MAX_FETCH, max_fetch)); 1152 #ifdef __BIG_ENDIAN 1153 tmp |= RADEON_BUF_SWAP_32BIT; 1154 #endif 1155 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); 1156 1157 /* Set ring address */ 1158 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr); 1159 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr); 1160 /* Force read & write ptr to 0 */ 1161 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 1162 WREG32(RADEON_CP_RB_RPTR_WR, 0); 1163 ring->wptr = 0; 1164 WREG32(RADEON_CP_RB_WPTR, ring->wptr); 1165 1166 /* set the wb address whether it's enabled or not */ 1167 WREG32(R_00070C_CP_RB_RPTR_ADDR, 1168 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); 1169 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); 1170 1171 if (rdev->wb.enabled) 1172 WREG32(R_000770_SCRATCH_UMSK, 0xff); 1173 else { 1174 tmp |= RADEON_RB_NO_UPDATE; 1175 WREG32(R_000770_SCRATCH_UMSK, 0); 1176 } 1177 1178 WREG32(RADEON_CP_RB_CNTL, tmp); 1179 udelay(10); 1180 ring->rptr = RREG32(RADEON_CP_RB_RPTR); 1181 /* Set cp mode to bus mastering & enable cp*/ 1182 WREG32(RADEON_CP_CSQ_MODE, 1183 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1184 REG_SET(RADEON_INDIRECT1_START, indirect1_start)); 1185 WREG32(RADEON_CP_RB_WPTR_DELAY, 0); 1186 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); 1187 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1188 1189 /* at this point everything should be setup correctly to enable master */ 1190 pci_enable_busmaster(rdev->dev); 1191 1192 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1193 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 1194 if (r) { 1195 DRM_ERROR("radeon: cp isn't working (%d).\n", r); 1196 return r; 1197 } 1198 ring->ready = true; 1199 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1200 1201 if (!ring->rptr_save_reg /* not resuming from suspend */ 1202 && radeon_ring_supports_scratch_reg(rdev, ring)) { 1203 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 1204 if (r) { 1205 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 1206 ring->rptr_save_reg = 0; 1207 } 1208 } 1209 return 0; 1210 } 1211 1212 void r100_cp_fini(struct radeon_device *rdev) 1213 { 1214 if (r100_cp_wait_for_idle(rdev)) { 1215 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n"); 1216 } 1217 /* Disable ring */ 1218 r100_cp_disable(rdev); 1219 radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg); 1220 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1221 DRM_INFO("radeon: cp finalized\n"); 1222 } 1223 1224 void r100_cp_disable(struct radeon_device *rdev) 1225 { 1226 /* Disable ring */ 1227 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1228 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1229 WREG32(RADEON_CP_CSQ_MODE, 0); 1230 WREG32(RADEON_CP_CSQ_CNTL, 0); 1231 WREG32(R_000770_SCRATCH_UMSK, 0); 1232 if (r100_gui_wait_for_idle(rdev)) { 1233 printk(KERN_WARNING "Failed to wait GUI idle while " 1234 "programming pipes. 
Bad things might happen.\n"); 1235 } 1236 } 1237 1238 /* 1239 * CS functions 1240 */ 1241 int r100_reloc_pitch_offset(struct radeon_cs_parser *p, 1242 struct radeon_cs_packet *pkt, 1243 unsigned idx, 1244 unsigned reg) 1245 { 1246 int r; 1247 u32 tile_flags = 0; 1248 u32 tmp; 1249 struct radeon_cs_reloc *reloc; 1250 u32 value; 1251 1252 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1253 if (r) { 1254 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1255 idx, reg); 1256 radeon_cs_dump_packet(p, pkt); 1257 return r; 1258 } 1259 1260 value = radeon_get_ib_value(p, idx); 1261 tmp = value & 0x003fffff; 1262 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 1263 1264 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1265 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1266 tile_flags |= RADEON_DST_TILE_MACRO; 1267 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1268 if (reg == RADEON_SRC_PITCH_OFFSET) { 1269 DRM_ERROR("Cannot src blit from microtiled surface\n"); 1270 radeon_cs_dump_packet(p, pkt); 1271 return -EINVAL; 1272 } 1273 tile_flags |= RADEON_DST_TILE_MICRO; 1274 } 1275 1276 tmp |= tile_flags; 1277 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp; 1278 } else 1279 p->ib.ptr[idx] = (value & 0xffc00000) | tmp; 1280 return 0; 1281 } 1282 1283 int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, 1284 struct radeon_cs_packet *pkt, 1285 int idx) 1286 { 1287 unsigned c, i; 1288 struct radeon_cs_reloc *reloc; 1289 struct r100_cs_track *track; 1290 int r = 0; 1291 volatile uint32_t *ib; 1292 u32 idx_value; 1293 1294 ib = p->ib.ptr; 1295 track = (struct r100_cs_track *)p->track; 1296 c = radeon_get_ib_value(p, idx++) & 0x1F; 1297 if (c > 16) { 1298 DRM_ERROR("Only 16 vertex buffers are allowed %d\n", 1299 pkt->opcode); 1300 radeon_cs_dump_packet(p, pkt); 1301 return -EINVAL; 1302 } 1303 track->num_arrays = c; 1304 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1305 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1306 if (r) { 1307 DRM_ERROR("No reloc for packet3 %d\n", 1308 pkt->opcode); 1309 radeon_cs_dump_packet(p, pkt); 1310 return r; 1311 } 1312 idx_value = radeon_get_ib_value(p, idx); 1313 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); 1314 1315 track->arrays[i + 0].esize = idx_value >> 8; 1316 track->arrays[i + 0].robj = reloc->robj; 1317 track->arrays[i + 0].esize &= 0x7F; 1318 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1319 if (r) { 1320 DRM_ERROR("No reloc for packet3 %d\n", 1321 pkt->opcode); 1322 radeon_cs_dump_packet(p, pkt); 1323 return r; 1324 } 1325 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); 1326 track->arrays[i + 1].robj = reloc->robj; 1327 track->arrays[i + 1].esize = idx_value >> 24; 1328 track->arrays[i + 1].esize &= 0x7F; 1329 } 1330 if (c & 1) { 1331 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1332 if (r) { 1333 DRM_ERROR("No reloc for packet3 %d\n", 1334 pkt->opcode); 1335 radeon_cs_dump_packet(p, pkt); 1336 return r; 1337 } 1338 idx_value = radeon_get_ib_value(p, idx); 1339 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); 1340 track->arrays[i + 0].robj = reloc->robj; 1341 track->arrays[i + 0].esize = idx_value >> 8; 1342 track->arrays[i + 0].esize &= 0x7F; 1343 } 1344 return r; 1345 } 1346 1347 int r100_cs_parse_packet0(struct radeon_cs_parser *p, 1348 struct radeon_cs_packet *pkt, 1349 const unsigned *auth, unsigned n, 1350 radeon_packet0_check_t check) 1351 { 1352 unsigned reg; 1353 unsigned i, j, m; 1354 unsigned idx; 1355 int r; 1356 1357 idx = pkt->idx + 1; 1358 reg = pkt->reg; 
1359 /* Check that register fall into register range 1360 * determined by the number of entry (n) in the 1361 * safe register bitmap. 1362 */ 1363 if (pkt->one_reg_wr) { 1364 if ((reg >> 7) > n) { 1365 return -EINVAL; 1366 } 1367 } else { 1368 if (((reg + (pkt->count << 2)) >> 7) > n) { 1369 return -EINVAL; 1370 } 1371 } 1372 for (i = 0; i <= pkt->count; i++, idx++) { 1373 j = (reg >> 7); 1374 m = 1 << ((reg >> 2) & 31); 1375 if (auth[j] & m) { 1376 r = check(p, pkt, idx, reg); 1377 if (r) { 1378 return r; 1379 } 1380 } 1381 if (pkt->one_reg_wr) { 1382 if (!(auth[j] & m)) { 1383 break; 1384 } 1385 } else { 1386 reg += 4; 1387 } 1388 } 1389 return 0; 1390 } 1391 1392 /** 1393 * r100_cs_packet_next_vline() - parse userspace VLINE packet 1394 * @parser: parser structure holding parsing context. 1395 * 1396 * Userspace sends a special sequence for VLINE waits. 1397 * PACKET0 - VLINE_START_END + value 1398 * PACKET0 - WAIT_UNTIL +_value 1399 * RELOC (P3) - crtc_id in reloc. 1400 * 1401 * This function parses this and relocates the VLINE START END 1402 * and WAIT UNTIL packets to the correct crtc. 1403 * It also detects a switched off crtc and nulls out the 1404 * wait in that case. 1405 */ 1406 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 1407 { 1408 struct drm_mode_object *obj; 1409 struct drm_crtc *crtc; 1410 struct radeon_crtc *radeon_crtc; 1411 struct radeon_cs_packet p3reloc, waitreloc; 1412 int crtc_id; 1413 int r; 1414 uint32_t header, h_idx, reg; 1415 volatile uint32_t *ib; 1416 1417 ib = p->ib.ptr; 1418 1419 /* parse the wait until */ 1420 r = radeon_cs_packet_parse(p, &waitreloc, p->idx); 1421 if (r) 1422 return r; 1423 1424 /* check its a wait until and only 1 count */ 1425 if (waitreloc.reg != RADEON_WAIT_UNTIL || 1426 waitreloc.count != 0) { 1427 DRM_ERROR("vline wait had illegal wait until segment\n"); 1428 return -EINVAL; 1429 } 1430 1431 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { 1432 DRM_ERROR("vline wait had illegal wait until\n"); 1433 return -EINVAL; 1434 } 1435 1436 /* jump over the NOP */ 1437 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); 1438 if (r) 1439 return r; 1440 1441 h_idx = p->idx - 2; 1442 p->idx += waitreloc.count + 2; 1443 p->idx += p3reloc.count + 2; 1444 1445 header = radeon_get_ib_value(p, h_idx); 1446 crtc_id = radeon_get_ib_value(p, h_idx + 5); 1447 reg = R100_CP_PACKET0_GET_REG(header); 1448 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1449 if (!obj) { 1450 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1451 return -EINVAL; 1452 } 1453 crtc = obj_to_crtc(obj); 1454 radeon_crtc = to_radeon_crtc(crtc); 1455 crtc_id = radeon_crtc->crtc_id; 1456 1457 if (!crtc->enabled) { 1458 /* if the CRTC isn't enabled - we need to nop out the wait until */ 1459 ib[h_idx + 2] = PACKET2(0); 1460 ib[h_idx + 3] = PACKET2(0); 1461 } else if (crtc_id == 1) { 1462 switch (reg) { 1463 case AVIVO_D1MODE_VLINE_START_END: 1464 header &= ~R300_CP_PACKET0_REG_MASK; 1465 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1466 break; 1467 case RADEON_CRTC_GUI_TRIG_VLINE: 1468 header &= ~R300_CP_PACKET0_REG_MASK; 1469 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 1470 break; 1471 default: 1472 DRM_ERROR("unknown crtc reloc\n"); 1473 return -EINVAL; 1474 } 1475 ib[h_idx] = header; 1476 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 1477 } 1478 1479 return 0; 1480 } 1481 1482 static int r100_get_vtx_size(uint32_t vtx_fmt) 1483 { 1484 int vtx_size; 1485 vtx_size = 2; 1486 /* ordered according to 
bits in spec */ 1487 if (vtx_fmt & RADEON_SE_VTX_FMT_W0) 1488 vtx_size++; 1489 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) 1490 vtx_size += 3; 1491 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) 1492 vtx_size++; 1493 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) 1494 vtx_size++; 1495 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC) 1496 vtx_size += 3; 1497 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG) 1498 vtx_size++; 1499 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC) 1500 vtx_size++; 1501 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0) 1502 vtx_size += 2; 1503 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1) 1504 vtx_size += 2; 1505 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1) 1506 vtx_size++; 1507 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2) 1508 vtx_size += 2; 1509 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2) 1510 vtx_size++; 1511 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3) 1512 vtx_size += 2; 1513 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3) 1514 vtx_size++; 1515 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0) 1516 vtx_size++; 1517 /* blend weight */ 1518 if (vtx_fmt & (0x7 << 15)) 1519 vtx_size += (vtx_fmt >> 15) & 0x7; 1520 if (vtx_fmt & RADEON_SE_VTX_FMT_N0) 1521 vtx_size += 3; 1522 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1) 1523 vtx_size += 2; 1524 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1) 1525 vtx_size++; 1526 if (vtx_fmt & RADEON_SE_VTX_FMT_W1) 1527 vtx_size++; 1528 if (vtx_fmt & RADEON_SE_VTX_FMT_N1) 1529 vtx_size++; 1530 if (vtx_fmt & RADEON_SE_VTX_FMT_Z) 1531 vtx_size++; 1532 return vtx_size; 1533 } 1534 1535 static int r100_packet0_check(struct radeon_cs_parser *p, 1536 struct radeon_cs_packet *pkt, 1537 unsigned idx, unsigned reg) 1538 { 1539 struct radeon_cs_reloc *reloc; 1540 struct r100_cs_track *track; 1541 volatile uint32_t *ib; 1542 uint32_t tmp; 1543 int r; 1544 int i, face; 1545 u32 tile_flags = 0; 1546 u32 idx_value; 1547 1548 ib = p->ib.ptr; 1549 track = (struct r100_cs_track *)p->track; 1550 1551 idx_value = radeon_get_ib_value(p, idx); 1552 1553 switch (reg) { 1554 case RADEON_CRTC_GUI_TRIG_VLINE: 1555 r = r100_cs_packet_parse_vline(p); 1556 if (r) { 1557 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1558 idx, reg); 1559 radeon_cs_dump_packet(p, pkt); 1560 return r; 1561 } 1562 break; 1563 /* FIXME: only allow PACKET3 blit? 
easier to check for out of 1564 * range access */ 1565 case RADEON_DST_PITCH_OFFSET: 1566 case RADEON_SRC_PITCH_OFFSET: 1567 r = r100_reloc_pitch_offset(p, pkt, idx, reg); 1568 if (r) 1569 return r; 1570 break; 1571 case RADEON_RB3D_DEPTHOFFSET: 1572 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1573 if (r) { 1574 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1575 idx, reg); 1576 radeon_cs_dump_packet(p, pkt); 1577 return r; 1578 } 1579 track->zb.robj = reloc->robj; 1580 track->zb.offset = idx_value; 1581 track->zb_dirty = true; 1582 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1583 break; 1584 case RADEON_RB3D_COLOROFFSET: 1585 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1586 if (r) { 1587 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1588 idx, reg); 1589 radeon_cs_dump_packet(p, pkt); 1590 return r; 1591 } 1592 track->cb[0].robj = reloc->robj; 1593 track->cb[0].offset = idx_value; 1594 track->cb_dirty = true; 1595 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1596 break; 1597 case RADEON_PP_TXOFFSET_0: 1598 case RADEON_PP_TXOFFSET_1: 1599 case RADEON_PP_TXOFFSET_2: 1600 i = (reg - RADEON_PP_TXOFFSET_0) / 24; 1601 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1602 if (r) { 1603 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1604 idx, reg); 1605 radeon_cs_dump_packet(p, pkt); 1606 return r; 1607 } 1608 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1609 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1610 tile_flags |= RADEON_TXO_MACRO_TILE; 1611 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1612 tile_flags |= RADEON_TXO_MICRO_TILE_X2; 1613 1614 tmp = idx_value & ~(0x7 << 2); 1615 tmp |= tile_flags; 1616 ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); 1617 } else 1618 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1619 track->textures[i].robj = reloc->robj; 1620 track->tex_dirty = true; 1621 break; 1622 case RADEON_PP_CUBIC_OFFSET_T0_0: 1623 case RADEON_PP_CUBIC_OFFSET_T0_1: 1624 case RADEON_PP_CUBIC_OFFSET_T0_2: 1625 case RADEON_PP_CUBIC_OFFSET_T0_3: 1626 case RADEON_PP_CUBIC_OFFSET_T0_4: 1627 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; 1628 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1629 if (r) { 1630 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1631 idx, reg); 1632 radeon_cs_dump_packet(p, pkt); 1633 return r; 1634 } 1635 track->textures[0].cube_info[i].offset = idx_value; 1636 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1637 track->textures[0].cube_info[i].robj = reloc->robj; 1638 track->tex_dirty = true; 1639 break; 1640 case RADEON_PP_CUBIC_OFFSET_T1_0: 1641 case RADEON_PP_CUBIC_OFFSET_T1_1: 1642 case RADEON_PP_CUBIC_OFFSET_T1_2: 1643 case RADEON_PP_CUBIC_OFFSET_T1_3: 1644 case RADEON_PP_CUBIC_OFFSET_T1_4: 1645 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; 1646 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1647 if (r) { 1648 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1649 idx, reg); 1650 radeon_cs_dump_packet(p, pkt); 1651 return r; 1652 } 1653 track->textures[1].cube_info[i].offset = idx_value; 1654 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1655 track->textures[1].cube_info[i].robj = reloc->robj; 1656 track->tex_dirty = true; 1657 break; 1658 case RADEON_PP_CUBIC_OFFSET_T2_0: 1659 case RADEON_PP_CUBIC_OFFSET_T2_1: 1660 case RADEON_PP_CUBIC_OFFSET_T2_2: 1661 case RADEON_PP_CUBIC_OFFSET_T2_3: 1662 case RADEON_PP_CUBIC_OFFSET_T2_4: 1663 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; 1664 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1665 if (r) { 1666 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1667 idx, reg); 1668 radeon_cs_dump_packet(p, 
pkt); 1669 return r; 1670 } 1671 track->textures[2].cube_info[i].offset = idx_value; 1672 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1673 track->textures[2].cube_info[i].robj = reloc->robj; 1674 track->tex_dirty = true; 1675 break; 1676 case RADEON_RE_WIDTH_HEIGHT: 1677 track->maxy = ((idx_value >> 16) & 0x7FF); 1678 track->cb_dirty = true; 1679 track->zb_dirty = true; 1680 break; 1681 case RADEON_RB3D_COLORPITCH: 1682 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1683 if (r) { 1684 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1685 idx, reg); 1686 radeon_cs_dump_packet(p, pkt); 1687 return r; 1688 } 1689 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1690 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1691 tile_flags |= RADEON_COLOR_TILE_ENABLE; 1692 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1693 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1694 1695 tmp = idx_value & ~(0x7 << 16); 1696 tmp |= tile_flags; 1697 ib[idx] = tmp; 1698 } else 1699 ib[idx] = idx_value; 1700 1701 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1702 track->cb_dirty = true; 1703 break; 1704 case RADEON_RB3D_DEPTHPITCH: 1705 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1706 track->zb_dirty = true; 1707 break; 1708 case RADEON_RB3D_CNTL: 1709 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1710 case 7: 1711 case 8: 1712 case 9: 1713 case 11: 1714 case 12: 1715 track->cb[0].cpp = 1; 1716 break; 1717 case 3: 1718 case 4: 1719 case 15: 1720 track->cb[0].cpp = 2; 1721 break; 1722 case 6: 1723 track->cb[0].cpp = 4; 1724 break; 1725 default: 1726 DRM_ERROR("Invalid color buffer format (%d) !\n", 1727 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1728 return -EINVAL; 1729 } 1730 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 1731 track->cb_dirty = true; 1732 track->zb_dirty = true; 1733 break; 1734 case RADEON_RB3D_ZSTENCILCNTL: 1735 switch (idx_value & 0xf) { 1736 case 0: 1737 track->zb.cpp = 2; 1738 break; 1739 case 2: 1740 case 3: 1741 case 4: 1742 case 5: 1743 case 9: 1744 case 11: 1745 track->zb.cpp = 4; 1746 break; 1747 default: 1748 break; 1749 } 1750 track->zb_dirty = true; 1751 break; 1752 case RADEON_RB3D_ZPASS_ADDR: 1753 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1754 if (r) { 1755 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1756 idx, reg); 1757 radeon_cs_dump_packet(p, pkt); 1758 return r; 1759 } 1760 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1761 break; 1762 case RADEON_PP_CNTL: 1763 { 1764 uint32_t temp = idx_value >> 4; 1765 for (i = 0; i < track->num_texture; i++) 1766 track->textures[i].enabled = !!(temp & (1 << i)); 1767 track->tex_dirty = true; 1768 } 1769 break; 1770 case RADEON_SE_VF_CNTL: 1771 track->vap_vf_cntl = idx_value; 1772 break; 1773 case RADEON_SE_VTX_FMT: 1774 track->vtx_size = r100_get_vtx_size(idx_value); 1775 break; 1776 case RADEON_PP_TEX_SIZE_0: 1777 case RADEON_PP_TEX_SIZE_1: 1778 case RADEON_PP_TEX_SIZE_2: 1779 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1780 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 1781 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1782 track->tex_dirty = true; 1783 break; 1784 case RADEON_PP_TEX_PITCH_0: 1785 case RADEON_PP_TEX_PITCH_1: 1786 case RADEON_PP_TEX_PITCH_2: 1787 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1788 track->textures[i].pitch = idx_value + 32; 1789 track->tex_dirty = true; 1790 break; 1791 case RADEON_PP_TXFILTER_0: 1792 case RADEON_PP_TXFILTER_1: 1793 case RADEON_PP_TXFILTER_2: 1794 i = (reg 
- RADEON_PP_TXFILTER_0) / 24; 1795 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) 1796 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1797 tmp = (idx_value >> 23) & 0x7; 1798 if (tmp == 2 || tmp == 6) 1799 track->textures[i].roundup_w = false; 1800 tmp = (idx_value >> 27) & 0x7; 1801 if (tmp == 2 || tmp == 6) 1802 track->textures[i].roundup_h = false; 1803 track->tex_dirty = true; 1804 break; 1805 case RADEON_PP_TXFORMAT_0: 1806 case RADEON_PP_TXFORMAT_1: 1807 case RADEON_PP_TXFORMAT_2: 1808 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1809 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1810 track->textures[i].use_pitch = 1; 1811 } else { 1812 track->textures[i].use_pitch = 0; 1813 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1814 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1815 } 1816 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1817 track->textures[i].tex_coord_type = 2; 1818 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { 1819 case RADEON_TXFORMAT_I8: 1820 case RADEON_TXFORMAT_RGB332: 1821 case RADEON_TXFORMAT_Y8: 1822 track->textures[i].cpp = 1; 1823 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1824 break; 1825 case RADEON_TXFORMAT_AI88: 1826 case RADEON_TXFORMAT_ARGB1555: 1827 case RADEON_TXFORMAT_RGB565: 1828 case RADEON_TXFORMAT_ARGB4444: 1829 case RADEON_TXFORMAT_VYUY422: 1830 case RADEON_TXFORMAT_YVYU422: 1831 case RADEON_TXFORMAT_SHADOW16: 1832 case RADEON_TXFORMAT_LDUDV655: 1833 case RADEON_TXFORMAT_DUDV88: 1834 track->textures[i].cpp = 2; 1835 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1836 break; 1837 case RADEON_TXFORMAT_ARGB8888: 1838 case RADEON_TXFORMAT_RGBA8888: 1839 case RADEON_TXFORMAT_SHADOW32: 1840 case RADEON_TXFORMAT_LDUDUV8888: 1841 track->textures[i].cpp = 4; 1842 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1843 break; 1844 case RADEON_TXFORMAT_DXT1: 1845 track->textures[i].cpp = 1; 1846 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 1847 break; 1848 case RADEON_TXFORMAT_DXT23: 1849 case RADEON_TXFORMAT_DXT45: 1850 track->textures[i].cpp = 1; 1851 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 1852 break; 1853 } 1854 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1855 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1856 track->tex_dirty = true; 1857 break; 1858 case RADEON_PP_CUBIC_FACES_0: 1859 case RADEON_PP_CUBIC_FACES_1: 1860 case RADEON_PP_CUBIC_FACES_2: 1861 tmp = idx_value; 1862 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1863 for (face = 0; face < 4; face++) { 1864 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1865 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1866 } 1867 track->tex_dirty = true; 1868 break; 1869 default: 1870 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1871 reg, idx); 1872 return -EINVAL; 1873 } 1874 return 0; 1875 } 1876 1877 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1878 struct radeon_cs_packet *pkt, 1879 struct radeon_bo *robj) 1880 { 1881 unsigned idx; 1882 u32 value; 1883 idx = pkt->idx + 1; 1884 value = radeon_get_ib_value(p, idx + 2); 1885 if ((value + 1) > radeon_bo_size(robj)) { 1886 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1887 "(need %u have %lu) !\n", 1888 value + 1, 1889 radeon_bo_size(robj)); 1890 return -EINVAL; 1891 } 1892 return 0; 1893 } 
1894 1895 static int r100_packet3_check(struct radeon_cs_parser *p, 1896 struct radeon_cs_packet *pkt) 1897 { 1898 struct radeon_cs_reloc *reloc; 1899 struct r100_cs_track *track; 1900 unsigned idx; 1901 volatile uint32_t *ib; 1902 int r; 1903 1904 ib = p->ib.ptr; 1905 idx = pkt->idx + 1; 1906 track = (struct r100_cs_track *)p->track; 1907 switch (pkt->opcode) { 1908 case PACKET3_3D_LOAD_VBPNTR: 1909 r = r100_packet3_load_vbpntr(p, pkt, idx); 1910 if (r) 1911 return r; 1912 break; 1913 case PACKET3_INDX_BUFFER: 1914 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1915 if (r) { 1916 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1917 radeon_cs_dump_packet(p, pkt); 1918 return r; 1919 } 1920 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); 1921 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1922 if (r) { 1923 return r; 1924 } 1925 break; 1926 case 0x23: 1927 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1928 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1929 if (r) { 1930 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1931 radeon_cs_dump_packet(p, pkt); 1932 return r; 1933 } 1934 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); 1935 track->num_arrays = 1; 1936 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 1937 1938 track->arrays[0].robj = reloc->robj; 1939 track->arrays[0].esize = track->vtx_size; 1940 1941 track->max_indx = radeon_get_ib_value(p, idx+1); 1942 1943 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 1944 track->immd_dwords = pkt->count - 1; 1945 r = r100_cs_track_check(p->rdev, track); 1946 if (r) 1947 return r; 1948 break; 1949 case PACKET3_3D_DRAW_IMMD: 1950 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1951 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1952 return -EINVAL; 1953 } 1954 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 1955 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1956 track->immd_dwords = pkt->count - 1; 1957 r = r100_cs_track_check(p->rdev, track); 1958 if (r) 1959 return r; 1960 break; 1961 /* triggers drawing using in-packet vertex data */ 1962 case PACKET3_3D_DRAW_IMMD_2: 1963 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1964 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1965 return -EINVAL; 1966 } 1967 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1968 track->immd_dwords = pkt->count; 1969 r = r100_cs_track_check(p->rdev, track); 1970 if (r) 1971 return r; 1972 break; 1973 /* triggers drawing using in-packet vertex data */ 1974 case PACKET3_3D_DRAW_VBUF_2: 1975 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1976 r = r100_cs_track_check(p->rdev, track); 1977 if (r) 1978 return r; 1979 break; 1980 /* triggers drawing of vertex buffers setup elsewhere */ 1981 case PACKET3_3D_DRAW_INDX_2: 1982 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 1983 r = r100_cs_track_check(p->rdev, track); 1984 if (r) 1985 return r; 1986 break; 1987 /* triggers drawing using indices to vertex buffer */ 1988 case PACKET3_3D_DRAW_VBUF: 1989 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1990 r = r100_cs_track_check(p->rdev, track); 1991 if (r) 1992 return r; 1993 break; 1994 /* triggers drawing of vertex buffers setup elsewhere */ 1995 case PACKET3_3D_DRAW_INDX: 1996 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1997 r = r100_cs_track_check(p->rdev, track); 1998 if (r) 1999 return r; 2000 break; 2001 /* triggers drawing using indices to vertex buffer */ 2002 case PACKET3_3D_CLEAR_HIZ: 2003 case 
PACKET3_3D_CLEAR_ZMASK: 2004 if (p->rdev->hyperz_filp != p->filp) 2005 return -EINVAL; 2006 break; 2007 case PACKET3_NOP: 2008 break; 2009 default: 2010 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2011 return -EINVAL; 2012 } 2013 return 0; 2014 } 2015 2016 int r100_cs_parse(struct radeon_cs_parser *p) 2017 { 2018 struct radeon_cs_packet pkt; 2019 struct r100_cs_track *track; 2020 int r; 2021 2022 track = kzalloc(sizeof(*track), GFP_KERNEL); 2023 if (!track) 2024 return -ENOMEM; 2025 r100_cs_track_clear(p->rdev, track); 2026 p->track = track; 2027 do { 2028 r = radeon_cs_packet_parse(p, &pkt, p->idx); 2029 if (r) { 2030 kfree(p->track); 2031 p->track = NULL; 2032 return r; 2033 } 2034 p->idx += pkt.count + 2; 2035 switch (pkt.type) { 2036 case RADEON_PACKET_TYPE0: 2037 if (p->rdev->family >= CHIP_R200) 2038 r = r100_cs_parse_packet0(p, &pkt, 2039 p->rdev->config.r100.reg_safe_bm, 2040 p->rdev->config.r100.reg_safe_bm_size, 2041 &r200_packet0_check); 2042 else 2043 r = r100_cs_parse_packet0(p, &pkt, 2044 p->rdev->config.r100.reg_safe_bm, 2045 p->rdev->config.r100.reg_safe_bm_size, 2046 &r100_packet0_check); 2047 break; 2048 case RADEON_PACKET_TYPE2: 2049 break; 2050 case RADEON_PACKET_TYPE3: 2051 r = r100_packet3_check(p, &pkt); 2052 break; 2053 default: 2054 DRM_ERROR("Unknown packet type %d !\n", 2055 pkt.type); 2056 kfree(p->track); 2057 p->track = NULL; 2058 return -EINVAL; 2059 } 2060 if (r) { 2061 kfree(p->track); 2062 p->track = NULL; 2063 return r; 2064 } 2065 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2066 kfree(p->track); 2067 p->track = NULL; 2068 return 0; 2069 } 2070 2071 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2072 { 2073 DRM_ERROR("pitch %d\n", t->pitch); 2074 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2075 DRM_ERROR("width %d\n", t->width); 2076 DRM_ERROR("width_11 %d\n", t->width_11); 2077 DRM_ERROR("height %d\n", t->height); 2078 DRM_ERROR("height_11 %d\n", t->height_11); 2079 DRM_ERROR("num levels %d\n", t->num_levels); 2080 DRM_ERROR("depth %d\n", t->txdepth); 2081 DRM_ERROR("bpp %d\n", t->cpp); 2082 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2083 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2084 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2085 DRM_ERROR("compress format %d\n", t->compress_format); 2086 } 2087 2088 static int r100_track_compress_size(int compress_format, int w, int h) 2089 { 2090 int block_width, block_height, block_bytes; 2091 int wblocks, hblocks; 2092 int min_wblocks; 2093 int sz; 2094 2095 block_width = 4; 2096 block_height = 4; 2097 2098 switch (compress_format) { 2099 case R100_TRACK_COMP_DXT1: 2100 block_bytes = 8; 2101 min_wblocks = 4; 2102 break; 2103 default: 2104 case R100_TRACK_COMP_DXT35: 2105 block_bytes = 16; 2106 min_wblocks = 2; 2107 break; 2108 } 2109 2110 hblocks = (h + block_height - 1) / block_height; 2111 wblocks = (w + block_width - 1) / block_width; 2112 if (wblocks < min_wblocks) 2113 wblocks = min_wblocks; 2114 sz = wblocks * hblocks * block_bytes; 2115 return sz; 2116 } 2117 2118 static int r100_cs_track_cube(struct radeon_device *rdev, 2119 struct r100_cs_track *track, unsigned idx) 2120 { 2121 unsigned face, w, h; 2122 struct radeon_bo *cube_robj; 2123 unsigned long size; 2124 unsigned compress_format = track->textures[idx].compress_format; 2125 2126 for (face = 0; face < 5; face++) { 2127 cube_robj = track->textures[idx].cube_info[face].robj; 2128 w = track->textures[idx].cube_info[face].width; 2129 h = 
track->textures[idx].cube_info[face].height; 2130 2131 if (compress_format) { 2132 size = r100_track_compress_size(compress_format, w, h); 2133 } else 2134 size = w * h; 2135 size *= track->textures[idx].cpp; 2136 2137 size += track->textures[idx].cube_info[face].offset; 2138 2139 if (size > radeon_bo_size(cube_robj)) { 2140 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2141 size, radeon_bo_size(cube_robj)); 2142 r100_cs_track_texture_print(&track->textures[idx]); 2143 return -1; 2144 } 2145 } 2146 return 0; 2147 } 2148 2149 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2150 struct r100_cs_track *track) 2151 { 2152 struct radeon_bo *robj; 2153 unsigned long size; 2154 unsigned u, i, w, h, d; 2155 int ret; 2156 2157 for (u = 0; u < track->num_texture; u++) { 2158 if (!track->textures[u].enabled) 2159 continue; 2160 if (track->textures[u].lookup_disable) 2161 continue; 2162 robj = track->textures[u].robj; 2163 if (robj == NULL) { 2164 DRM_ERROR("No texture bound to unit %u\n", u); 2165 return -EINVAL; 2166 } 2167 size = 0; 2168 for (i = 0; i <= track->textures[u].num_levels; i++) { 2169 if (track->textures[u].use_pitch) { 2170 if (rdev->family < CHIP_R300) 2171 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2172 else 2173 w = track->textures[u].pitch / (1 << i); 2174 } else { 2175 w = track->textures[u].width; 2176 if (rdev->family >= CHIP_RV515) 2177 w |= track->textures[u].width_11; 2178 w = w / (1 << i); 2179 if (track->textures[u].roundup_w) 2180 w = roundup_pow_of_two(w); 2181 } 2182 h = track->textures[u].height; 2183 if (rdev->family >= CHIP_RV515) 2184 h |= track->textures[u].height_11; 2185 h = h / (1 << i); 2186 if (track->textures[u].roundup_h) 2187 h = roundup_pow_of_two(h); 2188 if (track->textures[u].tex_coord_type == 1) { 2189 d = (1 << track->textures[u].txdepth) / (1 << i); 2190 if (!d) 2191 d = 1; 2192 } else { 2193 d = 1; 2194 } 2195 if (track->textures[u].compress_format) { 2196 2197 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2198 /* compressed textures are block based */ 2199 } else 2200 size += w * h * d; 2201 } 2202 size *= track->textures[u].cpp; 2203 2204 switch (track->textures[u].tex_coord_type) { 2205 case 0: 2206 case 1: 2207 break; 2208 case 2: 2209 if (track->separate_cube) { 2210 ret = r100_cs_track_cube(rdev, track, u); 2211 if (ret) 2212 return ret; 2213 } else 2214 size *= 6; 2215 break; 2216 default: 2217 DRM_ERROR("Invalid texture coordinate type %u for unit " 2218 "%u\n", track->textures[u].tex_coord_type, u); 2219 return -EINVAL; 2220 } 2221 if (size > radeon_bo_size(robj)) { 2222 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2223 "%lu\n", u, size, radeon_bo_size(robj)); 2224 r100_cs_track_texture_print(&track->textures[u]); 2225 return -EINVAL; 2226 } 2227 } 2228 return 0; 2229 } 2230 2231 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2232 { 2233 unsigned i; 2234 unsigned long size; 2235 unsigned prim_walk; 2236 unsigned nverts; 2237 unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; 2238 2239 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2240 !track->blend_read_enable) 2241 num_cb = 0; 2242 2243 for (i = 0; i < num_cb; i++) { 2244 if (track->cb[i].robj == NULL) { 2245 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2246 return -EINVAL; 2247 } 2248 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2249 size += track->cb[i].offset; 2250 if (size > radeon_bo_size(track->cb[i].robj)) { 2251 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2252 "(need %lu have %lu) !\n", i, size, 2253 radeon_bo_size(track->cb[i].robj)); 2254 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2255 i, track->cb[i].pitch, track->cb[i].cpp, 2256 track->cb[i].offset, track->maxy); 2257 return -EINVAL; 2258 } 2259 } 2260 track->cb_dirty = false; 2261 2262 if (track->zb_dirty && track->z_enabled) { 2263 if (track->zb.robj == NULL) { 2264 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2265 return -EINVAL; 2266 } 2267 size = track->zb.pitch * track->zb.cpp * track->maxy; 2268 size += track->zb.offset; 2269 if (size > radeon_bo_size(track->zb.robj)) { 2270 DRM_ERROR("[drm] Buffer too small for z buffer " 2271 "(need %lu have %lu) !\n", size, 2272 radeon_bo_size(track->zb.robj)); 2273 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2274 track->zb.pitch, track->zb.cpp, 2275 track->zb.offset, track->maxy); 2276 return -EINVAL; 2277 } 2278 } 2279 track->zb_dirty = false; 2280 2281 if (track->aa_dirty && track->aaresolve) { 2282 if (track->aa.robj == NULL) { 2283 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2284 return -EINVAL; 2285 } 2286 /* I believe the format comes from colorbuffer0. */ 2287 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2288 size += track->aa.offset; 2289 if (size > radeon_bo_size(track->aa.robj)) { 2290 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2291 "(need %lu have %lu) !\n", i, size, 2292 radeon_bo_size(track->aa.robj)); 2293 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2294 i, track->aa.pitch, track->cb[0].cpp, 2295 track->aa.offset, track->maxy); 2296 return -EINVAL; 2297 } 2298 } 2299 track->aa_dirty = false; 2300 2301 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2302 if (track->vap_vf_cntl & (1 << 14)) { 2303 nverts = track->vap_alt_nverts; 2304 } else { 2305 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2306 } 2307 switch (prim_walk) { 2308 case 1: 2309 for (i = 0; i < track->num_arrays; i++) { 2310 size = track->arrays[i].esize * track->max_indx * 4; 2311 if (track->arrays[i].robj == NULL) { 2312 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2313 "bound\n", prim_walk, i); 2314 return -EINVAL; 2315 } 2316 if (size > radeon_bo_size(track->arrays[i].robj)) { 2317 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2318 "need %lu dwords have %lu dwords\n", 2319 prim_walk, i, size >> 2, 2320 radeon_bo_size(track->arrays[i].robj) 2321 >> 2); 2322 DRM_ERROR("Max indices %u\n", track->max_indx); 2323 return -EINVAL; 2324 } 2325 } 2326 break; 2327 case 2: 2328 for (i = 0; i < track->num_arrays; i++) { 2329 size = track->arrays[i].esize * (nverts - 1) * 4; 2330 if (track->arrays[i].robj == NULL) { 2331 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2332 "bound\n", prim_walk, i); 2333 return -EINVAL; 2334 } 2335 if (size > radeon_bo_size(track->arrays[i].robj)) { 2336 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2337 "need %lu dwords have %lu dwords\n", 2338 prim_walk, i, size >> 2, 2339 radeon_bo_size(track->arrays[i].robj) 2340 >> 2); 2341 return -EINVAL; 2342 } 2343 } 
2344 break; 2345 case 3: 2346 size = track->vtx_size * nverts; 2347 if (size != track->immd_dwords) { 2348 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", 2349 track->immd_dwords, size); 2350 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2351 nverts, track->vtx_size); 2352 return -EINVAL; 2353 } 2354 break; 2355 default: 2356 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2357 prim_walk); 2358 return -EINVAL; 2359 } 2360 2361 if (track->tex_dirty) { 2362 track->tex_dirty = false; 2363 return r100_cs_track_texture_check(rdev, track); 2364 } 2365 return 0; 2366 } 2367 2368 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 2369 { 2370 unsigned i, face; 2371 2372 track->cb_dirty = true; 2373 track->zb_dirty = true; 2374 track->tex_dirty = true; 2375 track->aa_dirty = true; 2376 2377 if (rdev->family < CHIP_R300) { 2378 track->num_cb = 1; 2379 if (rdev->family <= CHIP_RS200) 2380 track->num_texture = 3; 2381 else 2382 track->num_texture = 6; 2383 track->maxy = 2048; 2384 track->separate_cube = 1; 2385 } else { 2386 track->num_cb = 4; 2387 track->num_texture = 16; 2388 track->maxy = 4096; 2389 track->separate_cube = 0; 2390 track->aaresolve = false; 2391 track->aa.robj = NULL; 2392 } 2393 2394 for (i = 0; i < track->num_cb; i++) { 2395 track->cb[i].robj = NULL; 2396 track->cb[i].pitch = 8192; 2397 track->cb[i].cpp = 16; 2398 track->cb[i].offset = 0; 2399 } 2400 track->z_enabled = true; 2401 track->zb.robj = NULL; 2402 track->zb.pitch = 8192; 2403 track->zb.cpp = 4; 2404 track->zb.offset = 0; 2405 track->vtx_size = 0x7F; 2406 track->immd_dwords = 0xFFFFFFFFUL; 2407 track->num_arrays = 11; 2408 track->max_indx = 0x00FFFFFFUL; 2409 for (i = 0; i < track->num_arrays; i++) { 2410 track->arrays[i].robj = NULL; 2411 track->arrays[i].esize = 0x7F; 2412 } 2413 for (i = 0; i < track->num_texture; i++) { 2414 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 2415 track->textures[i].pitch = 16536; 2416 track->textures[i].width = 16536; 2417 track->textures[i].height = 16536; 2418 track->textures[i].width_11 = 1 << 11; 2419 track->textures[i].height_11 = 1 << 11; 2420 track->textures[i].num_levels = 12; 2421 if (rdev->family <= CHIP_RS200) { 2422 track->textures[i].tex_coord_type = 0; 2423 track->textures[i].txdepth = 0; 2424 } else { 2425 track->textures[i].txdepth = 16; 2426 track->textures[i].tex_coord_type = 1; 2427 } 2428 track->textures[i].cpp = 64; 2429 track->textures[i].robj = NULL; 2430 /* CS IB emission code makes sure texture unit are disabled */ 2431 track->textures[i].enabled = false; 2432 track->textures[i].lookup_disable = false; 2433 track->textures[i].roundup_w = true; 2434 track->textures[i].roundup_h = true; 2435 if (track->separate_cube) 2436 for (face = 0; face < 5; face++) { 2437 track->textures[i].cube_info[face].robj = NULL; 2438 track->textures[i].cube_info[face].width = 16536; 2439 track->textures[i].cube_info[face].height = 16536; 2440 track->textures[i].cube_info[face].offset = 0; 2441 } 2442 } 2443 } 2444 2445 /* 2446 * Global GPU functions 2447 */ 2448 static void r100_errata(struct radeon_device *rdev) 2449 { 2450 rdev->pll_errata = 0; 2451 2452 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { 2453 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; 2454 } 2455 2456 if (rdev->family == CHIP_RV100 || 2457 rdev->family == CHIP_RS100 || 2458 rdev->family == CHIP_RS200) { 2459 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; 2460 } 2461 } 2462 2463 static int r100_rbbm_fifo_wait_for_entry(struct 
radeon_device *rdev, unsigned n) 2464 { 2465 unsigned i; 2466 uint32_t tmp; 2467 2468 for (i = 0; i < rdev->usec_timeout; i++) { 2469 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2470 if (tmp >= n) { 2471 return 0; 2472 } 2473 DRM_UDELAY(1); 2474 } 2475 return -1; 2476 } 2477 2478 int r100_gui_wait_for_idle(struct radeon_device *rdev) 2479 { 2480 unsigned i; 2481 uint32_t tmp; 2482 2483 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2484 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" 2485 " Bad things might happen.\n"); 2486 } 2487 for (i = 0; i < rdev->usec_timeout; i++) { 2488 tmp = RREG32(RADEON_RBBM_STATUS); 2489 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2490 return 0; 2491 } 2492 DRM_UDELAY(1); 2493 } 2494 return -1; 2495 } 2496 2497 int r100_mc_wait_for_idle(struct radeon_device *rdev) 2498 { 2499 unsigned i; 2500 uint32_t tmp; 2501 2502 for (i = 0; i < rdev->usec_timeout; i++) { 2503 /* read MC_STATUS */ 2504 tmp = RREG32(RADEON_MC_STATUS); 2505 if (tmp & RADEON_MC_IDLE) { 2506 return 0; 2507 } 2508 DRM_UDELAY(1); 2509 } 2510 return -1; 2511 } 2512 2513 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2514 { 2515 u32 rbbm_status; 2516 2517 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2518 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2519 radeon_ring_lockup_update(ring); 2520 return false; 2521 } 2522 /* force CP activities */ 2523 radeon_ring_force_activity(rdev, ring); 2524 return radeon_ring_test_lockup(rdev, ring); 2525 } 2526 2527 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2528 void r100_enable_bm(struct radeon_device *rdev) 2529 { 2530 uint32_t tmp; 2531 /* Enable bus mastering */ 2532 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2533 WREG32(RADEON_BUS_CNTL, tmp); 2534 } 2535 2536 void r100_bm_disable(struct radeon_device *rdev) 2537 { 2538 u32 tmp; 2539 2540 /* disable bus mastering */ 2541 tmp = RREG32(R_000030_BUS_CNTL); 2542 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2543 mdelay(1); 2544 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2545 mdelay(1); 2546 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2547 tmp = RREG32(RADEON_BUS_CNTL); 2548 mdelay(1); 2549 pci_disable_busmaster(rdev->dev); 2550 mdelay(1); 2551 } 2552 2553 int r100_asic_reset(struct radeon_device *rdev) 2554 { 2555 struct r100_mc_save save; 2556 u32 status, tmp; 2557 int ret = 0; 2558 2559 status = RREG32(R_000E40_RBBM_STATUS); 2560 if (!G_000E40_GUI_ACTIVE(status)) { 2561 return 0; 2562 } 2563 r100_mc_stop(rdev, &save); 2564 status = RREG32(R_000E40_RBBM_STATUS); 2565 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2566 /* stop CP */ 2567 WREG32(RADEON_CP_CSQ_CNTL, 0); 2568 tmp = RREG32(RADEON_CP_RB_CNTL); 2569 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2570 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2571 WREG32(RADEON_CP_RB_WPTR, 0); 2572 WREG32(RADEON_CP_RB_CNTL, tmp); 2573 /* save PCI state */ 2574 pci_save_state(device_get_parent(rdev->dev)); 2575 /* disable bus mastering */ 2576 r100_bm_disable(rdev); 2577 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2578 S_0000F0_SOFT_RESET_RE(1) | 2579 S_0000F0_SOFT_RESET_PP(1) | 2580 S_0000F0_SOFT_RESET_RB(1)); 2581 RREG32(R_0000F0_RBBM_SOFT_RESET); 2582 mdelay(500); 2583 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2584 mdelay(1); 2585 status = RREG32(R_000E40_RBBM_STATUS); 2586 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2587 /* reset CP */ 2588 
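/* Second stage of the soft reset: after pulsing the SE/RE/PP/RB engine blocks above, the CP block gets its own soft-reset pulse, followed by the same settle delays and another RBBM_STATUS read before PCI state and bus mastering are restored. */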
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); 2589 RREG32(R_0000F0_RBBM_SOFT_RESET); 2590 mdelay(500); 2591 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2592 mdelay(1); 2593 status = RREG32(R_000E40_RBBM_STATUS); 2594 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2595 /* restore PCI & busmastering */ 2596 pci_restore_state(device_get_parent(rdev->dev)); 2597 r100_enable_bm(rdev); 2598 /* Check if GPU is idle */ 2599 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2600 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2601 dev_err(rdev->dev, "failed to reset GPU\n"); 2602 ret = -1; 2603 } else 2604 dev_info(rdev->dev, "GPU reset succeeded\n"); 2605 r100_mc_resume(rdev, &save); 2606 return ret; 2607 } 2608 2609 void r100_set_common_regs(struct radeon_device *rdev) 2610 { 2611 struct drm_device *dev = rdev->ddev; 2612 bool force_dac2 = false; 2613 u32 tmp; 2614 2615 /* set these so they don't interfere with anything */ 2616 WREG32(RADEON_OV0_SCALE_CNTL, 0); 2617 WREG32(RADEON_SUBPIC_CNTL, 0); 2618 WREG32(RADEON_VIPH_CONTROL, 0); 2619 WREG32(RADEON_I2C_CNTL_1, 0); 2620 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 2621 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 2622 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 2623 2624 /* always set up dac2 on rn50 and some rv100 as lots 2625 * of servers seem to wire it up to a VGA port but 2626 * don't report it in the bios connector 2627 * table. 2628 */ 2629 switch (dev->pci_device) { 2630 /* RN50 */ 2631 case 0x515e: 2632 case 0x5969: 2633 force_dac2 = true; 2634 break; 2635 /* RV100 */ 2636 case 0x5159: 2637 case 0x515a: 2638 /* DELL triple head servers */ 2639 if ((dev->pci_subvendor == 0x1028 /* DELL */) && 2640 ((dev->pci_subdevice == 0x016c) || 2641 (dev->pci_subdevice == 0x016d) || 2642 (dev->pci_subdevice == 0x016e) || 2643 (dev->pci_subdevice == 0x016f) || 2644 (dev->pci_subdevice == 0x0170) || 2645 (dev->pci_subdevice == 0x017d) || 2646 (dev->pci_subdevice == 0x017e) || 2647 (dev->pci_subdevice == 0x0183) || 2648 (dev->pci_subdevice == 0x018a) || 2649 (dev->pci_subdevice == 0x019a))) 2650 force_dac2 = true; 2651 break; 2652 } 2653 2654 if (force_dac2) { 2655 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 2656 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 2657 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 2658 2659 /* For CRT on DAC2, don't turn it on if BIOS didn't 2660 enable it, even if it's detected.
2661 */ 2662 2663 /* force it to crtc0 */ 2664 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 2665 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 2666 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 2667 2668 /* set up the TV DAC */ 2669 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 2670 RADEON_TV_DAC_STD_MASK | 2671 RADEON_TV_DAC_RDACPD | 2672 RADEON_TV_DAC_GDACPD | 2673 RADEON_TV_DAC_BDACPD | 2674 RADEON_TV_DAC_BGADJ_MASK | 2675 RADEON_TV_DAC_DACADJ_MASK); 2676 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 2677 RADEON_TV_DAC_NHOLD | 2678 RADEON_TV_DAC_STD_PS2 | 2679 (0x58 << 16)); 2680 2681 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 2682 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 2683 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 2684 } 2685 2686 /* switch PM block to ACPI mode */ 2687 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); 2688 tmp &= ~RADEON_PM_MODE_SEL; 2689 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); 2690 2691 } 2692 2693 /* 2694 * VRAM info 2695 */ 2696 static void r100_vram_get_type(struct radeon_device *rdev) 2697 { 2698 uint32_t tmp; 2699 2700 rdev->mc.vram_is_ddr = false; 2701 if (rdev->flags & RADEON_IS_IGP) 2702 rdev->mc.vram_is_ddr = true; 2703 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 2704 rdev->mc.vram_is_ddr = true; 2705 if ((rdev->family == CHIP_RV100) || 2706 (rdev->family == CHIP_RS100) || 2707 (rdev->family == CHIP_RS200)) { 2708 tmp = RREG32(RADEON_MEM_CNTL); 2709 if (tmp & RV100_HALF_MODE) { 2710 rdev->mc.vram_width = 32; 2711 } else { 2712 rdev->mc.vram_width = 64; 2713 } 2714 if (rdev->flags & RADEON_SINGLE_CRTC) { 2715 rdev->mc.vram_width /= 4; 2716 rdev->mc.vram_is_ddr = true; 2717 } 2718 } else if (rdev->family <= CHIP_RV280) { 2719 tmp = RREG32(RADEON_MEM_CNTL); 2720 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 2721 rdev->mc.vram_width = 128; 2722 } else { 2723 rdev->mc.vram_width = 64; 2724 } 2725 } else { 2726 /* newer IGPs */ 2727 rdev->mc.vram_width = 128; 2728 } 2729 } 2730 2731 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 2732 { 2733 u32 aper_size; 2734 u8 byte; 2735 2736 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2737 2738 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 2739 * that is has the 2nd generation multifunction PCI interface 2740 */ 2741 if (rdev->family == CHIP_RV280 || 2742 rdev->family >= CHIP_RV350) { 2743 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 2744 ~RADEON_HDP_APER_CNTL); 2745 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 2746 return aper_size * 2; 2747 } 2748 2749 /* Older cards have all sorts of funny issues to deal with. First 2750 * check if it's a multifunction card by reading the PCI config 2751 * header type... Limit those to one aperture size 2752 */ 2753 byte = pci_read_config(rdev->dev, 0xe, 1); 2754 if (byte & 0x80) { 2755 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 2756 DRM_INFO("Limiting VRAM to one aperture\n"); 2757 return aper_size; 2758 } 2759 2760 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 2761 * have set it up. We don't write this as it's broken on some ASICs but 2762 * we expect the BIOS to have done the right thing (might be too optimistic...) 
2763 */ 2764 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 2765 return aper_size * 2; 2766 return aper_size; 2767 } 2768 2769 void r100_vram_init_sizes(struct radeon_device *rdev) 2770 { 2771 u64 config_aper_size; 2772 2773 /* work out accessible VRAM */ 2774 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 2775 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 2776 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); 2777 /* FIXME we don't use the second aperture yet when we could use it */ 2778 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2779 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2780 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2781 if (rdev->flags & RADEON_IS_IGP) { 2782 uint32_t tom; 2783 /* read NB_TOM to get the amount of ram stolen for the GPU */ 2784 tom = RREG32(RADEON_NB_TOM); 2785 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 2786 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2787 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2788 } else { 2789 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 2790 /* Some production boards of m6 will report 0 2791 * if it's 8 MB 2792 */ 2793 if (rdev->mc.real_vram_size == 0) { 2794 rdev->mc.real_vram_size = 8192 * 1024; 2795 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2796 } 2797 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 2798 * Novell bug 204882 + along with lots of ubuntu ones 2799 */ 2800 if (rdev->mc.aper_size > config_aper_size) 2801 config_aper_size = rdev->mc.aper_size; 2802 2803 if (config_aper_size > rdev->mc.real_vram_size) 2804 rdev->mc.mc_vram_size = config_aper_size; 2805 else 2806 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2807 } 2808 } 2809 2810 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2811 { 2812 uint32_t temp; 2813 2814 temp = RREG32(RADEON_CONFIG_CNTL); 2815 if (state == false) { 2816 temp &= ~RADEON_CFG_VGA_RAM_EN; 2817 temp |= RADEON_CFG_VGA_IO_DIS; 2818 } else { 2819 temp &= ~RADEON_CFG_VGA_IO_DIS; 2820 } 2821 WREG32(RADEON_CONFIG_CNTL, temp); 2822 } 2823 2824 static void r100_mc_init(struct radeon_device *rdev) 2825 { 2826 u64 base; 2827 2828 r100_vram_get_type(rdev); 2829 r100_vram_init_sizes(rdev); 2830 base = rdev->mc.aper_base; 2831 if (rdev->flags & RADEON_IS_IGP) 2832 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2833 radeon_vram_location(rdev, &rdev->mc, base); 2834 rdev->mc.gtt_base_align = 0; 2835 if (!(rdev->flags & RADEON_IS_AGP)) 2836 radeon_gtt_location(rdev, &rdev->mc); 2837 radeon_update_bandwidth_info(rdev); 2838 } 2839 2840 2841 /* 2842 * Indirect registers accessor 2843 */ 2844 void r100_pll_errata_after_index(struct radeon_device *rdev) 2845 { 2846 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { 2847 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2848 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2849 } 2850 } 2851 2852 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2853 { 2854 /* This workarounds is necessary on RV100, RS100 and RS200 chips 2855 * or the chip could hang on a subsequent access 2856 */ 2857 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2858 mdelay(5); 2859 } 2860 2861 /* This function is required to workaround a hardware bug in some (all?) 2862 * revisions of the R300. This workaround should be called after every 2863 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2864 * may not be correct. 
2865 */ 2866 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2867 uint32_t save, tmp; 2868 2869 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2870 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2871 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2872 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2873 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2874 } 2875 } 2876 2877 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2878 { 2879 uint32_t data; 2880 2881 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2882 r100_pll_errata_after_index(rdev); 2883 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2884 r100_pll_errata_after_data(rdev); 2885 return data; 2886 } 2887 2888 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2889 { 2890 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2891 r100_pll_errata_after_index(rdev); 2892 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2893 r100_pll_errata_after_data(rdev); 2894 } 2895 2896 static void r100_set_safe_registers(struct radeon_device *rdev) 2897 { 2898 if (ASIC_IS_RN50(rdev)) { 2899 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2900 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); 2901 } else if (rdev->family < CHIP_R200) { 2902 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2903 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 2904 } else { 2905 r200_set_safe_registers(rdev); 2906 } 2907 } 2908 2909 /* 2910 * Debugfs info 2911 */ 2912 #if defined(CONFIG_DEBUG_FS) 2913 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2914 { 2915 struct drm_info_node *node = (struct drm_info_node *) m->private; 2916 struct drm_device *dev = node->minor->dev; 2917 struct radeon_device *rdev = dev->dev_private; 2918 uint32_t reg, value; 2919 unsigned i; 2920 2921 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2922 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2923 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2924 for (i = 0; i < 64; i++) { 2925 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 2926 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 2927 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 2928 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 2929 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 2930 } 2931 return 0; 2932 } 2933 2934 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 2935 { 2936 struct drm_info_node *node = (struct drm_info_node *) m->private; 2937 struct drm_device *dev = node->minor->dev; 2938 struct radeon_device *rdev = dev->dev_private; 2939 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2940 uint32_t rdp, wdp; 2941 unsigned count, i, j; 2942 2943 radeon_ring_free_size(rdev, ring); 2944 rdp = RREG32(RADEON_CP_RB_RPTR); 2945 wdp = RREG32(RADEON_CP_RB_WPTR); 2946 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 2947 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2948 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2949 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2950 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2951 seq_printf(m, "%u dwords in ring\n", count); 2952 for (j = 0; j <= count; j++) { 2953 i = (rdp + j) & ring->ptr_mask; 2954 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2955 } 2956 return 0; 2957 } 2958 2959 2960 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 2961 { 2962 struct drm_info_node *node = (struct drm_info_node *) m->private; 2963 struct drm_device *dev = node->minor->dev; 2964 struct radeon_device *rdev = dev->dev_private; 2965 
uint32_t csq_stat, csq2_stat, tmp; 2966 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 2967 unsigned i; 2968 2969 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2970 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 2971 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 2972 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 2973 r_rptr = (csq_stat >> 0) & 0x3ff; 2974 r_wptr = (csq_stat >> 10) & 0x3ff; 2975 ib1_rptr = (csq_stat >> 20) & 0x3ff; 2976 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 2977 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 2978 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 2979 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 2980 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 2981 seq_printf(m, "Ring rptr %u\n", r_rptr); 2982 seq_printf(m, "Ring wptr %u\n", r_wptr); 2983 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 2984 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 2985 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 2986 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 2987 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 2988 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 2989 seq_printf(m, "Ring fifo:\n"); 2990 for (i = 0; i < 256; i++) { 2991 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 2992 tmp = RREG32(RADEON_CP_CSQ_DATA); 2993 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 2994 } 2995 seq_printf(m, "Indirect1 fifo:\n"); 2996 for (i = 256; i <= 512; i++) { 2997 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 2998 tmp = RREG32(RADEON_CP_CSQ_DATA); 2999 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3000 } 3001 seq_printf(m, "Indirect2 fifo:\n"); 3002 for (i = 640; i < ib1_wptr; i++) { 3003 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3004 tmp = RREG32(RADEON_CP_CSQ_DATA); 3005 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3006 } 3007 return 0; 3008 } 3009 3010 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3011 { 3012 struct drm_info_node *node = (struct drm_info_node *) m->private; 3013 struct drm_device *dev = node->minor->dev; 3014 struct radeon_device *rdev = dev->dev_private; 3015 uint32_t tmp; 3016 3017 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3018 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3019 tmp = RREG32(RADEON_MC_FB_LOCATION); 3020 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3021 tmp = RREG32(RADEON_BUS_CNTL); 3022 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3023 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3024 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3025 tmp = RREG32(RADEON_AGP_BASE); 3026 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3027 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3028 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3029 tmp = RREG32(0x01D0); 3030 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3031 tmp = RREG32(RADEON_AIC_LO_ADDR); 3032 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3033 tmp = RREG32(RADEON_AIC_HI_ADDR); 3034 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3035 tmp = RREG32(0x01E4); 3036 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3037 return 0; 3038 } 3039 3040 static struct drm_info_list r100_debugfs_rbbm_list[] = { 3041 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3042 }; 3043 3044 static struct drm_info_list r100_debugfs_cp_list[] = { 3045 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3046 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3047 }; 3048 3049 static struct drm_info_list r100_debugfs_mc_info_list[] = { 3050 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3051 }; 3052 #endif 3053 3054 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3055 { 3056 #if 
defined(CONFIG_DEBUG_FS) 3057 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 3058 #else 3059 return 0; 3060 #endif 3061 } 3062 3063 int r100_debugfs_cp_init(struct radeon_device *rdev) 3064 { 3065 #if defined(CONFIG_DEBUG_FS) 3066 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 3067 #else 3068 return 0; 3069 #endif 3070 } 3071 3072 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 3073 { 3074 #if defined(CONFIG_DEBUG_FS) 3075 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 3076 #else 3077 return 0; 3078 #endif 3079 } 3080 3081 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 3082 uint32_t tiling_flags, uint32_t pitch, 3083 uint32_t offset, uint32_t obj_size) 3084 { 3085 int surf_index = reg * 16; 3086 int flags = 0; 3087 3088 if (rdev->family <= CHIP_RS200) { 3089 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3090 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3091 flags |= RADEON_SURF_TILE_COLOR_BOTH; 3092 if (tiling_flags & RADEON_TILING_MACRO) 3093 flags |= RADEON_SURF_TILE_COLOR_MACRO; 3094 /* setting pitch to 0 disables tiling */ 3095 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3096 == 0) 3097 pitch = 0; 3098 } else if (rdev->family <= CHIP_RV280) { 3099 if (tiling_flags & (RADEON_TILING_MACRO)) 3100 flags |= R200_SURF_TILE_COLOR_MACRO; 3101 if (tiling_flags & RADEON_TILING_MICRO) 3102 flags |= R200_SURF_TILE_COLOR_MICRO; 3103 } else { 3104 if (tiling_flags & RADEON_TILING_MACRO) 3105 flags |= R300_SURF_TILE_MACRO; 3106 if (tiling_flags & RADEON_TILING_MICRO) 3107 flags |= R300_SURF_TILE_MICRO; 3108 } 3109 3110 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 3111 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 3112 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 3113 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 3114 3115 /* r100/r200 divide by 16 */ 3116 if (rdev->family < CHIP_R300) 3117 flags |= pitch / 16; 3118 else 3119 flags |= pitch / 8; 3120 3121 3122 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3123 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3124 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3125 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3126 return 0; 3127 } 3128 3129 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3130 { 3131 int surf_index = reg * 16; 3132 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3133 } 3134 3135 void r100_bandwidth_update(struct radeon_device *rdev) 3136 { 3137 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3138 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3139 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 3140 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3141 fixed20_12 memtcas_ff[8] = { 3142 dfixed_init(1), 3143 dfixed_init(2), 3144 dfixed_init(3), 3145 dfixed_init(0), 3146 dfixed_init_half(1), 3147 dfixed_init_half(2), 3148 dfixed_init(0), 3149 }; 3150 fixed20_12 memtcas_rs480_ff[8] = { 3151 dfixed_init(0), 3152 dfixed_init(1), 3153 dfixed_init(2), 3154 dfixed_init(3), 3155 dfixed_init(0), 3156 dfixed_init_half(1), 3157 dfixed_init_half(2), 3158 dfixed_init_half(3), 3159 }; 3160 fixed20_12 memtcas2_ff[8] = { 3161 dfixed_init(0), 3162 dfixed_init(1), 3163 dfixed_init(2), 3164 dfixed_init(3), 3165 dfixed_init(4), 3166 dfixed_init(5), 3167 dfixed_init(6), 3168 dfixed_init(7), 3169 }; 3170 fixed20_12 memtrbs[8] = { 3171 dfixed_init(1), 3172 
dfixed_init_half(1), 3173 dfixed_init(2), 3174 dfixed_init_half(2), 3175 dfixed_init(3), 3176 dfixed_init_half(3), 3177 dfixed_init(4), 3178 dfixed_init_half(4) 3179 }; 3180 fixed20_12 memtrbs_r4xx[8] = { 3181 dfixed_init(4), 3182 dfixed_init(5), 3183 dfixed_init(6), 3184 dfixed_init(7), 3185 dfixed_init(8), 3186 dfixed_init(9), 3187 dfixed_init(10), 3188 dfixed_init(11) 3189 }; 3190 fixed20_12 min_mem_eff; 3191 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3192 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3193 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, 3194 disp_drain_rate2, read_return_rate; 3195 fixed20_12 time_disp1_drop_priority; 3196 int c; 3197 int cur_size = 16; /* in octawords */ 3198 int critical_point = 0, critical_point2; 3199 /* uint32_t read_return_rate, time_disp1_drop_priority; */ 3200 int stop_req, max_stop_req; 3201 struct drm_display_mode *mode1 = NULL; 3202 struct drm_display_mode *mode2 = NULL; 3203 uint32_t pixel_bytes1 = 0; 3204 uint32_t pixel_bytes2 = 0; 3205 3206 radeon_update_display_priority(rdev); 3207 3208 if (rdev->mode_info.crtcs[0]->base.enabled) { 3209 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 3210 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8; 3211 } 3212 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3213 if (rdev->mode_info.crtcs[1]->base.enabled) { 3214 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 3215 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8; 3216 } 3217 } 3218 3219 min_mem_eff.full = dfixed_const_8(0); 3220 /* get modes */ 3221 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 3222 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 3223 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); 3224 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); 3225 /* check crtc enables */ 3226 if (mode2) 3227 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); 3228 if (mode1) 3229 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); 3230 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); 3231 } 3232 3233 /* 3234 * determine is there is enough bw for current mode 3235 */ 3236 sclk_ff = rdev->pm.sclk; 3237 mclk_ff = rdev->pm.mclk; 3238 3239 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 3240 temp_ff.full = dfixed_const(temp); 3241 mem_bw.full = dfixed_mul(mclk_ff, temp_ff); 3242 3243 pix_clk.full = 0; 3244 pix_clk2.full = 0; 3245 peak_disp_bw.full = 0; 3246 if (mode1) { 3247 temp_ff.full = dfixed_const(1000); 3248 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ 3249 pix_clk.full = dfixed_div(pix_clk, temp_ff); 3250 temp_ff.full = dfixed_const(pixel_bytes1); 3251 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); 3252 } 3253 if (mode2) { 3254 temp_ff.full = dfixed_const(1000); 3255 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ 3256 pix_clk2.full = dfixed_div(pix_clk2, temp_ff); 3257 temp_ff.full = dfixed_const(pixel_bytes2); 3258 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); 3259 } 3260 3261 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); 3262 if (peak_disp_bw.full >= mem_bw.full) { 3263 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 3264 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 3265 } 3266 3267 /* Get values from the EXT_MEM_CNTL register...converting its contents. 
*/ 3268 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3269 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3270 mem_trcd = ((temp >> 2) & 0x3) + 1; 3271 mem_trp = ((temp & 0x3)) + 1; 3272 mem_tras = ((temp & 0x70) >> 4) + 1; 3273 } else if (rdev->family == CHIP_R300 || 3274 rdev->family == CHIP_R350) { /* r300, r350 */ 3275 mem_trcd = (temp & 0x7) + 1; 3276 mem_trp = ((temp >> 8) & 0x7) + 1; 3277 mem_tras = ((temp >> 11) & 0xf) + 4; 3278 } else if (rdev->family == CHIP_RV350 || 3279 rdev->family <= CHIP_RV380) { 3280 /* rv3x0 */ 3281 mem_trcd = (temp & 0x7) + 3; 3282 mem_trp = ((temp >> 8) & 0x7) + 3; 3283 mem_tras = ((temp >> 11) & 0xf) + 6; 3284 } else if (rdev->family == CHIP_R420 || 3285 rdev->family == CHIP_R423 || 3286 rdev->family == CHIP_RV410) { 3287 /* r4xx */ 3288 mem_trcd = (temp & 0xf) + 3; 3289 if (mem_trcd > 15) 3290 mem_trcd = 15; 3291 mem_trp = ((temp >> 8) & 0xf) + 3; 3292 if (mem_trp > 15) 3293 mem_trp = 15; 3294 mem_tras = ((temp >> 12) & 0x1f) + 6; 3295 if (mem_tras > 31) 3296 mem_tras = 31; 3297 } else { /* RV200, R200 */ 3298 mem_trcd = (temp & 0x7) + 1; 3299 mem_trp = ((temp >> 8) & 0x7) + 1; 3300 mem_tras = ((temp >> 12) & 0xf) + 4; 3301 } 3302 /* convert to FF */ 3303 trcd_ff.full = dfixed_const(mem_trcd); 3304 trp_ff.full = dfixed_const(mem_trp); 3305 tras_ff.full = dfixed_const(mem_tras); 3306 3307 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 3308 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3309 data = (temp & (7 << 20)) >> 20; 3310 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3311 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3312 tcas_ff = memtcas_rs480_ff[data]; 3313 else 3314 tcas_ff = memtcas_ff[data]; 3315 } else 3316 tcas_ff = memtcas2_ff[data]; 3317 3318 if (rdev->family == CHIP_RS400 || 3319 rdev->family == CHIP_RS480) { 3320 /* extra cas latency stored in bits 23-25 0-4 clocks */ 3321 data = (temp >> 23) & 0x7; 3322 if (data < 5) 3323 tcas_ff.full += dfixed_const(data); 3324 } 3325 3326 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3327 /* on the R300, Tcas is included in Trbs. 3328 */ 3329 temp = RREG32(RADEON_MEM_CNTL); 3330 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3331 if (data == 1) { 3332 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3333 temp = RREG32(R300_MC_IND_INDEX); 3334 temp &= ~R300_MC_IND_ADDR_MASK; 3335 temp |= R300_MC_READ_CNTL_CD_mcind; 3336 WREG32(R300_MC_IND_INDEX, temp); 3337 temp = RREG32(R300_MC_IND_DATA); 3338 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3339 } else { 3340 temp = RREG32(R300_MC_READ_CNTL_AB); 3341 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3342 } 3343 } else { 3344 temp = RREG32(R300_MC_READ_CNTL_AB); 3345 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3346 } 3347 if (rdev->family == CHIP_RV410 || 3348 rdev->family == CHIP_R420 || 3349 rdev->family == CHIP_R423) 3350 trbs_ff = memtrbs_r4xx[data]; 3351 else 3352 trbs_ff = memtrbs[data]; 3353 tcas_ff.full += trbs_ff.full; 3354 } 3355 3356 sclk_eff_ff.full = sclk_ff.full; 3357 3358 if (rdev->flags & RADEON_IS_AGP) { 3359 fixed20_12 agpmode_ff; 3360 agpmode_ff.full = dfixed_const(radeon_agpmode); 3361 temp_ff.full = dfixed_const_666(16); 3362 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3363 } 3364 /* TODO PCIE lanes may affect this - agpmode == 16?? 
*/ 3365 3366 if (ASIC_IS_R300(rdev)) { 3367 sclk_delay_ff.full = dfixed_const(250); 3368 } else { 3369 if ((rdev->family == CHIP_RV100) || 3370 rdev->flags & RADEON_IS_IGP) { 3371 if (rdev->mc.vram_is_ddr) 3372 sclk_delay_ff.full = dfixed_const(41); 3373 else 3374 sclk_delay_ff.full = dfixed_const(33); 3375 } else { 3376 if (rdev->mc.vram_width == 128) 3377 sclk_delay_ff.full = dfixed_const(57); 3378 else 3379 sclk_delay_ff.full = dfixed_const(41); 3380 } 3381 } 3382 3383 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3384 3385 if (rdev->mc.vram_is_ddr) { 3386 if (rdev->mc.vram_width == 32) { 3387 k1.full = dfixed_const(40); 3388 c = 3; 3389 } else { 3390 k1.full = dfixed_const(20); 3391 c = 1; 3392 } 3393 } else { 3394 k1.full = dfixed_const(40); 3395 c = 3; 3396 } 3397 3398 temp_ff.full = dfixed_const(2); 3399 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3400 temp_ff.full = dfixed_const(c); 3401 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3402 temp_ff.full = dfixed_const(4); 3403 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3404 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3405 mc_latency_mclk.full += k1.full; 3406 3407 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3408 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3409 3410 /* 3411 HW cursor time assuming worst case of full size colour cursor. 3412 */ 3413 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3414 temp_ff.full += trcd_ff.full; 3415 if (temp_ff.full < tras_ff.full) 3416 temp_ff.full = tras_ff.full; 3417 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3418 3419 temp_ff.full = dfixed_const(cur_size); 3420 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3421 /* 3422 Find the total latency for the display data. 3423 */ 3424 disp_latency_overhead.full = dfixed_const(8); 3425 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3426 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3427 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3428 3429 if (mc_latency_mclk.full > mc_latency_sclk.full) 3430 disp_latency.full = mc_latency_mclk.full; 3431 else 3432 disp_latency.full = mc_latency_sclk.full; 3433 3434 /* setup Max GRPH_STOP_REQ default value */ 3435 if (ASIC_IS_RV100(rdev)) 3436 max_stop_req = 0x5c; 3437 else 3438 max_stop_req = 0x7c; 3439 3440 if (mode1) { 3441 /* CRTC1 3442 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3443 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3444 */ 3445 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3446 3447 if (stop_req > max_stop_req) 3448 stop_req = max_stop_req; 3449 3450 /* 3451 Find the drain rate of the display buffer. 3452 */ 3453 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3454 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3455 3456 /* 3457 Find the critical point of the display buffer. 3458 */ 3459 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3460 crit_point_ff.full += dfixed_const_half(0); 3461 3462 critical_point = dfixed_trunc(crit_point_ff); 3463 3464 if (rdev->disp_priority == 2) { 3465 critical_point = 0; 3466 } 3467 3468 /* 3469 The critical point should never be above max_stop_req-4. Setting 3470 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 
3471 */ 3472 if (max_stop_req - critical_point < 4) 3473 critical_point = 0; 3474 3475 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3476 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 3477 critical_point = 0x10; 3478 } 3479 3480 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3481 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3482 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3483 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3484 if ((rdev->family == CHIP_R350) && 3485 (stop_req > 0x15)) { 3486 stop_req -= 0x10; 3487 } 3488 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3489 temp |= RADEON_GRPH_BUFFER_SIZE; 3490 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3491 RADEON_GRPH_CRITICAL_AT_SOF | 3492 RADEON_GRPH_STOP_CNTL); 3493 /* 3494 Write the result into the register. 3495 */ 3496 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3497 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3498 3499 #if 0 3500 if ((rdev->family == CHIP_RS400) || 3501 (rdev->family == CHIP_RS480)) { 3502 /* attempt to program RS400 disp regs correctly ??? */ 3503 temp = RREG32(RS400_DISP1_REG_CNTL); 3504 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3505 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3506 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3507 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3508 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3509 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3510 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3511 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3512 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3513 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3514 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3515 } 3516 #endif 3517 3518 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", 3519 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3520 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3521 } 3522 3523 if (mode2) { 3524 u32 grph2_cntl; 3525 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3526 3527 if (stop_req > max_stop_req) 3528 stop_req = max_stop_req; 3529 3530 /* 3531 Find the drain rate of the display buffer. 
3532 */ 3533 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3534 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3535 3536 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3537 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3538 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3539 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3540 if ((rdev->family == CHIP_R350) && 3541 (stop_req > 0x15)) { 3542 stop_req -= 0x10; 3543 } 3544 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3545 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3546 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3547 RADEON_GRPH_CRITICAL_AT_SOF | 3548 RADEON_GRPH_STOP_CNTL); 3549 3550 if ((rdev->family == CHIP_RS100) || 3551 (rdev->family == CHIP_RS200)) 3552 critical_point2 = 0; 3553 else { 3554 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3555 temp_ff.full = dfixed_const(temp); 3556 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3557 if (sclk_ff.full < temp_ff.full) 3558 temp_ff.full = sclk_ff.full; 3559 3560 read_return_rate.full = temp_ff.full; 3561 3562 if (mode1) { 3563 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3564 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3565 } else { 3566 time_disp1_drop_priority.full = 0; 3567 } 3568 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3569 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3570 crit_point_ff.full += dfixed_const_half(0); 3571 3572 critical_point2 = dfixed_trunc(crit_point_ff); 3573 3574 if (rdev->disp_priority == 2) { 3575 critical_point2 = 0; 3576 } 3577 3578 if (max_stop_req - critical_point2 < 4) 3579 critical_point2 = 0; 3580 3581 } 3582 3583 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3584 /* some R300 cards have problem with this set to 0 */ 3585 critical_point2 = 0x10; 3586 } 3587 3588 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3589 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3590 3591 if ((rdev->family == CHIP_RS400) || 3592 (rdev->family == CHIP_RS480)) { 3593 #if 0 3594 /* attempt to program RS400 disp2 regs correctly ??? 
*/ 3595 temp = RREG32(RS400_DISP2_REQ_CNTL1); 3596 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | 3597 RS400_DISP2_STOP_REQ_LEVEL_MASK); 3598 WREG32(RS400_DISP2_REQ_CNTL1, (temp | 3599 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3600 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3601 temp = RREG32(RS400_DISP2_REQ_CNTL2); 3602 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | 3603 RS400_DISP2_CRITICAL_POINT_STOP_MASK); 3604 WREG32(RS400_DISP2_REQ_CNTL2, (temp | 3605 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | 3606 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); 3607 #endif 3608 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); 3609 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); 3610 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); 3611 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); 3612 } 3613 3614 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", 3615 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 3616 } 3617 } 3618 3619 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 3620 { 3621 uint32_t scratch; 3622 uint32_t tmp = 0; 3623 unsigned i; 3624 int r; 3625 3626 r = radeon_scratch_get(rdev, &scratch); 3627 if (r) { 3628 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 3629 return r; 3630 } 3631 WREG32(scratch, 0xCAFEDEAD); 3632 r = radeon_ring_lock(rdev, ring, 2); 3633 if (r) { 3634 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3635 radeon_scratch_free(rdev, scratch); 3636 return r; 3637 } 3638 radeon_ring_write(ring, PACKET0(scratch, 0)); 3639 radeon_ring_write(ring, 0xDEADBEEF); 3640 radeon_ring_unlock_commit(rdev, ring); 3641 for (i = 0; i < rdev->usec_timeout; i++) { 3642 tmp = RREG32(scratch); 3643 if (tmp == 0xDEADBEEF) { 3644 break; 3645 } 3646 DRM_UDELAY(1); 3647 } 3648 if (i < rdev->usec_timeout) { 3649 DRM_INFO("ring test succeeded in %d usecs\n", i); 3650 } else { 3651 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", 3652 scratch, tmp); 3653 r = -EINVAL; 3654 } 3655 radeon_scratch_free(rdev, scratch); 3656 return r; 3657 } 3658 3659 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3660 { 3661 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3662 3663 if (ring->rptr_save_reg) { 3664 u32 next_rptr = ring->wptr + 2 + 3; 3665 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); 3666 radeon_ring_write(ring, next_rptr); 3667 } 3668 3669 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); 3670 radeon_ring_write(ring, ib->gpu_addr); 3671 radeon_ring_write(ring, ib->length_dw); 3672 } 3673 3674 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3675 { 3676 struct radeon_ib ib; 3677 uint32_t scratch; 3678 uint32_t tmp = 0; 3679 unsigned i; 3680 int r; 3681 3682 r = radeon_scratch_get(rdev, &scratch); 3683 if (r) { 3684 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3685 return r; 3686 } 3687 WREG32(scratch, 0xCAFEDEAD); 3688 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); 3689 if (r) { 3690 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3691 goto free_scratch; 3692 } 3693 ib.ptr[0] = PACKET0(scratch, 0); 3694 ib.ptr[1] = 0xDEADBEEF; 3695 ib.ptr[2] = PACKET2(0); 3696 ib.ptr[3] = PACKET2(0); 3697 ib.ptr[4] = PACKET2(0); 3698 ib.ptr[5] = PACKET2(0); 3699 ib.ptr[6] = PACKET2(0); 3700 ib.ptr[7] = PACKET2(0); 3701 ib.length_dw = 8; 3702 r = radeon_ib_schedule(rdev, &ib, NULL); 3703 if (r) { 3704 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3705 goto free_ib; 3706 } 3707 r = 
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

/**
 * r100_mc_stop - stop all memory controller clients.
 *
 * @rdev: radeon_device pointer
 * @save: structure receiving the CRTC state to restore later
 *
 * Shuts down the CP and disables the cursor, overlay and CRTC display
 * requests, saving the touched registers so r100_mc_resume() can put
 * them back.
 */
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP. We shouldn't need to do this, but better safe
	 * than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
					S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
			S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
						S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
			S_0003F8_CRTC2_DISPLAY_DIS(1) |
			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

/**
 * r100_mc_resume - restore the state saved by r100_mc_stop.
 *
 * @rdev: radeon_device pointer
 * @save: structure holding the CRTC state saved by r100_mc_stop()
 *
 * Points the CRTCs at the (possibly relocated) VRAM base and restores
 * the saved CRTC registers.
 */
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

/**
 * r100_vga_render_disable - disable VGA RAM access.
 *
 * @rdev: radeon_device pointer
 *
 * Clears the VGA_RAM_EN bit so legacy VGA rendering no longer touches
 * memory.
 */
void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}

static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stops all mc clients */
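	/*
	 * The FB/AGP apertures must not be moved while display or CP
	 * clients can still issue memory requests, so everything is
	 * quiesced here and restored by r100_mc_resume() afterwards.
	 */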
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
				upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program MC; this should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r100_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However doing our init sequence with the CP and
 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	}
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_RB_CNTL, 0);
	}
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp) {
		WREG32(RADEON_SCRATCH_UMSK, 0);
	}
}

int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

/*
 * MMIO register accessors: registers above the directly mapped range
 * (or any access with always_indirect set) go through the
 * MM_INDEX/MM_DATA pair under mmio_idx_lock.
 */
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		return bus_read_4(rdev->rmmio, reg);
	else {
		uint32_t ret;

		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
		spin_unlock(&rdev->mmio_idx_lock);

		return ret;
	}
}

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		bus_write_4(rdev->rmmio, reg, v);
	else {
		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
		spin_unlock(&rdev->mmio_idx_lock);
	}
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size)
		return bus_read_4(rdev->rio_mem, reg);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size)
		bus_write_4(rdev->rio_mem, reg, v);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
	}
}