/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/r100.c 255573 2013-09-14 17:24:41Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeonkmsfw_R100_cp"
#define FIRMWARE_R200		"radeonkmsfw_R200_cp"
#define FIRMWARE_R300		"radeonkmsfw_R300_cp"
#define FIRMWARE_R420		"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690		"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600		"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520		"radeonkmsfw_R520_cp"

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and others in some cases.
 */

/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	int i;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
					break;
				DRM_UDELAY(1);
			}
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
					break;
				DRM_UDELAY(1);
			}
		}
	} else {
		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
					break;
				DRM_UDELAY(1);
			}
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
					break;
				DRM_UDELAY(1);
			}
		}
	}
}
/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		DRM_UDELAY(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
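/*
 * Usage sketch, not driver code: a flip on these parts is the three
 * callbacks above in order, bracketing the vblank interrupt; the real
 * sequencing lives in the pageflip ioctl path and the vblank handler.
 *
 *	r100_pre_page_flip(rdev, crtc);     - take the vblank irq reference
 *	update_pending = r100_page_flip(rdev, crtc, new_base);
 *	... once update_pending reads back zero the flip has latched
 *	    and the old buffer may be unpinned ...
 *	r100_post_page_flip(rdev, crtc);    - drop the vblank irq reference
 */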
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for not defined action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
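/*
 * Worked example for the selection above, assuming a hypothetical table
 * of three states [0]=low, [1]=mid, [2]=high with current index 1:
 * DYNPM_ACTION_DOWNCLOCK requests index 0, DYNPM_ACTION_UPCLOCK requests
 * index 2, and either action taken at the corresponding end of the table
 * keeps the current index and clears dynpm_can_downclock/_upclock.  With
 * more than one active crtc the scan additionally skips any state flagged
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY.
 */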
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				DRM_UDELAY(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				DRM_UDELAY(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}
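/*
 * Note on the VOLTAGE_DELAY_SEL encoding above (an interpretation, not
 * from the original comments): voltage->delay is in microseconds, the
 * same value handed to DRM_UDELAY() in the GPIO path, but the PLL block
 * only knows four discrete settings, so exactly 33/66/99/132 map to
 * SEL(0)..SEL(3) and any other value leaves the field cleared by the
 * earlier ~VOLTAGE_DELAY_SEL(3).
 */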
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}
/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* It seems the hw only caches one entry, so we should discard
	 * that entry; otherwise, if the first GPU GART read hits it,
	 * the access could end up at the wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		DRM_ERROR("R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
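/*
 * Table format note: each PCI GART entry is one little-endian 32-bit
 * word holding the bus address of a GPU page, which is why
 * r100_pci_gart_init() above sizes the table as num_gpu_pages * 4 bytes
 * and r100_pci_gart_set_page() below stores entries with cpu_to_le32().
 * As a sketch of the translation the hardware performs via AIC_PT_BASE:
 *
 *	idx = (gpu_addr - rdev->mc.gtt_start) / RADEON_GPU_PAGE_SIZE;
 *	bus = le32_to_cpu(((u32 *)rdev->gart.ptr)[idx]);
 */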
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	u32 *gtt = rdev->gart.ptr;

	/* valid entries are 0 .. num_gpu_pages - 1; the table holds exactly
	 * num_gpu_pages dwords, so i == num_gpu_pages must be rejected too */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	gtt[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	DRM_MDELAY(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

irqreturn_t r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				DRM_WAKEUP(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				DRM_WAKEUP(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	if (queue_hotplug)
		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}
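/*
 * The handler above uses the usual ack-and-loop pattern for latched
 * status bits: r100_irq_ack() writes the pending bits back to
 * GEN_INT_STATUS to clear them and returns the masked subset, and
 * r100_irq_process() re-polls until the status reads zero so a source
 * that fires while the handler runs is not lost.
 */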
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are the ib scheduler and the
 * buffer move code) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	panic("%s: Unused on older asics", __func__);
}
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return r;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring);
}
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG_KMS("\n");

	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = 0;
	rdev->me_fw = firmware_get(fw_name);
	if (rdev->me_fw == NULL) {
		DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
			  fw_name);
		err = -ENOENT;
	} else if (rdev->me_fw->datasize % 8) {
		DRM_ERROR(
			"radeon_cp: Bogus length %zu in firmware \"%s\"\n",
			rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
		rdev->me_fw = NULL;
	}
	return err;
}

/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
{

	if (rdev->me_fw != NULL) {
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
		rdev->me_fw = NULL;
	}
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->datasize / 4;
		fw_data = (const __be32 *)rdev->me_fw->data;
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}
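/*
 * Ring sizing note for r100_cp_init() below: drm_order() is a round-up
 * base-2 logarithm and RB_BUFSZ counts the ring in 8-byte units, so the
 * requested byte size is rounded up to a power of two.  Worked example:
 * ring_size = 1024 * 1024 gives rb_bufsz = drm_order(131072) = 17 and
 * ring_size = (1 << 18) * 4 = 1 MiB again, while a 1.5 MiB request
 * would round up to 2 MiB.
 */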
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
			     0, 0x7fffff, RADEON_CP_PACKET2);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	ring->align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
	else {
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);
	}

	WREG32(RADEON_CP_RB_CNTL, tmp);
	DRM_UDELAY(10);
	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	ring->ready = true;
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");
}
void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");
	}
}

/*
 * CS functions
 */
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx,
			    unsigned reg)
{
	int r;
	u32 tile_flags = 0;
	u32 tmp;
	struct radeon_cs_reloc *reloc;
	u32 value;

	r = r100_cs_packet_next_reloc(p, &reloc);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		r100_cs_dump_packet(p, pkt);
		return r;
	}

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				r100_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}

		tmp |= tile_flags;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
	} else
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
	return 0;
}

int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
{
	unsigned c, i;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	int r = 0;
	volatile uint32_t *ib;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
	if (c > 16) {
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		r100_cs_dump_packet(p, pkt);
		return -EINVAL;
	}
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx + 2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;
	}
	if (c & 1) {
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
	}
	return r;
}
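/*
 * Packet layout handled above: 3D_LOAD_VBPNTR packs vertex arrays in
 * pairs.  After the count dword, each pair takes three dwords: one dword
 * carrying both element sizes (array N in bits 8..14, array N+1 in bits
 * 24..30) followed by the two buffer addresses; a trailing odd array
 * takes two dwords.  That is why the loop steps idx by 3 per pair and
 * the (c & 1) tail picks up the remainder.
 */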
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Returns -EINVAL
 * if the packet is bigger than the remaining ib size, or if the
 * packet type is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
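/*
 * Header decode used above, for reference: bits 31:30 of the first dword
 * select the packet type.  For PACKET0 the register offset sits in the
 * low bits (CP_PACKET0_GET_REG shifts it back into a byte address), the
 * dword count minus one sits in bits 29:16, and the one-reg-wr flag in
 * bit 15 means "write the same register count+1 times".  PACKET2 is pure
 * filler, which is why its count is forced to -1.
 */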
/**
 * r100_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check that it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Checks that the next packet is a relocation packet3, does bo validation
 * and computes the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}
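/*
 * Example of the decode above: a SE_VTX_FMT word with only PKCOLOR and
 * ST0 set gives vtx_size = 2 (base XY) + 1 (packed color) + 2 (one
 * texcoord set) = 5 dwords per vertex.  The draw checks below store this
 * in the track state so immediate-mode packets can be verified to carry
 * a whole number of vertices.
 */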
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) 1871 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1872 tmp = (idx_value >> 23) & 0x7; 1873 if (tmp == 2 || tmp == 6) 1874 track->textures[i].roundup_w = false; 1875 tmp = (idx_value >> 27) & 0x7; 1876 if (tmp == 2 || tmp == 6) 1877 track->textures[i].roundup_h = false; 1878 track->tex_dirty = true; 1879 break; 1880 case RADEON_PP_TXFORMAT_0: 1881 case RADEON_PP_TXFORMAT_1: 1882 case RADEON_PP_TXFORMAT_2: 1883 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1884 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1885 track->textures[i].use_pitch = 1; 1886 } else { 1887 track->textures[i].use_pitch = 0; 1888 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1889 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1890 } 1891 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1892 track->textures[i].tex_coord_type = 2; 1893 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { 1894 case RADEON_TXFORMAT_I8: 1895 case RADEON_TXFORMAT_RGB332: 1896 case RADEON_TXFORMAT_Y8: 1897 track->textures[i].cpp = 1; 1898 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1899 break; 1900 case RADEON_TXFORMAT_AI88: 1901 case RADEON_TXFORMAT_ARGB1555: 1902 case RADEON_TXFORMAT_RGB565: 1903 case RADEON_TXFORMAT_ARGB4444: 1904 case RADEON_TXFORMAT_VYUY422: 1905 case RADEON_TXFORMAT_YVYU422: 1906 case RADEON_TXFORMAT_SHADOW16: 1907 case RADEON_TXFORMAT_LDUDV655: 1908 case RADEON_TXFORMAT_DUDV88: 1909 track->textures[i].cpp = 2; 1910 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1911 break; 1912 case RADEON_TXFORMAT_ARGB8888: 1913 case RADEON_TXFORMAT_RGBA8888: 1914 case RADEON_TXFORMAT_SHADOW32: 1915 case RADEON_TXFORMAT_LDUDUV8888: 1916 track->textures[i].cpp = 4; 1917 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1918 break; 1919 case RADEON_TXFORMAT_DXT1: 1920 track->textures[i].cpp = 1; 1921 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 1922 break; 1923 case RADEON_TXFORMAT_DXT23: 1924 case RADEON_TXFORMAT_DXT45: 1925 track->textures[i].cpp = 1; 1926 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 1927 break; 1928 } 1929 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1930 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1931 track->tex_dirty = true; 1932 break; 1933 case RADEON_PP_CUBIC_FACES_0: 1934 case RADEON_PP_CUBIC_FACES_1: 1935 case RADEON_PP_CUBIC_FACES_2: 1936 tmp = idx_value; 1937 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1938 for (face = 0; face < 4; face++) { 1939 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1940 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1941 } 1942 track->tex_dirty = true; 1943 break; 1944 default: 1945 DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", 1946 reg, idx); 1947 return -EINVAL; 1948 } 1949 return 0; 1950 } 1951 1952 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1953 struct radeon_cs_packet *pkt, 1954 struct radeon_bo *robj) 1955 { 1956 unsigned idx; 1957 u32 value; 1958 idx = pkt->idx + 1; 1959 value = radeon_get_ib_value(p, idx + 2); 1960 if ((value + 1) > radeon_bo_size(robj)) { 1961 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1962 "(need %u have %lu) !\n", 1963 value + 1, 1964 radeon_bo_size(robj)); 1965 return -EINVAL; 1966 } 1967 return 0; 1968 } 1969 1970 static int 
r100_packet3_check(struct radeon_cs_parser *p, 1971 struct radeon_cs_packet *pkt) 1972 { 1973 struct radeon_cs_reloc *reloc; 1974 struct r100_cs_track *track; 1975 unsigned idx; 1976 volatile uint32_t *ib; 1977 int r; 1978 1979 ib = p->ib.ptr; 1980 idx = pkt->idx + 1; 1981 track = (struct r100_cs_track *)p->track; 1982 switch (pkt->opcode) { 1983 case PACKET3_3D_LOAD_VBPNTR: 1984 r = r100_packet3_load_vbpntr(p, pkt, idx); 1985 if (r) 1986 return r; 1987 break; 1988 case PACKET3_INDX_BUFFER: 1989 r = r100_cs_packet_next_reloc(p, &reloc); 1990 if (r) { 1991 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1992 r100_cs_dump_packet(p, pkt); 1993 return r; 1994 } 1995 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); 1996 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1997 if (r) { 1998 return r; 1999 } 2000 break; 2001 case 0x23: 2002 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 2003 r = r100_cs_packet_next_reloc(p, &reloc); 2004 if (r) { 2005 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 2006 r100_cs_dump_packet(p, pkt); 2007 return r; 2008 } 2009 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); 2010 track->num_arrays = 1; 2011 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 2012 2013 track->arrays[0].robj = reloc->robj; 2014 track->arrays[0].esize = track->vtx_size; 2015 2016 track->max_indx = radeon_get_ib_value(p, idx+1); 2017 2018 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 2019 track->immd_dwords = pkt->count - 1; 2020 r = r100_cs_track_check(p->rdev, track); 2021 if (r) 2022 return r; 2023 break; 2024 case PACKET3_3D_DRAW_IMMD: 2025 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 2026 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 2027 return -EINVAL; 2028 } 2029 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 2030 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2031 track->immd_dwords = pkt->count - 1; 2032 r = r100_cs_track_check(p->rdev, track); 2033 if (r) 2034 return r; 2035 break; 2036 /* the case above draws using in-packet vertex data */ 2037 case PACKET3_3D_DRAW_IMMD_2: 2038 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 2039 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 2040 return -EINVAL; 2041 } 2042 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2043 track->immd_dwords = pkt->count; 2044 r = r100_cs_track_check(p->rdev, track); 2045 if (r) 2046 return r; 2047 break; 2048 /* the case above draws using in-packet vertex data */ 2049 case PACKET3_3D_DRAW_VBUF_2: 2050 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2051 r = r100_cs_track_check(p->rdev, track); 2052 if (r) 2053 return r; 2054 break; 2055 /* the case above draws from vertex buffers set up elsewhere */ 2056 case PACKET3_3D_DRAW_INDX_2: 2057 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2058 r = r100_cs_track_check(p->rdev, track); 2059 if (r) 2060 return r; 2061 break; 2062 /* the case above draws using indices into a vertex buffer */ 2063 case PACKET3_3D_DRAW_VBUF: 2064 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2065 r = r100_cs_track_check(p->rdev, track); 2066 if (r) 2067 return r; 2068 break; 2069 /* the case above draws from vertex buffers set up elsewhere */ 2070 case PACKET3_3D_DRAW_INDX: 2071 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2072 r = r100_cs_track_check(p->rdev, track); 2073 if (r) 2074 return r; 2075 break; 2076 /* the case above draws using indices into a vertex buffer */ 2077 case PACKET3_3D_CLEAR_HIZ: 2078 case PACKET3_3D_CLEAR_ZMASK: 2079 if
(p->rdev->hyperz_filp != p->filp) 2080 return -EINVAL; 2081 break; 2082 case PACKET3_NOP: 2083 break; 2084 default: 2085 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2086 return -EINVAL; 2087 } 2088 return 0; 2089 } 2090 2091 int r100_cs_parse(struct radeon_cs_parser *p) 2092 { 2093 struct radeon_cs_packet pkt; 2094 struct r100_cs_track *track; 2095 int r; 2096 2097 track = kmalloc(sizeof(*track), M_DRM, M_ZERO | M_WAITOK); 2098 if (!track) 2099 return -ENOMEM; 2100 r100_cs_track_clear(p->rdev, track); 2101 p->track = track; 2102 do { 2103 r = r100_cs_packet_parse(p, &pkt, p->idx); 2104 if (r) { 2105 drm_free(p->track, M_DRM); 2106 p->track = NULL; 2107 return r; 2108 } 2109 p->idx += pkt.count + 2; 2110 switch (pkt.type) { 2111 case PACKET_TYPE0: 2112 if (p->rdev->family >= CHIP_R200) 2113 r = r100_cs_parse_packet0(p, &pkt, 2114 p->rdev->config.r100.reg_safe_bm, 2115 p->rdev->config.r100.reg_safe_bm_size, 2116 &r200_packet0_check); 2117 else 2118 r = r100_cs_parse_packet0(p, &pkt, 2119 p->rdev->config.r100.reg_safe_bm, 2120 p->rdev->config.r100.reg_safe_bm_size, 2121 &r100_packet0_check); 2122 break; 2123 case PACKET_TYPE2: 2124 break; 2125 case PACKET_TYPE3: 2126 r = r100_packet3_check(p, &pkt); 2127 break; 2128 default: 2129 DRM_ERROR("Unknown packet type %d !\n", 2130 pkt.type); 2131 drm_free(p->track, M_DRM); 2132 p->track = NULL; 2133 return -EINVAL; 2134 } 2135 if (r) { 2136 drm_free(p->track, M_DRM); 2137 p->track = NULL; 2138 return r; 2139 } 2140 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2141 drm_free(p->track, M_DRM); 2142 p->track = NULL; 2143 return 0; 2144 } 2145 2146 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2147 { 2148 DRM_ERROR("pitch %d\n", t->pitch); 2149 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2150 DRM_ERROR("width %d\n", t->width); 2151 DRM_ERROR("width_11 %d\n", t->width_11); 2152 DRM_ERROR("height %d\n", t->height); 2153 DRM_ERROR("height_11 %d\n", t->height_11); 2154 DRM_ERROR("num levels %d\n", t->num_levels); 2155 DRM_ERROR("depth %d\n", t->txdepth); 2156 DRM_ERROR("bpp %d\n", t->cpp); 2157 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2158 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2159 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2160 DRM_ERROR("compress format %d\n", t->compress_format); 2161 } 2162 2163 static int r100_track_compress_size(int compress_format, int w, int h) 2164 { 2165 int block_width, block_height, block_bytes; 2166 int wblocks, hblocks; 2167 int min_wblocks; 2168 int sz; 2169 2170 block_width = 4; 2171 block_height = 4; 2172 2173 switch (compress_format) { 2174 case R100_TRACK_COMP_DXT1: 2175 block_bytes = 8; 2176 min_wblocks = 4; 2177 break; 2178 default: 2179 case R100_TRACK_COMP_DXT35: 2180 block_bytes = 16; 2181 min_wblocks = 2; 2182 break; 2183 } 2184 2185 hblocks = (h + block_height - 1) / block_height; 2186 wblocks = (w + block_width - 1) / block_width; 2187 if (wblocks < min_wblocks) 2188 wblocks = min_wblocks; 2189 sz = wblocks * hblocks * block_bytes; 2190 return sz; 2191 } 2192 2193 static int r100_cs_track_cube(struct radeon_device *rdev, 2194 struct r100_cs_track *track, unsigned idx) 2195 { 2196 unsigned face, w, h; 2197 struct radeon_bo *cube_robj; 2198 unsigned long size; 2199 unsigned compress_format = track->textures[idx].compress_format; 2200 2201 for (face = 0; face < 5; face++) { 2202 cube_robj = track->textures[idx].cube_info[face].robj; 2203 w = track->textures[idx].cube_info[face].width; 2204 h = 
track->textures[idx].cube_info[face].height; 2205 2206 if (compress_format) { 2207 size = r100_track_compress_size(compress_format, w, h); 2208 } else 2209 size = w * h; 2210 size *= track->textures[idx].cpp; 2211 2212 size += track->textures[idx].cube_info[face].offset; 2213 2214 if (size > radeon_bo_size(cube_robj)) { 2215 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2216 size, radeon_bo_size(cube_robj)); 2217 r100_cs_track_texture_print(&track->textures[idx]); 2218 return -1; 2219 } 2220 } 2221 return 0; 2222 } 2223 2224 static int r100_cs_track_texture_check(struct radeon_device *rdev, 2225 struct r100_cs_track *track) 2226 { 2227 struct radeon_bo *robj; 2228 unsigned long size; 2229 unsigned u, i, w, h, d; 2230 int ret; 2231 2232 for (u = 0; u < track->num_texture; u++) { 2233 if (!track->textures[u].enabled) 2234 continue; 2235 if (track->textures[u].lookup_disable) 2236 continue; 2237 robj = track->textures[u].robj; 2238 if (robj == NULL) { 2239 DRM_ERROR("No texture bound to unit %u\n", u); 2240 return -EINVAL; 2241 } 2242 size = 0; 2243 for (i = 0; i <= track->textures[u].num_levels; i++) { 2244 if (track->textures[u].use_pitch) { 2245 if (rdev->family < CHIP_R300) 2246 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2247 else 2248 w = track->textures[u].pitch / (1 << i); 2249 } else { 2250 w = track->textures[u].width; 2251 if (rdev->family >= CHIP_RV515) 2252 w |= track->textures[u].width_11; 2253 w = w / (1 << i); 2254 if (track->textures[u].roundup_w) 2255 w = roundup_pow_of_two(w); 2256 } 2257 h = track->textures[u].height; 2258 if (rdev->family >= CHIP_RV515) 2259 h |= track->textures[u].height_11; 2260 h = h / (1 << i); 2261 if (track->textures[u].roundup_h) 2262 h = roundup_pow_of_two(h); 2263 if (track->textures[u].tex_coord_type == 1) { 2264 d = (1 << track->textures[u].txdepth) / (1 << i); 2265 if (!d) 2266 d = 1; 2267 } else { 2268 d = 1; 2269 } 2270 if (track->textures[u].compress_format) { 2271 2272 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2273 /* compressed textures are block based */ 2274 } else 2275 size += w * h * d; 2276 } 2277 size *= track->textures[u].cpp; 2278 2279 switch (track->textures[u].tex_coord_type) { 2280 case 0: 2281 case 1: 2282 break; 2283 case 2: 2284 if (track->separate_cube) { 2285 ret = r100_cs_track_cube(rdev, track, u); 2286 if (ret) 2287 return ret; 2288 } else 2289 size *= 6; 2290 break; 2291 default: 2292 DRM_ERROR("Invalid texture coordinate type %u for unit " 2293 "%u\n", track->textures[u].tex_coord_type, u); 2294 return -EINVAL; 2295 } 2296 if (size > radeon_bo_size(robj)) { 2297 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2298 "%lu\n", u, size, radeon_bo_size(robj)); 2299 r100_cs_track_texture_print(&track->textures[u]); 2300 return -EINVAL; 2301 } 2302 } 2303 return 0; 2304 } 2305 2306 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2307 { 2308 unsigned i; 2309 unsigned long size; 2310 unsigned prim_walk; 2311 unsigned nverts; 2312 unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; 2313 2314 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2315 !track->blend_read_enable) 2316 num_cb = 0; 2317 2318 for (i = 0; i < num_cb; i++) { 2319 if (track->cb[i].robj == NULL) { 2320 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2321 return -EINVAL; 2322 } 2323 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2324 size += track->cb[i].offset; 2325 if (size > radeon_bo_size(track->cb[i].robj)) { 2326 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2327 "(need %lu have %lu) !\n", i, size, 2328 radeon_bo_size(track->cb[i].robj)); 2329 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2330 i, track->cb[i].pitch, track->cb[i].cpp, 2331 track->cb[i].offset, track->maxy); 2332 return -EINVAL; 2333 } 2334 } 2335 track->cb_dirty = false; 2336 2337 if (track->zb_dirty && track->z_enabled) { 2338 if (track->zb.robj == NULL) { 2339 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2340 return -EINVAL; 2341 } 2342 size = track->zb.pitch * track->zb.cpp * track->maxy; 2343 size += track->zb.offset; 2344 if (size > radeon_bo_size(track->zb.robj)) { 2345 DRM_ERROR("[drm] Buffer too small for z buffer " 2346 "(need %lu have %lu) !\n", size, 2347 radeon_bo_size(track->zb.robj)); 2348 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2349 track->zb.pitch, track->zb.cpp, 2350 track->zb.offset, track->maxy); 2351 return -EINVAL; 2352 } 2353 } 2354 track->zb_dirty = false; 2355 2356 if (track->aa_dirty && track->aaresolve) { 2357 if (track->aa.robj == NULL) { 2358 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2359 return -EINVAL; 2360 } 2361 /* I believe the format comes from colorbuffer0. */ 2362 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2363 size += track->aa.offset; 2364 if (size > radeon_bo_size(track->aa.robj)) { 2365 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2366 "(need %lu have %lu) !\n", i, size, 2367 radeon_bo_size(track->aa.robj)); 2368 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2369 i, track->aa.pitch, track->cb[0].cpp, 2370 track->aa.offset, track->maxy); 2371 return -EINVAL; 2372 } 2373 } 2374 track->aa_dirty = false; 2375 2376 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2377 if (track->vap_vf_cntl & (1 << 14)) { 2378 nverts = track->vap_alt_nverts; 2379 } else { 2380 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2381 } 2382 switch (prim_walk) { 2383 case 1: 2384 for (i = 0; i < track->num_arrays; i++) { 2385 size = track->arrays[i].esize * track->max_indx * 4; 2386 if (track->arrays[i].robj == NULL) { 2387 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2388 "bound\n", prim_walk, i); 2389 return -EINVAL; 2390 } 2391 if (size > radeon_bo_size(track->arrays[i].robj)) { 2392 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2393 "need %lu dwords have %lu dwords\n", 2394 prim_walk, i, size >> 2, 2395 radeon_bo_size(track->arrays[i].robj) 2396 >> 2); 2397 DRM_ERROR("Max indices %u\n", track->max_indx); 2398 return -EINVAL; 2399 } 2400 } 2401 break; 2402 case 2: 2403 for (i = 0; i < track->num_arrays; i++) { 2404 size = track->arrays[i].esize * (nverts - 1) * 4; 2405 if (track->arrays[i].robj == NULL) { 2406 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2407 "bound\n", prim_walk, i); 2408 return -EINVAL; 2409 } 2410 if (size > radeon_bo_size(track->arrays[i].robj)) { 2411 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2412 "need %lu dwords have %lu dwords\n", 2413 prim_walk, i, size >> 2, 2414 radeon_bo_size(track->arrays[i].robj) 2415 >> 2); 2416 return -EINVAL; 2417 } 2418 } 
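/* Worked example for the check above (illustrative values only): with
 * esize = 3 dwords per vertex and nverts = 256, the bound BO must hold
 * at least 3 * (256 - 1) * 4 = 3060 bytes.
 */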
2419 break; 2420 case 3: 2421 size = track->vtx_size * nverts; 2422 if (size != track->immd_dwords) { 2423 DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n", 2424 track->immd_dwords, size); 2425 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2426 nverts, track->vtx_size); 2427 return -EINVAL; 2428 } 2429 break; 2430 default: 2431 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2432 prim_walk); 2433 return -EINVAL; 2434 } 2435 2436 if (track->tex_dirty) { 2437 track->tex_dirty = false; 2438 return r100_cs_track_texture_check(rdev, track); 2439 } 2440 return 0; 2441 } 2442 2443 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 2444 { 2445 unsigned i, face; 2446 2447 track->cb_dirty = true; 2448 track->zb_dirty = true; 2449 track->tex_dirty = true; 2450 track->aa_dirty = true; 2451 2452 if (rdev->family < CHIP_R300) { 2453 track->num_cb = 1; 2454 if (rdev->family <= CHIP_RS200) 2455 track->num_texture = 3; 2456 else 2457 track->num_texture = 6; 2458 track->maxy = 2048; 2459 track->separate_cube = 1; 2460 } else { 2461 track->num_cb = 4; 2462 track->num_texture = 16; 2463 track->maxy = 4096; 2464 track->separate_cube = 0; 2465 track->aaresolve = false; 2466 track->aa.robj = NULL; 2467 } 2468 2469 for (i = 0; i < track->num_cb; i++) { 2470 track->cb[i].robj = NULL; 2471 track->cb[i].pitch = 8192; 2472 track->cb[i].cpp = 16; 2473 track->cb[i].offset = 0; 2474 } 2475 track->z_enabled = true; 2476 track->zb.robj = NULL; 2477 track->zb.pitch = 8192; 2478 track->zb.cpp = 4; 2479 track->zb.offset = 0; 2480 track->vtx_size = 0x7F; 2481 track->immd_dwords = 0xFFFFFFFFUL; 2482 track->num_arrays = 11; 2483 track->max_indx = 0x00FFFFFFUL; 2484 for (i = 0; i < track->num_arrays; i++) { 2485 track->arrays[i].robj = NULL; 2486 track->arrays[i].esize = 0x7F; 2487 } 2488 for (i = 0; i < track->num_texture; i++) { 2489 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 2490 track->textures[i].pitch = 16536; 2491 track->textures[i].width = 16536; 2492 track->textures[i].height = 16536; 2493 track->textures[i].width_11 = 1 << 11; 2494 track->textures[i].height_11 = 1 << 11; 2495 track->textures[i].num_levels = 12; 2496 if (rdev->family <= CHIP_RS200) { 2497 track->textures[i].tex_coord_type = 0; 2498 track->textures[i].txdepth = 0; 2499 } else { 2500 track->textures[i].txdepth = 16; 2501 track->textures[i].tex_coord_type = 1; 2502 } 2503 track->textures[i].cpp = 64; 2504 track->textures[i].robj = NULL; 2505 /* CS IB emission code makes sure texture units are disabled */ 2506 track->textures[i].enabled = false; 2507 track->textures[i].lookup_disable = false; 2508 track->textures[i].roundup_w = true; 2509 track->textures[i].roundup_h = true; 2510 if (track->separate_cube) 2511 for (face = 0; face < 5; face++) { 2512 track->textures[i].cube_info[face].robj = NULL; 2513 track->textures[i].cube_info[face].width = 16536; 2514 track->textures[i].cube_info[face].height = 16536; 2515 track->textures[i].cube_info[face].offset = 0; 2516 } 2517 } 2518 } 2519 2520 /* 2521 * Global GPU functions 2522 */ 2523 static void r100_errata(struct radeon_device *rdev) 2524 { 2525 rdev->pll_errata = 0; 2526 2527 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { 2528 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; 2529 } 2530 2531 if (rdev->family == CHIP_RV100 || 2532 rdev->family == CHIP_RS100 || 2533 rdev->family == CHIP_RS200) { 2534 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; 2535 } 2536 } 2537 2538 static int r100_rbbm_fifo_wait_for_entry(struct
radeon_device *rdev, unsigned n) 2539 { 2540 unsigned i; 2541 uint32_t tmp; 2542 2543 for (i = 0; i < rdev->usec_timeout; i++) { 2544 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2545 if (tmp >= n) { 2546 return 0; 2547 } 2548 DRM_UDELAY(1); 2549 } 2550 return -1; 2551 } 2552 2553 int r100_gui_wait_for_idle(struct radeon_device *rdev) 2554 { 2555 unsigned i; 2556 uint32_t tmp; 2557 2558 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2559 DRM_ERROR("radeon: wait for empty RBBM fifo failed !" 2560 " Bad things might happen.\n"); 2561 } 2562 for (i = 0; i < rdev->usec_timeout; i++) { 2563 tmp = RREG32(RADEON_RBBM_STATUS); 2564 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2565 return 0; 2566 } 2567 DRM_UDELAY(1); 2568 } 2569 return -1; 2570 } 2571 2572 int r100_mc_wait_for_idle(struct radeon_device *rdev) 2573 { 2574 unsigned i; 2575 uint32_t tmp; 2576 2577 for (i = 0; i < rdev->usec_timeout; i++) { 2578 /* read MC_STATUS */ 2579 tmp = RREG32(RADEON_MC_STATUS); 2580 if (tmp & RADEON_MC_IDLE) { 2581 return 0; 2582 } 2583 DRM_UDELAY(1); 2584 } 2585 return -1; 2586 } 2587 2588 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2589 { 2590 u32 rbbm_status; 2591 2592 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2593 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2594 radeon_ring_lockup_update(ring); 2595 return false; 2596 } 2597 /* force CP activities */ 2598 radeon_ring_force_activity(rdev, ring); 2599 return radeon_ring_test_lockup(rdev, ring); 2600 } 2601 2602 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2603 void r100_enable_bm(struct radeon_device *rdev) 2604 { 2605 uint32_t tmp; 2606 /* Enable bus mastering */ 2607 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2608 WREG32(RADEON_BUS_CNTL, tmp); 2609 } 2610 2611 void r100_bm_disable(struct radeon_device *rdev) 2612 { 2613 u32 tmp; 2614 2615 /* disable bus mastering */ 2616 tmp = RREG32(R_000030_BUS_CNTL); 2617 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2618 DRM_MDELAY(1); 2619 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2620 DRM_MDELAY(1); 2621 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2622 tmp = RREG32(RADEON_BUS_CNTL); 2623 DRM_MDELAY(1); 2624 pci_disable_busmaster(rdev->dev); 2625 DRM_MDELAY(1); 2626 } 2627 2628 int r100_asic_reset(struct radeon_device *rdev) 2629 { 2630 struct r100_mc_save save; 2631 u32 status, tmp; 2632 int ret = 0; 2633 2634 status = RREG32(R_000E40_RBBM_STATUS); 2635 if (!G_000E40_GUI_ACTIVE(status)) { 2636 return 0; 2637 } 2638 r100_mc_stop(rdev, &save); 2639 status = RREG32(R_000E40_RBBM_STATUS); 2640 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2641 /* stop CP */ 2642 WREG32(RADEON_CP_CSQ_CNTL, 0); 2643 tmp = RREG32(RADEON_CP_RB_CNTL); 2644 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2645 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2646 WREG32(RADEON_CP_RB_WPTR, 0); 2647 WREG32(RADEON_CP_RB_CNTL, tmp); 2648 /* save PCI state */ 2649 pci_save_state(device_get_parent(rdev->dev)); 2650 /* disable bus mastering */ 2651 r100_bm_disable(rdev); 2652 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2653 S_0000F0_SOFT_RESET_RE(1) | 2654 S_0000F0_SOFT_RESET_PP(1) | 2655 S_0000F0_SOFT_RESET_RB(1)); 2656 RREG32(R_0000F0_RBBM_SOFT_RESET); 2657 DRM_MDELAY(500); 2658 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2659 DRM_MDELAY(1); 2660 status = RREG32(R_000E40_RBBM_STATUS); 2661 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2662 /* reset CP */ 
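/* The block below follows the usual soft-reset pattern: assert the reset
 * bit, read the register back to post the write, wait for the engine to
 * settle, deassert, then re-read RBBM_STATUS to see whether it idled.
 */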
2663 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); 2664 RREG32(R_0000F0_RBBM_SOFT_RESET); 2665 DRM_MDELAY(500); 2666 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2667 DRM_MDELAY(1); 2668 status = RREG32(R_000E40_RBBM_STATUS); 2669 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2670 /* restore PCI & busmastering */ 2671 pci_restore_state(device_get_parent(rdev->dev)); 2672 r100_enable_bm(rdev); 2673 /* Check if GPU is idle */ 2674 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2675 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2676 dev_err(rdev->dev, "failed to reset GPU\n"); 2677 ret = -1; 2678 } else 2679 dev_info(rdev->dev, "GPU reset succeeded\n"); 2680 r100_mc_resume(rdev, &save); 2681 return ret; 2682 } 2683 2684 void r100_set_common_regs(struct radeon_device *rdev) 2685 { 2686 struct drm_device *dev = rdev->ddev; 2687 bool force_dac2 = false; 2688 u32 tmp; 2689 2690 /* set these so they don't interfere with anything */ 2691 WREG32(RADEON_OV0_SCALE_CNTL, 0); 2692 WREG32(RADEON_SUBPIC_CNTL, 0); 2693 WREG32(RADEON_VIPH_CONTROL, 0); 2694 WREG32(RADEON_I2C_CNTL_1, 0); 2695 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 2696 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 2697 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 2698 2699 /* always set up dac2 on rn50 and some rv100 as lots 2700 * of servers seem to wire it up to a VGA port but 2701 * don't report it in the bios connector 2702 * table. 2703 */ 2704 switch (dev->pci_device) { 2705 /* RN50 */ 2706 case 0x515e: 2707 case 0x5969: 2708 force_dac2 = true; 2709 break; 2710 /* RV100 */ 2711 case 0x5159: 2712 case 0x515a: 2713 /* DELL triple head servers */ 2714 if ((dev->pci_subvendor == 0x1028 /* DELL */) && 2715 ((dev->pci_subdevice == 0x016c) || 2716 (dev->pci_subdevice == 0x016d) || 2717 (dev->pci_subdevice == 0x016e) || 2718 (dev->pci_subdevice == 0x016f) || 2719 (dev->pci_subdevice == 0x0170) || 2720 (dev->pci_subdevice == 0x017d) || 2721 (dev->pci_subdevice == 0x017e) || 2722 (dev->pci_subdevice == 0x0183) || 2723 (dev->pci_subdevice == 0x018a) || 2724 (dev->pci_subdevice == 0x019a))) 2725 force_dac2 = true; 2726 break; 2727 } 2728 2729 if (force_dac2) { 2730 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 2731 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 2732 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 2733 2734 /* For CRT on DAC2, don't turn it on if BIOS didn't 2735 enable it, even if it's detected.
2736 */ 2737 2738 /* force it to crtc0 */ 2739 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; 2740 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; 2741 disp_hw_debug |= RADEON_CRT2_DISP1_SEL; 2742 2743 /* set up the TV DAC */ 2744 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | 2745 RADEON_TV_DAC_STD_MASK | 2746 RADEON_TV_DAC_RDACPD | 2747 RADEON_TV_DAC_GDACPD | 2748 RADEON_TV_DAC_BDACPD | 2749 RADEON_TV_DAC_BGADJ_MASK | 2750 RADEON_TV_DAC_DACADJ_MASK); 2751 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 2752 RADEON_TV_DAC_NHOLD | 2753 RADEON_TV_DAC_STD_PS2 | 2754 (0x58 << 16)); 2755 2756 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 2757 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 2758 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 2759 } 2760 2761 /* switch PM block to ACPI mode */ 2762 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); 2763 tmp &= ~RADEON_PM_MODE_SEL; 2764 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); 2765 2766 } 2767 2768 /* 2769 * VRAM info 2770 */ 2771 static void r100_vram_get_type(struct radeon_device *rdev) 2772 { 2773 uint32_t tmp; 2774 2775 rdev->mc.vram_is_ddr = false; 2776 if (rdev->flags & RADEON_IS_IGP) 2777 rdev->mc.vram_is_ddr = true; 2778 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) 2779 rdev->mc.vram_is_ddr = true; 2780 if ((rdev->family == CHIP_RV100) || 2781 (rdev->family == CHIP_RS100) || 2782 (rdev->family == CHIP_RS200)) { 2783 tmp = RREG32(RADEON_MEM_CNTL); 2784 if (tmp & RV100_HALF_MODE) { 2785 rdev->mc.vram_width = 32; 2786 } else { 2787 rdev->mc.vram_width = 64; 2788 } 2789 if (rdev->flags & RADEON_SINGLE_CRTC) { 2790 rdev->mc.vram_width /= 4; 2791 rdev->mc.vram_is_ddr = true; 2792 } 2793 } else if (rdev->family <= CHIP_RV280) { 2794 tmp = RREG32(RADEON_MEM_CNTL); 2795 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { 2796 rdev->mc.vram_width = 128; 2797 } else { 2798 rdev->mc.vram_width = 64; 2799 } 2800 } else { 2801 /* newer IGPs */ 2802 rdev->mc.vram_width = 128; 2803 } 2804 } 2805 2806 static u32 r100_get_accessible_vram(struct radeon_device *rdev) 2807 { 2808 u32 aper_size; 2809 u8 byte; 2810 2811 aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2812 2813 /* Set HDP_APER_CNTL only on cards that are known not to be broken, 2814 * that is has the 2nd generation multifunction PCI interface 2815 */ 2816 if (rdev->family == CHIP_RV280 || 2817 rdev->family >= CHIP_RV350) { 2818 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, 2819 ~RADEON_HDP_APER_CNTL); 2820 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); 2821 return aper_size * 2; 2822 } 2823 2824 /* Older cards have all sorts of funny issues to deal with. First 2825 * check if it's a multifunction card by reading the PCI config 2826 * header type... Limit those to one aperture size 2827 */ 2828 byte = pci_read_config(rdev->dev, 0xe, 1); 2829 if (byte & 0x80) { 2830 DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); 2831 DRM_INFO("Limiting VRAM to one aperture\n"); 2832 return aper_size; 2833 } 2834 2835 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS 2836 * have set it up. We don't write this as it's broken on some ASICs but 2837 * we expect the BIOS to have done the right thing (might be too optimistic...) 
2838 */ 2839 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) 2840 return aper_size * 2; 2841 return aper_size; 2842 } 2843 2844 void r100_vram_init_sizes(struct radeon_device *rdev) 2845 { 2846 u64 config_aper_size; 2847 2848 /* work out accessible VRAM */ 2849 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 2850 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 2851 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); 2852 /* FIXME we don't use the second aperture yet when we could use it */ 2853 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2854 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2855 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2856 if (rdev->flags & RADEON_IS_IGP) { 2857 uint32_t tom; 2858 /* read NB_TOM to get the amount of ram stolen for the GPU */ 2859 tom = RREG32(RADEON_NB_TOM); 2860 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 2861 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2862 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2863 } else { 2864 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 2865 /* Some production boards of m6 will report 0 2866 * if it's 8 MB 2867 */ 2868 if (rdev->mc.real_vram_size == 0) { 2869 rdev->mc.real_vram_size = 8192 * 1024; 2870 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 2871 } 2872 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 2873 * Novell bug 204882 + along with lots of ubuntu ones 2874 */ 2875 if (rdev->mc.aper_size > config_aper_size) 2876 config_aper_size = rdev->mc.aper_size; 2877 2878 if (config_aper_size > rdev->mc.real_vram_size) 2879 rdev->mc.mc_vram_size = config_aper_size; 2880 else 2881 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2882 } 2883 } 2884 2885 void r100_vga_set_state(struct radeon_device *rdev, bool state) 2886 { 2887 uint32_t temp; 2888 2889 temp = RREG32(RADEON_CONFIG_CNTL); 2890 if (state == false) { 2891 temp &= ~RADEON_CFG_VGA_RAM_EN; 2892 temp |= RADEON_CFG_VGA_IO_DIS; 2893 } else { 2894 temp &= ~RADEON_CFG_VGA_IO_DIS; 2895 } 2896 WREG32(RADEON_CONFIG_CNTL, temp); 2897 } 2898 2899 static void r100_mc_init(struct radeon_device *rdev) 2900 { 2901 u64 base; 2902 2903 r100_vram_get_type(rdev); 2904 r100_vram_init_sizes(rdev); 2905 base = rdev->mc.aper_base; 2906 if (rdev->flags & RADEON_IS_IGP) 2907 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2908 radeon_vram_location(rdev, &rdev->mc, base); 2909 rdev->mc.gtt_base_align = 0; 2910 if (!(rdev->flags & RADEON_IS_AGP)) 2911 radeon_gtt_location(rdev, &rdev->mc); 2912 radeon_update_bandwidth_info(rdev); 2913 } 2914 2915 2916 /* 2917 * Indirect registers accessor 2918 */ 2919 void r100_pll_errata_after_index(struct radeon_device *rdev) 2920 { 2921 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { 2922 (void)RREG32(RADEON_CLOCK_CNTL_DATA); 2923 (void)RREG32(RADEON_CRTC_GEN_CNTL); 2924 } 2925 } 2926 2927 static void r100_pll_errata_after_data(struct radeon_device *rdev) 2928 { 2929 /* This workarounds is necessary on RV100, RS100 and RS200 chips 2930 * or the chip could hang on a subsequent access 2931 */ 2932 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2933 DRM_MDELAY(5); 2934 } 2935 2936 /* This function is required to workaround a hardware bug in some (all?) 2937 * revisions of the R300. This workaround should be called after every 2938 * CLOCK_CNTL_INDEX register access. If not, register reads afterward 2939 * may not be correct. 
2940 */ 2941 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2942 uint32_t save, tmp; 2943 2944 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2945 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2946 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2947 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2948 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2949 } 2950 } 2951 2952 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2953 { 2954 uint32_t data; 2955 2956 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2957 r100_pll_errata_after_index(rdev); 2958 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2959 r100_pll_errata_after_data(rdev); 2960 return data; 2961 } 2962 2963 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2964 { 2965 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2966 r100_pll_errata_after_index(rdev); 2967 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2968 r100_pll_errata_after_data(rdev); 2969 } 2970 2971 static void r100_set_safe_registers(struct radeon_device *rdev) 2972 { 2973 if (ASIC_IS_RN50(rdev)) { 2974 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2975 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(rn50_reg_safe_bm); 2976 } else if (rdev->family < CHIP_R200) { 2977 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2978 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r100_reg_safe_bm); 2979 } else { 2980 r200_set_safe_registers(rdev); 2981 } 2982 } 2983 2984 /* 2985 * Debugfs info 2986 */ 2987 #if defined(CONFIG_DEBUG_FS) 2988 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2989 { 2990 struct drm_info_node *node = (struct drm_info_node *) m->private; 2991 struct drm_device *dev = node->minor->dev; 2992 struct radeon_device *rdev = dev->dev_private; 2993 uint32_t reg, value; 2994 unsigned i; 2995 2996 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2997 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2998 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2999 for (i = 0; i < 64; i++) { 3000 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 3001 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 3002 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 3003 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 3004 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 3005 } 3006 return 0; 3007 } 3008 3009 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 3010 { 3011 struct drm_info_node *node = (struct drm_info_node *) m->private; 3012 struct drm_device *dev = node->minor->dev; 3013 struct radeon_device *rdev = dev->dev_private; 3014 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3015 uint32_t rdp, wdp; 3016 unsigned count, i, j; 3017 3018 radeon_ring_free_size(rdev, ring); 3019 rdp = RREG32(RADEON_CP_RB_RPTR); 3020 wdp = RREG32(RADEON_CP_RB_WPTR); 3021 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 3022 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 3023 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 3024 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 3025 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 3026 seq_printf(m, "%u dwords in ring\n", count); 3027 for (j = 0; j <= count; j++) { 3028 i = (rdp + j) & ring->ptr_mask; 3029 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 3030 } 3031 return 0; 3032 } 3033 3034 3035 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 3036 { 3037 struct drm_info_node *node = (struct drm_info_node *) m->private; 3038 struct drm_device *dev = node->minor->dev; 3039 struct radeon_device *rdev = dev->dev_private; 
3040 uint32_t csq_stat, csq2_stat, tmp; 3041 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 3042 unsigned i; 3043 3044 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 3045 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 3046 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 3047 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 3048 r_rptr = (csq_stat >> 0) & 0x3ff; 3049 r_wptr = (csq_stat >> 10) & 0x3ff; 3050 ib1_rptr = (csq_stat >> 20) & 0x3ff; 3051 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 3052 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 3053 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 3054 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 3055 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 3056 seq_printf(m, "Ring rptr %u\n", r_rptr); 3057 seq_printf(m, "Ring wptr %u\n", r_wptr); 3058 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 3059 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 3060 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 3061 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 3062 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 3063 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 3064 seq_printf(m, "Ring fifo:\n"); 3065 for (i = 0; i < 256; i++) { 3066 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3067 tmp = RREG32(RADEON_CP_CSQ_DATA); 3068 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 3069 } 3070 seq_printf(m, "Indirect1 fifo:\n"); 3071 for (i = 256; i <= 512; i++) { 3072 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3073 tmp = RREG32(RADEON_CP_CSQ_DATA); 3074 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3075 } 3076 seq_printf(m, "Indirect2 fifo:\n"); 3077 for (i = 640; i < ib1_wptr; i++) { 3078 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3079 tmp = RREG32(RADEON_CP_CSQ_DATA); 3080 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3081 } 3082 return 0; 3083 } 3084 3085 static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3086 { 3087 struct drm_info_node *node = (struct drm_info_node *) m->private; 3088 struct drm_device *dev = node->minor->dev; 3089 struct radeon_device *rdev = dev->dev_private; 3090 uint32_t tmp; 3091 3092 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3093 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3094 tmp = RREG32(RADEON_MC_FB_LOCATION); 3095 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3096 tmp = RREG32(RADEON_BUS_CNTL); 3097 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3098 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3099 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3100 tmp = RREG32(RADEON_AGP_BASE); 3101 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3102 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3103 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3104 tmp = RREG32(0x01D0); 3105 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3106 tmp = RREG32(RADEON_AIC_LO_ADDR); 3107 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3108 tmp = RREG32(RADEON_AIC_HI_ADDR); 3109 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3110 tmp = RREG32(0x01E4); 3111 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3112 return 0; 3113 } 3114 3115 static struct drm_info_list r100_debugfs_rbbm_list[] = { 3116 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3117 }; 3118 3119 static struct drm_info_list r100_debugfs_cp_list[] = { 3120 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3121 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3122 }; 3123 3124 static struct drm_info_list r100_debugfs_mc_info_list[] = { 3125 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3126 }; 3127 #endif 3128 3129 int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3130 { 3131 #if 
defined(CONFIG_DEBUG_FS) 3132 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); 3133 #else 3134 return 0; 3135 #endif 3136 } 3137 3138 int r100_debugfs_cp_init(struct radeon_device *rdev) 3139 { 3140 #if defined(CONFIG_DEBUG_FS) 3141 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); 3142 #else 3143 return 0; 3144 #endif 3145 } 3146 3147 int r100_debugfs_mc_info_init(struct radeon_device *rdev) 3148 { 3149 #if defined(CONFIG_DEBUG_FS) 3150 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); 3151 #else 3152 return 0; 3153 #endif 3154 } 3155 3156 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 3157 uint32_t tiling_flags, uint32_t pitch, 3158 uint32_t offset, uint32_t obj_size) 3159 { 3160 int surf_index = reg * 16; 3161 int flags = 0; 3162 3163 if (rdev->family <= CHIP_RS200) { 3164 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3165 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 3166 flags |= RADEON_SURF_TILE_COLOR_BOTH; 3167 if (tiling_flags & RADEON_TILING_MACRO) 3168 flags |= RADEON_SURF_TILE_COLOR_MACRO; 3169 } else if (rdev->family <= CHIP_RV280) { 3170 if (tiling_flags & (RADEON_TILING_MACRO)) 3171 flags |= R200_SURF_TILE_COLOR_MACRO; 3172 if (tiling_flags & RADEON_TILING_MICRO) 3173 flags |= R200_SURF_TILE_COLOR_MICRO; 3174 } else { 3175 if (tiling_flags & RADEON_TILING_MACRO) 3176 flags |= R300_SURF_TILE_MACRO; 3177 if (tiling_flags & RADEON_TILING_MICRO) 3178 flags |= R300_SURF_TILE_MICRO; 3179 } 3180 3181 if (tiling_flags & RADEON_TILING_SWAP_16BIT) 3182 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 3183 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 3184 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 3185 3186 /* when we aren't tiling the pitch seems to need to be further divided down.
- tested on power5 + rn50 server */ 3187 if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { 3188 if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) 3189 if (ASIC_IS_RN50(rdev)) 3190 pitch /= 16; 3191 } 3192 3193 /* r100/r200 divide by 16 */ 3194 if (rdev->family < CHIP_R300) 3195 flags |= pitch / 16; 3196 else 3197 flags |= pitch / 8; 3198 3199 3200 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3201 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3202 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3203 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3204 return 0; 3205 } 3206 3207 void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3208 { 3209 int surf_index = reg * 16; 3210 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3211 } 3212 3213 void r100_bandwidth_update(struct radeon_device *rdev) 3214 { 3215 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3216 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3217 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 3218 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3219 fixed20_12 memtcas_ff[8] = { 3220 dfixed_init(1), 3221 dfixed_init(2), 3222 dfixed_init(3), 3223 dfixed_init(0), 3224 dfixed_init_half(1), 3225 dfixed_init_half(2), 3226 dfixed_init(0), 3227 }; 3228 fixed20_12 memtcas_rs480_ff[8] = { 3229 dfixed_init(0), 3230 dfixed_init(1), 3231 dfixed_init(2), 3232 dfixed_init(3), 3233 dfixed_init(0), 3234 dfixed_init_half(1), 3235 dfixed_init_half(2), 3236 dfixed_init_half(3), 3237 }; 3238 fixed20_12 memtcas2_ff[8] = { 3239 dfixed_init(0), 3240 dfixed_init(1), 3241 dfixed_init(2), 3242 dfixed_init(3), 3243 dfixed_init(4), 3244 dfixed_init(5), 3245 dfixed_init(6), 3246 dfixed_init(7), 3247 }; 3248 fixed20_12 memtrbs[8] = { 3249 dfixed_init(1), 3250 dfixed_init_half(1), 3251 dfixed_init(2), 3252 dfixed_init_half(2), 3253 dfixed_init(3), 3254 dfixed_init_half(3), 3255 dfixed_init(4), 3256 dfixed_init_half(4) 3257 }; 3258 fixed20_12 memtrbs_r4xx[8] = { 3259 dfixed_init(4), 3260 dfixed_init(5), 3261 dfixed_init(6), 3262 dfixed_init(7), 3263 dfixed_init(8), 3264 dfixed_init(9), 3265 dfixed_init(10), 3266 dfixed_init(11) 3267 }; 3268 fixed20_12 min_mem_eff; 3269 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3270 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3271 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, 3272 disp_drain_rate2, read_return_rate; 3273 fixed20_12 time_disp1_drop_priority; 3274 int c; 3275 int cur_size = 16; /* in octawords */ 3276 int critical_point = 0, critical_point2; 3277 /* uint32_t read_return_rate, time_disp1_drop_priority; */ 3278 int stop_req, max_stop_req; 3279 struct drm_display_mode *mode1 = NULL; 3280 struct drm_display_mode *mode2 = NULL; 3281 uint32_t pixel_bytes1 = 0; 3282 uint32_t pixel_bytes2 = 0; 3283 3284 radeon_update_display_priority(rdev); 3285 3286 if (rdev->mode_info.crtcs[0]->base.enabled) { 3287 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 3288 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 3289 } 3290 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3291 if (rdev->mode_info.crtcs[1]->base.enabled) { 3292 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 3293 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; 3294 } 3295 } 3296 3297 min_mem_eff.full = dfixed_const_8(0); 3298 /* get modes */ 3299 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 3300 uint32_t 
mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 3301 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); 3302 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); 3303 /* check crtc enables */ 3304 if (mode2) 3305 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); 3306 if (mode1) 3307 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); 3308 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); 3309 } 3310 3311 /* 3312 * determine if there is enough bw for the current mode 3313 */ 3314 sclk_ff = rdev->pm.sclk; 3315 mclk_ff = rdev->pm.mclk; 3316 3317 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 3318 temp_ff.full = dfixed_const(temp); 3319 mem_bw.full = dfixed_mul(mclk_ff, temp_ff); 3320 3321 pix_clk.full = 0; 3322 pix_clk2.full = 0; 3323 peak_disp_bw.full = 0; 3324 if (mode1) { 3325 temp_ff.full = dfixed_const(1000); 3326 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ 3327 pix_clk.full = dfixed_div(pix_clk, temp_ff); 3328 temp_ff.full = dfixed_const(pixel_bytes1); 3329 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); 3330 } 3331 if (mode2) { 3332 temp_ff.full = dfixed_const(1000); 3333 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ 3334 pix_clk2.full = dfixed_div(pix_clk2, temp_ff); 3335 temp_ff.full = dfixed_const(pixel_bytes2); 3336 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); 3337 } 3338 3339 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); 3340 if (peak_disp_bw.full >= mem_bw.full) { 3341 DRM_ERROR("You may not have enough display bandwidth for the current mode\n" 3342 "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n"); 3343 } 3344 3345 /* Get values from the EXT_MEM_CNTL register...converting its contents.
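 tRCD, tRP and tRAS below are DRAM timing parameters (apparently in
 memory-clock cycles, since they are later divided by mclk); the decode
 differs per family because the MEM_TIMING_CNTL field layout changed
 between generations.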
*/ 3346 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3347 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3348 mem_trcd = ((temp >> 2) & 0x3) + 1; 3349 mem_trp = ((temp & 0x3)) + 1; 3350 mem_tras = ((temp & 0x70) >> 4) + 1; 3351 } else if (rdev->family == CHIP_R300 || 3352 rdev->family == CHIP_R350) { /* r300, r350 */ 3353 mem_trcd = (temp & 0x7) + 1; 3354 mem_trp = ((temp >> 8) & 0x7) + 1; 3355 mem_tras = ((temp >> 11) & 0xf) + 4; 3356 } else if (rdev->family == CHIP_RV350 || 3357 rdev->family <= CHIP_RV380) { 3358 /* rv3x0 */ 3359 mem_trcd = (temp & 0x7) + 3; 3360 mem_trp = ((temp >> 8) & 0x7) + 3; 3361 mem_tras = ((temp >> 11) & 0xf) + 6; 3362 } else if (rdev->family == CHIP_R420 || 3363 rdev->family == CHIP_R423 || 3364 rdev->family == CHIP_RV410) { 3365 /* r4xx */ 3366 mem_trcd = (temp & 0xf) + 3; 3367 if (mem_trcd > 15) 3368 mem_trcd = 15; 3369 mem_trp = ((temp >> 8) & 0xf) + 3; 3370 if (mem_trp > 15) 3371 mem_trp = 15; 3372 mem_tras = ((temp >> 12) & 0x1f) + 6; 3373 if (mem_tras > 31) 3374 mem_tras = 31; 3375 } else { /* RV200, R200 */ 3376 mem_trcd = (temp & 0x7) + 1; 3377 mem_trp = ((temp >> 8) & 0x7) + 1; 3378 mem_tras = ((temp >> 12) & 0xf) + 4; 3379 } 3380 /* convert to FF */ 3381 trcd_ff.full = dfixed_const(mem_trcd); 3382 trp_ff.full = dfixed_const(mem_trp); 3383 tras_ff.full = dfixed_const(mem_tras); 3384 3385 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */ 3386 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3387 data = (temp & (7 << 20)) >> 20; 3388 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3389 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3390 tcas_ff = memtcas_rs480_ff[data]; 3391 else 3392 tcas_ff = memtcas_ff[data]; 3393 } else 3394 tcas_ff = memtcas2_ff[data]; 3395 3396 if (rdev->family == CHIP_RS400 || 3397 rdev->family == CHIP_RS480) { 3398 /* extra cas latency stored in bits 23-25 0-4 clocks */ 3399 data = (temp >> 23) & 0x7; 3400 if (data < 5) 3401 tcas_ff.full += dfixed_const(data); 3402 } 3403 3404 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3405 /* on the R300, Tcas is included in Trbs. 3406 */ 3407 temp = RREG32(RADEON_MEM_CNTL); 3408 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3409 if (data == 1) { 3410 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3411 temp = RREG32(R300_MC_IND_INDEX); 3412 temp &= ~R300_MC_IND_ADDR_MASK; 3413 temp |= R300_MC_READ_CNTL_CD_mcind; 3414 WREG32(R300_MC_IND_INDEX, temp); 3415 temp = RREG32(R300_MC_IND_DATA); 3416 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3417 } else { 3418 temp = RREG32(R300_MC_READ_CNTL_AB); 3419 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3420 } 3421 } else { 3422 temp = RREG32(R300_MC_READ_CNTL_AB); 3423 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3424 } 3425 if (rdev->family == CHIP_RV410 || 3426 rdev->family == CHIP_R420 || 3427 rdev->family == CHIP_R423) 3428 trbs_ff = memtrbs_r4xx[data]; 3429 else 3430 trbs_ff = memtrbs[data]; 3431 tcas_ff.full += trbs_ff.full; 3432 } 3433 3434 sclk_eff_ff.full = sclk_ff.full; 3435 3436 if (rdev->flags & RADEON_IS_AGP) { 3437 fixed20_12 agpmode_ff; 3438 agpmode_ff.full = dfixed_const(radeon_agpmode); 3439 temp_ff.full = dfixed_const_666(16); 3440 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3441 } 3442 /* TODO PCIE lanes may affect this - agpmode == 16??
*/ 3443 3444 if (ASIC_IS_R300(rdev)) { 3445 sclk_delay_ff.full = dfixed_const(250); 3446 } else { 3447 if ((rdev->family == CHIP_RV100) || 3448 rdev->flags & RADEON_IS_IGP) { 3449 if (rdev->mc.vram_is_ddr) 3450 sclk_delay_ff.full = dfixed_const(41); 3451 else 3452 sclk_delay_ff.full = dfixed_const(33); 3453 } else { 3454 if (rdev->mc.vram_width == 128) 3455 sclk_delay_ff.full = dfixed_const(57); 3456 else 3457 sclk_delay_ff.full = dfixed_const(41); 3458 } 3459 } 3460 3461 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3462 3463 if (rdev->mc.vram_is_ddr) { 3464 if (rdev->mc.vram_width == 32) { 3465 k1.full = dfixed_const(40); 3466 c = 3; 3467 } else { 3468 k1.full = dfixed_const(20); 3469 c = 1; 3470 } 3471 } else { 3472 k1.full = dfixed_const(40); 3473 c = 3; 3474 } 3475 3476 temp_ff.full = dfixed_const(2); 3477 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3478 temp_ff.full = dfixed_const(c); 3479 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3480 temp_ff.full = dfixed_const(4); 3481 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3482 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3483 mc_latency_mclk.full += k1.full; 3484 3485 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3486 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3487 3488 /* 3489 HW cursor time assuming worst case of full size colour cursor. 3490 */ 3491 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3492 temp_ff.full += trcd_ff.full; 3493 if (temp_ff.full < tras_ff.full) 3494 temp_ff.full = tras_ff.full; 3495 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3496 3497 temp_ff.full = dfixed_const(cur_size); 3498 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3499 /* 3500 Find the total latency for the display data. 3501 */ 3502 disp_latency_overhead.full = dfixed_const(8); 3503 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3504 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3505 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3506 3507 if (mc_latency_mclk.full > mc_latency_sclk.full) 3508 disp_latency.full = mc_latency_mclk.full; 3509 else 3510 disp_latency.full = mc_latency_sclk.full; 3511 3512 /* setup Max GRPH_STOP_REQ default value */ 3513 if (ASIC_IS_RV100(rdev)) 3514 max_stop_req = 0x5c; 3515 else 3516 max_stop_req = 0x7c; 3517 3518 if (mode1) { 3519 /* CRTC1 3520 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3521 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3522 */ 3523 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3524 3525 if (stop_req > max_stop_req) 3526 stop_req = max_stop_req; 3527 3528 /* 3529 Find the drain rate of the display buffer. 3530 */ 3531 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3532 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3533 3534 /* 3535 Find the critical point of the display buffer. 3536 */ 3537 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3538 crit_point_ff.full += dfixed_const_half(0); 3539 3540 critical_point = dfixed_trunc(crit_point_ff); 3541 3542 if (rdev->disp_priority == 2) { 3543 critical_point = 0; 3544 } 3545 3546 /* 3547 The critical point should never be above max_stop_req-4. Setting 3548 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 
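 For example (illustrative numbers): with max_stop_req = 0x7c, any computed
 critical_point above 0x78 fails the "max_stop_req - critical_point < 4"
 test below and is forced to 0.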
3549 */ 3550 if (max_stop_req - critical_point < 4) 3551 critical_point = 0; 3552 3553 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3554 /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */ 3555 critical_point = 0x10; 3556 } 3557 3558 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3559 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3560 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3561 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3562 if ((rdev->family == CHIP_R350) && 3563 (stop_req > 0x15)) { 3564 stop_req -= 0x10; 3565 } 3566 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3567 temp |= RADEON_GRPH_BUFFER_SIZE; 3568 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3569 RADEON_GRPH_CRITICAL_AT_SOF | 3570 RADEON_GRPH_STOP_CNTL); 3571 /* 3572 Write the result into the register. 3573 */ 3574 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3575 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3576 3577 #if 0 3578 if ((rdev->family == CHIP_RS400) || 3579 (rdev->family == CHIP_RS480)) { 3580 /* attempt to program RS400 disp regs correctly ??? */ 3581 temp = RREG32(RS400_DISP1_REG_CNTL); 3582 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3583 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3584 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3585 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3586 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3587 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3588 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3589 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3590 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3591 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3592 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3593 } 3594 #endif 3595 3596 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL now %x\n", 3597 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3598 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3599 } 3600 3601 if (mode2) { 3602 u32 grph2_cntl; 3603 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3604 3605 if (stop_req > max_stop_req) 3606 stop_req = max_stop_req; 3607 3608 /* 3609 Find the drain rate of the display buffer.
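 For example (illustrative numbers): at 32 bpp, 16 bytes hold 4 pixels, so
 a 135 MHz dot clock drains the FIFO at 135 / (16 / 4) = 33.75 million
 16-byte requests per second.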
3610 */ 3611 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3612 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3613 3614 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3615 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3616 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3617 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3618 if ((rdev->family == CHIP_R350) && 3619 (stop_req > 0x15)) { 3620 stop_req -= 0x10; 3621 } 3622 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3623 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3624 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3625 RADEON_GRPH_CRITICAL_AT_SOF | 3626 RADEON_GRPH_STOP_CNTL); 3627 3628 if ((rdev->family == CHIP_RS100) || 3629 (rdev->family == CHIP_RS200)) 3630 critical_point2 = 0; 3631 else { 3632 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3633 temp_ff.full = dfixed_const(temp); 3634 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3635 if (sclk_ff.full < temp_ff.full) 3636 temp_ff.full = sclk_ff.full; 3637 3638 read_return_rate.full = temp_ff.full; 3639 3640 if (mode1) { 3641 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3642 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3643 } else { 3644 time_disp1_drop_priority.full = 0; 3645 } 3646 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3647 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3648 crit_point_ff.full += dfixed_const_half(0); 3649 3650 critical_point2 = dfixed_trunc(crit_point_ff); 3651 3652 if (rdev->disp_priority == 2) { 3653 critical_point2 = 0; 3654 } 3655 3656 if (max_stop_req - critical_point2 < 4) 3657 critical_point2 = 0; 3658 3659 } 3660 3661 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3662 /* some R300 cards have a problem with this set to 0 */ 3663 critical_point2 = 0x10; 3664 } 3665 3666 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3667 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3668 3669 if ((rdev->family == CHIP_RS400) || 3670 (rdev->family == CHIP_RS480)) { 3671 #if 0 3672 /* attempt to program RS400 disp2 regs correctly ???
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET0(scratch, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	if (ring->rptr_save_reg) {
		u32 next_rptr = ring->wptr + 2 + 3;
		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, ib->length_dw);
}

int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET0(scratch, 0);
	ib.ptr[1] = 0xDEADBEEF;
	ib.ptr[2] = PACKET2(0);
	ib.ptr[3] = PACKET2(0);
	ib.ptr[4] = PACKET2(0);
	ib.ptr[5] = PACKET2(0);
	ib.ptr[6] = PACKET2(0);
	ib.ptr[7] = PACKET2(0);
	ib.length_dw = 8;
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}
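
/**
 * r100_mc_stop - stop all MC clients before MC reprogramming.
 *
 * @rdev: radeon_device pointer
 * @save: area the touched registers are saved to
 *
 * Shuts the CP down and disables VGA aperture access, cursor, overlay
 * and CRTC memory requests, saving the modified registers so that
 * r100_mc_resume() can restore them (r1xx-r4xx).
 */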
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shutdown the CP. We shouldn't need to do that, but better
	 * safe than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
	       S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
	       (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
	       S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
	       C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
		       S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
		       (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
		       S_0003F8_CRTC2_DISPLAY_DIS(1) |
		       S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
		       C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}

static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
			       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for the MC to go idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program the MC; the address space is limited to 32 bits */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}
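
/**
 * r100_clock_startup - set up the clocks needed for startup.
 *
 * @rdev: radeon_device pointer
 *
 * Optionally enables dynamic clock gating, then forces the CP and VIP
 * clocks on; on RV250/RV280 the display clocks are forced on as well
 * (r1xx-r4xx).
 */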
static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
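
/**
 * r100_suspend - suspend callback.
 *
 * @rdev: radeon_device pointer
 *
 * Disables the CP, writeback and interrupts, and on PCI GART parts
 * also the GART, before the device is suspended (r1xx-r4xx).
 * Returns 0 for success.
 */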
int r100_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	drm_free(rdev->bios, M_DRM);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However doing our init sequence with the CP and
 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	}
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_RB_CNTL, 0);
	}
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp) {
		WREG32(RADEON_SCRATCH_UMSK, 0);
	}
}

int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
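
/**
 * r100_mm_rreg - read an MMIO register.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @always_indirect: always use the indirect MM_INDEX/MM_DATA path
 *
 * Registers within the mapped MMIO aperture are read directly;
 * anything beyond it goes through the MM_INDEX/MM_DATA register pair
 * while holding mmio_idx_lock (r1xx-r4xx).
 */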
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		return bus_read_4(rdev->rmmio, reg);
	else {
		uint32_t ret;

		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
		spin_unlock(&rdev->mmio_idx_lock);

		return ret;
	}
}

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		bus_write_4(rdev->rmmio, reg, v);
	else {
		spin_lock(&rdev->mmio_idx_lock);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
		spin_unlock(&rdev->mmio_idx_lock);
	}
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size)
		return bus_read_4(rdev->rio_mem, reg);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size)
		bus_write_4(rdev->rio_mem, reg, v);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
	}
}