/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless. I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for HW bug: do the flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t addr, uint32_t flags)
{
	volatile uint32_t *ptr = rdev->gart.ptr;

	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24);
	if (flags & RADEON_GART_PAGE_READ)
		addr |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		addr |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		addr |= R300_PTE_UNSNOOPED;
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables. */
	ptr += i;
	*ptr = (uint32_t)addr;
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RV370 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	radeon_gart_table_vram_unpin(rdev);
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
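
/* Emit a fence on the CP ring: write the scissor registers so SC/US assert
 * idle, flush the destination and Z caches, wait for the engines to go idle
 * and clean, invalidate the HDP read cache through the ring (see the
 * HOST_PATH_CNTL erratum above), then write the fence sequence number to the
 * scratch register and fire the software interrupt.
 */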
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
	/* Flush 3D cache */
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(ring, gb_tile_config);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(ring,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(ring,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev, ring, false);
}

static void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

static void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_asic_reset(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(device_get_parent(rdev->dev->bsddev));
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard locking the computer, but it's necessary for a successful
	 * reset. More testing & playing is needed on R3XX/R4XX to find a
	 * reliable solution (if there is any).
	 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(device_get_parent(rdev->dev->bsddev));
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeed\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}
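
/* Validate a type-0 (register write) packet from a userspace command stream:
 * only registers handled below are accepted, the tracked rendering state
 * (color/Z buffers, textures, draw setup) is recorded for the later draw
 * checks, and buffer offsets are patched with their relocated GPU addresses.
 */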
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
		} else {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_TXO_MICRO_TILE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

			tmp = idx_value + ((u32)reloc->gpu_offset);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
		    p->rdev->cmask_filp != p->filp) {
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
			return -EINVAL;
		}
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		track->cb_dirty = true;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 5:
			if (p->rdev->family < CHIP_RV515) {
				DRM_ERROR("Invalid color buffer format (%d)!\n",
					  ((idx_value >> 21) & 0xF));
				return -EINVAL;
			}
			/* Pass through. */
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		track->cb_dirty = true;
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		track->zb_dirty = true;
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		track->zb_dirty = true;
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_DEPTHMICROTILE_TILED;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->zb.pitch = idx_value & 0x3FFC;
		track->zb_dirty = true;
		break;
	case 0x4104:
		/* TX_ENABLE */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		track->tex_dirty = true;
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_FL_I16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_FL_I16A16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Pass through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		track->tex_dirty = true;
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		track->tex_dirty = true;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		track->cb_dirty = true;
		break;
	case 0x43a4:
		/* SC_HYPERZ_EN */
		/* r300c emits this register - we need to disable hyperz for it
		 * without complaining */
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & 0x1)
				ib[idx] = idx_value & ~1;
		}
		break;
	case 0x4f1c:
		/* ZB_BW_CNTL */
		track->zb_cb_clear = !!(idx_value & (1 << 5));
		track->cb_dirty = true;
		track->zb_dirty = true;
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & (R300_HIZ_ENABLE |
					 R300_RD_COMP_ENABLE |
					 R300_WR_COMP_ENABLE |
					 R300_FAST_FILL_ENABLE))
				goto fail;
		}
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		track->cb_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_OFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->aa.robj = reloc->robj;
		track->aa.offset = idx_value;
		track->aa_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_RB3D_AARESOLVE_PITCH:
		track->aa.pitch = idx_value & 0x3FFE;
		track->aa_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_CTL:
		track->aaresolve = idx_value & 0x1;
		track->aa_dirty = true;
		break;
	case 0x4f30: /* ZB_MASK_OFFSET */
	case 0x4f34: /* ZB_ZMASK_PITCH */
	case 0x4f44: /* ZB_HIZ_OFFSET */
	case 0x4f54: /* ZB_HIZ_PITCH */
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		break;
	case 0x4028:
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		/* GB_Z_PEQ_CONFIG */
		if (p->rdev->family >= CHIP_RV350)
			break;
		goto fail;
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
	       reg, idx, idx_value);
	return -EINVAL;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1)
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1)
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_3D_CLEAR_CMASK:
		if (p->rdev->cmask_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
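
/* Top level command stream parser: walk the indirect buffer packet by packet,
 * letting r100_cs_parse_packet0() screen type-0 register writes against
 * r300_reg_safe_bm and r300_packet0_check(), and validating type-3
 * (draw/state) packets through r300_packet3_check().
 */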
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	kfree(p->track);
	p->track = NULL;
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stop all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC, should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
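
/* Resume: make sure the GARTs are off, reset the GPU, re-POST the card from
 * the combios and then run the normal r300_startup() sequence.
 */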
int r300_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r300_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}