/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   done through MMIO to flush the host path read cache; this leads to a
 *   HARDLOCKUP. However, scheduling such a write on the ring seems harmless;
 *   I suspect the CP read collides with the flush somehow, or maybe the MC,
 *   hard to tell. (Jerome Glisse)
 */

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	addr = (lower_32_bits(addr) >> 8) |
		((upper_32_bits(addr) & 0xff) << 24);
	if (flags & RADEON_GART_PAGE_READ)
		addr |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		addr |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		addr |= R300_PTE_UNSNOOPED;
	return addr;
}

void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t entry)
{
	void __iomem *ptr = rdev->gart.ptr;

	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it will get swapped on the way into VRAM - so there is
	 * no need for cpu_to_le32 on VRAM tables */
	writel(entry, ((uint8_t __iomem *)ptr) + (i * 4));
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RV370 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	radeon_gart_table_vram_unpin(rdev);
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are IB scheduling and buffer moves) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
	/* Flush 3D cache */
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(ring, gb_tile_config);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
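	/* Auto-configure the destination pipes and clear GB_SELECT/GB_ENABLE
	 * before the cache flushes below. */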
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(ring,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(ring,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev, ring, false);
}

static void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

static void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
break; 360 } 361 WREG32(R300_GB_TILE_CONFIG, gb_tile_config); 362 363 if (r100_gui_wait_for_idle(rdev)) { 364 printk(KERN_WARNING "Failed to wait GUI idle while " 365 "programming pipes. Bad things might happen.\n"); 366 } 367 368 tmp = RREG32(R300_DST_PIPE_CONFIG); 369 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); 370 371 WREG32(R300_RB2D_DSTCACHE_MODE, 372 R300_DC_AUTOFLUSH_ENABLE | 373 R300_DC_DC_DISABLE_IGNORE_PE); 374 375 if (r100_gui_wait_for_idle(rdev)) { 376 printk(KERN_WARNING "Failed to wait GUI idle while " 377 "programming pipes. Bad things might happen.\n"); 378 } 379 if (r300_mc_wait_for_idle(rdev)) { 380 printk(KERN_WARNING "Failed to wait MC idle while " 381 "programming pipes. Bad things might happen.\n"); 382 } 383 DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n", 384 rdev->num_gb_pipes, rdev->num_z_pipes); 385 } 386 387 int r300_asic_reset(struct radeon_device *rdev) 388 { 389 struct r100_mc_save save; 390 u32 status, tmp; 391 int ret = 0; 392 393 status = RREG32(R_000E40_RBBM_STATUS); 394 if (!G_000E40_GUI_ACTIVE(status)) { 395 return 0; 396 } 397 r100_mc_stop(rdev, &save); 398 status = RREG32(R_000E40_RBBM_STATUS); 399 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 400 /* stop CP */ 401 WREG32(RADEON_CP_CSQ_CNTL, 0); 402 tmp = RREG32(RADEON_CP_RB_CNTL); 403 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 404 WREG32(RADEON_CP_RB_RPTR_WR, 0); 405 WREG32(RADEON_CP_RB_WPTR, 0); 406 WREG32(RADEON_CP_RB_CNTL, tmp); 407 /* save PCI state */ 408 pci_save_state(device_get_parent(rdev->dev->bsddev)); 409 /* disable bus mastering */ 410 r100_bm_disable(rdev); 411 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | 412 S_0000F0_SOFT_RESET_GA(1)); 413 RREG32(R_0000F0_RBBM_SOFT_RESET); 414 mdelay(500); 415 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 416 mdelay(1); 417 status = RREG32(R_000E40_RBBM_STATUS); 418 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 419 /* resetting the CP seems to be problematic sometimes it end up 420 * hard locking the computer, but it's necessary for successful 421 * reset more test & playing is needed on R3XX/R4XX to find a 422 * reliable (if any solution) 423 */ 424 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); 425 RREG32(R_0000F0_RBBM_SOFT_RESET); 426 mdelay(500); 427 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 428 mdelay(1); 429 status = RREG32(R_000E40_RBBM_STATUS); 430 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 431 /* restore PCI & busmastering */ 432 pci_restore_state(device_get_parent(rdev->dev->bsddev)); 433 r100_enable_bm(rdev); 434 /* Check if GPU is idle */ 435 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 436 dev_err(rdev->dev, "failed to reset GPU\n"); 437 ret = -1; 438 } else 439 dev_info(rdev->dev, "GPU reset succeed\n"); 440 r100_mc_resume(rdev, &save); 441 return ret; 442 } 443 444 /* 445 * r300,r350,rv350,rv380 VRAM info 446 */ 447 void r300_mc_init(struct radeon_device *rdev) 448 { 449 u64 base; 450 u32 tmp; 451 452 /* DDR for all card after R300 & IGP */ 453 rdev->mc.vram_is_ddr = true; 454 tmp = RREG32(RADEON_MEM_CNTL); 455 tmp &= R300_MEM_NUM_CHANNELS_MASK; 456 switch (tmp) { 457 case 0: rdev->mc.vram_width = 64; break; 458 case 1: rdev->mc.vram_width = 128; break; 459 case 2: rdev->mc.vram_width = 256; break; 460 default: rdev->mc.vram_width = 128; break; 461 } 462 r100_vram_init_sizes(rdev); 463 base = rdev->mc.aper_base; 464 if (rdev->flags & RADEON_IS_IGP) 465 base = 
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
		} else {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_TXO_MICRO_TILE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

			tmp = idx_value + ((u32)reloc->gpu_offset);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
		    p->rdev->cmask_filp != p->filp) {
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
			return -EINVAL;
		}
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		track->cb_dirty = true;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 5:
			if (p->rdev->family < CHIP_RV515) {
				DRM_ERROR("Invalid color buffer format (%d)!\n",
					  ((idx_value >> 21) & 0xF));
				return -EINVAL;
			}
			/* Pass through. */
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		track->cb_dirty = true;
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		track->zb_dirty = true;
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		track->zb_dirty = true;
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_DEPTHMICROTILE_TILED;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->zb.pitch = idx_value & 0x3FFC;
		track->zb_dirty = true;
		break;
	case 0x4104:
		/* TX_ENABLE */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		track->tex_dirty = true;
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_FL_I16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_FL_I16A16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Pass through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		track->tex_dirty = true;
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		track->tex_dirty = true;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		track->cb_dirty = true;
		break;
	case 0x43a4:
		/* SC_HYPERZ_EN */
		/* r300c emits this register - we need to disable hyperz for it
		 * without complaining */
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & 0x1)
				ib[idx] = idx_value & ~1;
		}
		break;
	case 0x4f1c:
		/* ZB_BW_CNTL */
		track->zb_cb_clear = !!(idx_value & (1 << 5));
		track->cb_dirty = true;
		track->zb_dirty = true;
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & (R300_HIZ_ENABLE |
					 R300_RD_COMP_ENABLE |
					 R300_WR_COMP_ENABLE |
					 R300_FAST_FILL_ENABLE))
				goto fail;
		}
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		track->cb_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_OFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->aa.robj = reloc->robj;
		track->aa.offset = idx_value;
		track->aa_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_RB3D_AARESOLVE_PITCH:
		track->aa.pitch = idx_value & 0x3FFE;
		track->aa_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_CTL:
		track->aaresolve = idx_value & 0x1;
		track->aa_dirty = true;
		break;
	case 0x4f30: /* ZB_MASK_OFFSET */
	case 0x4f34: /* ZB_ZMASK_PITCH */
	case 0x4f44: /* ZB_HIZ_OFFSET */
	case 0x4f54: /* ZB_HIZ_PITCH */
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		break;
	case 0x4028:
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		/* GB_Z_PEQ_CONFIG */
		if (p->rdev->family >= CHIP_RV350)
			break;
		goto fail;
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
	       reg, idx, idx_value);
	return -EINVAL;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_3D_CLEAR_CMASK:
		if (p->rdev->cmask_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
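		/* Advance past this packet: one header dword plus the
		 * pkt.count + 1 data dwords that follow it. */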
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunk_ib->length_dw);
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC, should be a 32bits limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

int r300_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GARTs are not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r300_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}