/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <uapi_drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <uapi_drm/radeon_drm.h>
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless. I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */

/*
 * Indirect registers accessor
 */
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
	return r;
}

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	addr = (lower_32_bits(addr) >> 8) |
		((upper_32_bits(addr) & 0xff) << 24);
	if (flags & RADEON_GART_PAGE_READ)
		addr |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		addr |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		addr |= R300_PTE_UNSNOOPED;
	return addr;
}

void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t entry)
{
	volatile uint32_t *ptr = rdev->gart.ptr;

	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers, it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables */
	ptr += i;
	*ptr = (uint32_t)entry;
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RV370 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
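	/* only 32-bit addresses are programmed here, so the HI halves are
	 * simply cleared */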
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	radeon_gart_table_vram_unpin(rdev);
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
	/* Flush 3D cache */
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch(rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(ring, gb_tile_config);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(ring,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(ring,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev, ring, false);
}

static void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

static void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_asic_reset(struct radeon_device *rdev, bool hard)
{
	struct r100_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(device_get_parent(rdev->dev->bsddev));
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* resetting the CP seems to be problematic: sometimes it ends up
	 * hard locking the computer, but it's necessary for a successful
	 * reset. More testing & playing is needed on R3XX/R4XX to find a
	 * reliable solution (if any).
	 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(device_get_parent(rdev->dev->bsddev));
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch(reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
		} else {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_TXO_MICRO_TILE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

			tmp = idx_value + ((u32)reloc->gpu_offset);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
		    p->rdev->cmask_filp != p->filp) {
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
			return -EINVAL;
		}
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		track->cb_dirty = true;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 5:
			if (p->rdev->family < CHIP_RV515) {
				DRM_ERROR("Invalid color buffer format (%d)!\n",
					  ((idx_value >> 21) & 0xF));
				return -EINVAL;
			}
			/* Pass through. */
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		track->cb_dirty = true;
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		track->zb_dirty = true;
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		track->zb_dirty = true;
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_DEPTHMICROTILE_TILED;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->zb.pitch = idx_value & 0x3FFC;
		track->zb_dirty = true;
		break;
	case 0x4104:
		/* TX_ENABLE */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		track->tex_dirty = true;
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_FL_I16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_FL_I16A16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Pass through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		track->tex_dirty = true;
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		track->tex_dirty = true;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		track->cb_dirty = true;
		break;
	case 0x43a4:
		/* SC_HYPERZ_EN */
		/* r300c emits this register - we need to disable hyperz for it
		 * without complaining */
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & 0x1)
				ib[idx] = idx_value & ~1;
		}
		break;
	case 0x4f1c:
		/* ZB_BW_CNTL */
		track->zb_cb_clear = !!(idx_value & (1 << 5));
		track->cb_dirty = true;
		track->zb_dirty = true;
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & (R300_HIZ_ENABLE |
					 R300_RD_COMP_ENABLE |
					 R300_WR_COMP_ENABLE |
					 R300_FAST_FILL_ENABLE))
				goto fail;
		}
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		track->cb_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_OFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->aa.robj = reloc->robj;
		track->aa.offset = idx_value;
		track->aa_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_RB3D_AARESOLVE_PITCH:
		track->aa.pitch = idx_value & 0x3FFE;
		track->aa_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_CTL:
		track->aaresolve = idx_value & 0x1;
		track->aa_dirty = true;
		break;
	case 0x4f30: /* ZB_MASK_OFFSET */
	case 0x4f34: /* ZB_ZMASK_PITCH */
	case 0x4f44: /* ZB_HIZ_OFFSET */
	case 0x4f54: /* ZB_HIZ_PITCH */
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		break;
	case 0x4028:
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		/* GB_Z_PEQ_CONFIG */
		if (p->rdev->family >= CHIP_RV350)
			break;
		goto fail;
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
	       reg, idx, idx_value);
	return -EINVAL;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch(pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_3D_CLEAR_CMASK:
		if (p->rdev->cmask_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
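		/* idx now points past this packet; validate its body by type */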
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC, should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

int r300_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GARTs are disabled */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r300_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}