/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/rv770.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <uapi_drm/radeon_drm.h>
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"

#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

static void rv770_gpu_init(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);

u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	if (radeon_crtc->crtc_id) {
		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
	} else {
		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
	}
	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		DRM_UDELAY(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
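/**
 * rv770_get_temp - get the GPU temperature in millidegrees (r7xx)
 *
 * @rdev: radeon_device pointer
 *
 * Decode the ASIC_T field of CG_MULT_THERMAL_STATUS: bit 10 set means
 * the sensor saturated low (-256), bit 9 set means it saturated high
 * (255), bit 8 set selects a sign-extended 9-bit reading, otherwise
 * the low 8 bits are used as-is.  The raw reading is evidently in
 * half-degree Celsius units, hence the (actual_temp * 1000) / 2
 * scaling to millidegrees.
 */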
int rv770_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp;

	if (temp & 0x400)
		actual_temp = -256;
	else if (temp & 0x200)
		actual_temp = 255;
	else if (temp & 0x100) {
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;
}

void rv770_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}

/*
 * GART
 */
static int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	if (rdev->family == CHIP_RV740)
		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
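/**
 * rv770_pcie_gart_disable - tear down the PCIE GART (r7xx)
 *
 * @rdev: radeon_device pointer
 *
 * Disable all VM contexts, drop the L1 TLB and L2 cache enables that
 * rv770_pcie_gart_enable() set, and unpin the page table from VRAM.
 */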
static void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv770_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void rv770_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
static void rv770_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
	tmp = RREG32(HDP_DEBUG1);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
void r700_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}

static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	DRM_MDELAY(15);
	WREG32(GRBM_SOFT_RESET, 0);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

void r700_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r700_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

/*
 * Core functions
 */
static void rv770_gpu_init(struct radeon_device *rdev)
{
	int i, j, num_qd_pipes;
	u32 ta_aux_cntl;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 db_debug3;
	u32 num_gs_verts_per_thread;
	u32 vgt_gs_per_es;
	u32 gs_prim_buffer_depth = 0;
	u32 sq_ms_fifo_sizes;
	u32 sq_config;
	u32 sq_thread_resource_mgmt;
	u32 hdp_host_path_cntl;
	u32 sq_dyn_gpr_size_simd_ab_0;
	u32 gb_tiling_config = 0;
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config = 0;
	u32 mc_arb_ramcfg;
	u32 db_debug4, tmp;
	u32 inactive_pipes, shader_pipe_config;
	u32 disabled_rb_mask;
	unsigned active_number;

	/* setup chip specs */
	rdev->config.rv770.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_RV770:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 8;
		rdev->config.rv770.max_simds = 10;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV730:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 2;
		rdev->config.rv770.max_gprs = 128;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	case CHIP_RV710:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 2;
		rdev->config.rv770.max_simds = 2;
		rdev->config.rv770.max_backends = 1;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 192;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 4;
		rdev->config.rv770.max_gs_threads = 8 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 1;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x40;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV740:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x100;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;

		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	default:
		break;
	}

	/* Initialize HDP */
	j = 0;
	for (i = 0; i < 32; i++) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
		j += 0x18;
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* setup tiling, simd, pipe config */
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
		if (!(inactive_pipes & tmp)) {
			active_number++;
		}
		tmp <<= 1;
	}
	if (active_number == 1) {
		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
	} else {
		WREG32(SPI_CONFIG_CNTL, 0);
	}

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
	if (tmp < rdev->config.rv770.max_backends) {
		rdev->config.rv770.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.rv770.max_pipes) {
		rdev->config.rv770.max_pipes = tmp;
	}
	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.rv770.max_simds) {
		rdev->config.rv770.max_simds = tmp;
	}

	switch (rdev->config.rv770.max_tile_pipes) {
	case 1:
	default:
		gb_tiling_config = PIPE_TILING(0);
		break;
	case 2:
		gb_tiling_config = PIPE_TILING(1);
		break;
	case 4:
		gb_tiling_config = PIPE_TILING(2);
		break;
	case 8:
		gb_tiling_config = PIPE_TILING(3);
		break;
	}
	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
					R7XX_MAX_BACKENDS, disabled_rb_mask);
	gb_tiling_config |= tmp << 16;
	rdev->config.rv770.backend_map = tmp;

	if (rdev->family == CHIP_RV770)
		gb_tiling_config |= BANK_TILING(1);
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			gb_tiling_config |= BANK_TILING(1);
		else
			gb_tiling_config |= BANK_TILING(0);
	}
	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
		gb_tiling_config |= ROW_TILING(3);
		gb_tiling_config |= SAMPLE_SPLIT(3);
	} else {
		gb_tiling_config |=
			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
		gb_tiling_config |=
			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
	}

	gb_tiling_config |= BANK_SWAPS(1);
	rdev->config.rv770.tile_config = gb_tiling_config;

	WREG32(GB_TILING_CONFIG, gb_tiling_config);
	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	ta_aux_cntl = RREG32(TA_CNTL_AUX);
	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family != CHIP_RV740)
		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
				       GS_FLUSH_CTL(4) |
				       ACK_FLUSH_CTL(3) |
				       SYNC_FLUSH_CTL));

	if (rdev->family != CHIP_RV770)
		WREG32(SMX_SAR_CTL0, 0x00003f3f);

	db_debug3 = RREG32(DB_DEBUG3);
	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV740:
		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
		break;
	case CHIP_RV710:
	case CHIP_RV730:
	default:
		db_debug3 |= DB_CLK_OFF_DELAY(2);
		break;
	}
	WREG32(DB_DEBUG3, db_debug3);

	if (rdev->family != CHIP_RV770) {
		db_debug4 = RREG32(DB_DEBUG4);
		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
		WREG32(DB_DEBUG4, db_debug4);
	}

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(CP_PERFMON_CNTL, 0);

	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
			    DONE_FIFO_HIWATER(0xe0) |
			    ALU_UPDATE_FIFO_HIWATER(0x8));
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
		break;
	case CHIP_RV740:
	default:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
		break;
	}
	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));
	if (rdev->family == CHIP_RV710)
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

	WREG32(SQ_CONFIG, sq_config);

	WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));

	WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
					NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));

	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
	else
		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);

	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));

	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	if (rdev->family == CHIP_RV710)
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
	else
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));

	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV740:
		gs_prim_buffer_depth = 384;
		break;
	case CHIP_RV710:
		gs_prim_buffer_depth = 128;
		break;
	default:
		break;
	}

	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
	/* Max value for this is 256 */
	if (vgt_gs_per_es > 256)
		vgt_gs_per_es = 256;

	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
	WREG32(VGT_GS_PER_VS, 2);

	/* more default values.  2D/3D driver should adjust as needed */
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);

	WREG32(TCP_CNTL, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(VC_ENHANCE, 0);
}

void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %juM 0x%08jX - 0x%08jX (%juM used)\n",
				(uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start,
				(uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20);
	} else {
		radeon_vram_location(rdev, &rdev->mc, 0);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

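/**
 * rv770_mc_init - probe VRAM and lay out the GPU address space (r7xx)
 *
 * @rdev: radeon_device pointer
 *
 * Derive the memory bus width from the channel size and channel count,
 * read the VRAM size from CONFIG_MEMSIZE, and place the VRAM and GTT
 * apertures via r700_vram_gtt_location().
 */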
static int rv770_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

/**
 * rv770_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r7xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int rv770_copy_dma(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	/* each DMA_PACKET_COPY transfers at most 0xFFFF dwords */
	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFF)
			cur_size_in_dw = 0xFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

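/**
 * rv770_startup - bring the GPU up (r7xx)
 *
 * @rdev: radeon_device pointer
 *
 * Common startup path shared by rv770_init() and rv770_resume(): load
 * microcode, program the MC, enable GART (or AGP), initialize the 3D
 * engine, blitter, writeback and interrupts, and start the CP and DMA
 * rings.
 */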
static int rv770_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	rv770_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	rv770_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		rv770_agp_enable(rdev);
	} else {
		r = rv770_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	rv770_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = rv770_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r = r600_dma_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

int rv770_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = rv770_startup(rdev);
	if (r) {
		DRM_ERROR("rv770 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

int rv770_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r700_cp_stop(rdev);
	r600_dma_stop(rdev);
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	rv770_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call asic-specific functions. This should also allow us
 * to remove a bunch of callbacks like vram_info.
 */
int rv770_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = rv770_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = rv770_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv770_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

void rv770_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	rv770_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r600_fini_microcode(rdev);
	drm_free(rdev->bios, DRM_MEM_DRIVER);
	rdev->bios = NULL;
}

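/**
 * rv770_pcie_gen2_enable - try to switch the PCIE link to gen 2 speed
 *
 * @rdev: radeon_device pointer
 *
 * No-op on IGP, non-PCIE and X2 boards, or when the slot does not
 * advertise 5.0 GT/s support.  Otherwise advertise upconfig capability
 * and, if both link partners support gen 2, request the speed change
 * through PCIE_LC_SPEED_CNTL.
 */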
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* advertise upconfig capability */
	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
				     LC_RECONFIG_ARC_MISSING_ESCAPE);
		link_width_cntl |= lanes | LC_RECONFIG_NOW |
			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	} else {
		link_width_cntl |= LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}