/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ?
			  ib->vm->id : 0) & 0xf;

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on a 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (fence->ring == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
	radeon_ring_write(ring, ref_and_mask); /* MASK */
	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
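 *
 * The rings are stopped by clearing SDMA_RB_ENABLE in each engine's
 * SDMA0_GFX_RB_CNTL register and by disabling IB fetch with a write
 * of 0 to SDMA0_GFX_IB_CNTL, as done in the loop below.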
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - enable/disable the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
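		/* on big-endian hosts the engine must byte-swap IB
		 * contents as it fetches them, so request that here
		 */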
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	/* sdma0 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	/* sdma1 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}

/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);
	RREG32(SRBM_SOFT_RESET);

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
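 *
 * Tear-down mirrors resume in reverse: the gfx rings and compute
 * queues are stopped first, then the MEs are halted, and only then
 * are the ring objects freed.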
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
		 uint64_t src_offset, uint64_t dst_offset,
		 unsigned num_gpu_pages,
		 struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		/* don't leak the semaphore on error */
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
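 *
 * The test emits a single SDMA linear write of 0xDEADBEEF to the
 * VRAM scratch location and then polls that location until the
 * value shows up or rdev->usec_timeout is exceeded.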
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		/* don't leak the ib on error */
		radeon_ib_free(rdev, &ib);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
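 *
 * The decision is based on the GPU soft reset status: if the engine's
 * reset bit is clear, the lockup tracker is updated and false is
 * returned; otherwise activity is forced on the ring and the lockup
 * test is re-run.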
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_set_page - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_page(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (flags & RADEON_VM_PAGE_SYSTEM) {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			ib->ptr[ib->length_dw++] = ndw;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		while (count) {
			ndw = count;
			if (ndw > 0x7FFFF)
				ndw = 0x7FFFF;

			if (flags & RADEON_VM_PAGE_VALID)
				value = addr;
			else
				value = 0;
			/* for physically contiguous pages (vram) */
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = ndw; /* number of entries */
			pe += ndw * 8;
			addr += ndw * incr;
			count -= ndw;
		}
	}
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush on
 * @vm: radeon_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
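 *
 * The flush is emitted as a series of SRBM_WRITE packets that set the
 * page table base address and the SH_MEM_* registers for the VMID,
 * followed by an HDP flush and a write to VM_INVALIDATE_REQUEST to
 * invalidate the TLB.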
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (vm == NULL)
		return;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm->id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm->id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
	radeon_ring_write(ring, ref_and_mask); /* MASK */
	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm->id);
}