/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}
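/*
 * Note on the encoding above (an editor's sketch, not from the register
 * spec): SDMA0_GFX_RB_RPTR/WPTR hold a dword-aligned byte offset into the
 * ring buffer, so bits [1:0] are always zero and the value is masked to
 * 0x3fffc (bits [17:2]) before use:
 *
 *	dw_index = (reg_val & 0x3fffc) >> 2;
 *	reg_val  = (dw_index << 2) & 0x3fffc;	(inverse, see set_wptr below)
 */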
/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
	(void)RREG32(reg);
}

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
					 int ridx)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* reference */
	radeon_ring_write(ring, ref_and_mask); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
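/*
 * Roughly what the POLL_REG_MEM packet above asks the SDMA microcode to
 * do (an orientation sketch, not the exact microcode behaviour):
 *
 *	write(GPU_HDP_FLUSH_REQ, ref_and_mask);
 *	do {
 *		val = read(GPU_HDP_FLUSH_DONE);
 *	} while ((val & ref_and_mask) != ref_and_mask && retries--);
 *
 * EXTRA_FUNC(3) selects the "equal" compare function and the last dword
 * packs the retry count (0xfff) and poll interval (10).
 */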
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr));

	return true;
}
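/*
 * Packet note (derived from the code above): the semaphore address must
 * be 8-byte aligned, hence the 0xfffffff8 mask on the low dword, and the
 * SDMA_SEMAPHORE_EXTRA_S header bit selects signal rather than wait, so
 * it is set only when emit_wait is false.
 */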
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(rdev);
		cik_sdma_rlc_stop(rdev);
	}

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
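/*
 * Worked example for the RB_CNTL setup above (ring size assumed for
 * illustration): a 256KB ring is 0x10000 dwords, order_base_2() of that
 * is 16, and the size field sits at bit 1, so rb_cntl starts out as
 * 16 << 1 = 0x20 before the writeback/enable bits are ORed in.
 */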
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	/* sdma0 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	/* sdma1 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}
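/*
 * Layout note (an assumption consistent with the loops above, not
 * separately verified): the firmware image is CIK_SDMA_UCODE_SIZE
 * big-endian dwords shared by both engines, and SDMA0_UCODE_ADDR is
 * expected to auto-increment as dwords are streamed into
 * SDMA0_UCODE_DATA, which is why the address is only written once per
 * engine.
 */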
/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
		 uint64_t src_offset, uint64_t dst_offset,
		 unsigned num_gpu_pages,
		 struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(src_offset));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset));
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
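/*
 * Sizing note for cik_copy_dma() (derived from the constants above): one
 * linear COPY packet moves at most 0x1fffff bytes (just under 2MB) and
 * costs 7 dwords of ring space, so the ring is locked for
 * num_loops * 7 dwords plus 14 spare dwords for the semaphore sync and
 * fence packets.
 */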
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		radeon_ib_free(rdev, &ib);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}
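/*
 * Both tests above use the same 5-dword linear WRITE packet, laid out
 * as emitted here:
 *
 *	dw0: SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *	dw1: destination address bits [31:2] (dword aligned)
 *	dw2: destination address bits [63:32]
 *	dw3: number of dwords to follow (1)
 *	dw4: payload (0xDEADBEEF)
 */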
/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
			    struct radeon_ib *ib,
			    uint64_t pe, uint64_t src,
			    unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * cik_sdma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
			     struct radeon_ib *ib,
			     uint64_t pe,
			     uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
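/*
 * Chunking in the two PTE helpers above (derived from the code): the
 * copy path moves at most 0x1FFFF8 bytes per packet (0x3FFFF PTEs of
 * 8 bytes each), while the write path emits at most 0xFFFFE payload
 * dwords (0x7FFFF PTEs of 2 dwords each) after its 4-dword header.
 */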
/**
 * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 */
void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 * @vm: radeon_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm->id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm->id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, ridx);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm->id);
}
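/*
 * The SRBM_WRITE packets used throughout cik_dma_vm_flush() share one
 * 3-dword shape; the 0xf000 in the header is assumed to be the 4-bit
 * byte-enable for the register write, shifted into the extra-bits field:
 *
 *	dw0: SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)
 *	dw1: register offset in dwords (byte address >> 2)
 *	dw2: value to write
 */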