/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine. The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 */

/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
{
        return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
{
        return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
}

/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
        u32 rb_cntl = RREG32(DMA_RB_CNTL);

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL, rb_cntl);

        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}
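
/*
 * Illustrative only, not used by the driver: a minimal sketch of the
 * byte-offset/dword-index conversion performed by the ring pointer
 * accessors above, assuming the same 0x3fffc byte-offset mask they
 * use. r600_dma_set_wptr() performs the inverse shift-and-mask.
 */
static inline u32 r600_dma_reg_to_dw_index(u32 reg_val)
{
        /* bits [17:2] of the register hold a byte offset; convert to dwords */
        return (reg_val & 0x3fffc) >> 2;
}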

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        int r;

        /* Reset dma */
        if (rdev->family >= CHIP_RV770)
                WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
        else
                WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
        RREG32(SRBM_SOFT_RESET);
        udelay(50);
        WREG32(SRBM_SOFT_RESET, 0);

        WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

        /* Set ring buffer size in dwords */
        rb_bufsz = order_base_2(ring->ring_size / 4);
        rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
        rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
        WREG32(DMA_RB_CNTL, rb_cntl);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(DMA_RB_RPTR, 0);
        WREG32(DMA_RB_WPTR, 0);

        /* set the wb address whether it's enabled or not */
        WREG32(DMA_RB_RPTR_ADDR_HI,
               upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
        WREG32(DMA_RB_RPTR_ADDR_LO,
               ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

        if (rdev->wb.enabled)
                rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

        WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

        /* enable DMA IBs */
        ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
        ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
        WREG32(DMA_IB_CNTL, ib_cntl);

        dma_cntl = RREG32(DMA_CNTL);
        dma_cntl &= ~CTXEMPTY_INT_ENABLE;
        WREG32(DMA_CNTL, dma_cntl);

        if (rdev->family >= CHIP_RV770)
                WREG32(DMA_MODE, 1);

        ring->wptr = 0;
        WREG32(DMA_RB_WPTR, ring->wptr << 2);

        ring->rptr = RREG32(DMA_RB_RPTR) >> 2;

        WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

        ring->ready = true;

        r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
        r600_dma_stop(rdev);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = r600_gpu_check_soft_reset(rdev);

        if (!(reset_mask & RADEON_RESET_DMA)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activities */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}
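
/*
 * The tests below build packets with the DMA_PACKET() macro from
 * r600d.h. As a reading aid, and assuming the r6xx header layout in
 * that file, the DMA packet header encodes:
 *
 *   [31:28] opcode    (WRITE, COPY, FENCE, TRAP, SEMAPHORE, NOP, ...)
 *   [23]    't' bit   (e.g. tiled vs. linear addressing)
 *   [22]    's' bit   (sub-op select, e.g. signal vs. wait semaphore)
 *   [15:0]  'n' count (usually a dword count for the payload)
 *
 * so DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1) is a linear write of a
 * single dword, followed by the low 32 address bits, the upper 8 bits
 * of the 40-bit address, and the data dword itself.
 */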

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        unsigned i;
        int r;
        volatile uint32_t *ptr = rdev->vram_scratch.ptr;
        u32 tmp;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        *ptr = tmp;

        r = radeon_ring_lock(rdev, ring, 4);
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
        }
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
        radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring);

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = *ptr;
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
                              struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* write the fence */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
        radeon_ring_write(ring, lower_32_bits(fence->seq));
        /* generate an interrupt */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 s = emit_wait ? 0 : 1;

        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
}
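
/*
 * A minimal sketch of the IB lifecycle that r600_dma_ib_test() below
 * walks through, with error handling omitted; only calls that already
 * appear in this file are used:
 *
 *      struct radeon_ib ib;
 *
 *      radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
 *      ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
 *      ...address and payload dwords...
 *      ib.length_dw = 4;
 *      radeon_ib_schedule(rdev, &ib, NULL);
 *      radeon_fence_wait(ib.fence, false);
 *      radeon_ib_free(rdev, &ib);
 */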

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
        struct radeon_ib ib;
        unsigned i;
        int r;
        volatile uint32_t *ptr = rdev->vram_scratch.ptr;
        u32 tmp = 0;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        *ptr = tmp;

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
        }

        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
        ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
        ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                return r;
        }
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = *ptr;
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
        radeon_ib_free(rdev, &ib);
        return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* The indirect buffer packet must end on an 8 DW boundary in the
         * DMA ring. The packet itself is 3 DW, so it must start at an
         * offset of 5 mod 8; pad as necessary with NOPs until wptr
         * reaches that alignment.
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
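
/*
 * Worked example for the copy loop below, assuming the usual 4 KiB
 * RADEON_GPU_PAGE_SIZE: copying 64 GPU pages is 64 << 12 = 262144
 * bytes = 65536 dwords. Each COPY packet moves at most 0xFFFE (65534)
 * dwords, so DIV_ROUND_UP(65536, 0xFFFE) = 2 packets are emitted, the
 * second carrying the remaining 2 dwords. The ring lock reserves
 * num_loops * 4 dwords for the COPY packets plus 8 more, enough for
 * the semaphore (3 dwords) and fence/trap (5 dwords) packets emitted
 * around the copy.
 */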

/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset, uint64_t dst_offset,
                  unsigned num_gpu_pages,
                  struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
        r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        if (radeon_fence_need_sync(*fence, ring->idx)) {
                radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
                                            ring->idx);
                radeon_fence_note_sync(*fence, ring->idx);
        } else {
                radeon_semaphore_free(rdev, &sem, NULL);
        }

        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
                if (cur_size_in_dw > 0xFFFE)
                        cur_size_in_dw = 0xFFFE;
                size_in_dw -= cur_size_in_dw;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, src_offset & 0xfffffffc);
                radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
                                         (upper_32_bits(src_offset) & 0xff)));
                src_offset += cur_size_in_dw * 4;
                dst_offset += cur_size_in_dw * 4;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}
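
/*
 * A hypothetical caller sketch (not driver code): r600_copy_dma() is
 * normally reached through the asic copy callbacks, but invoked
 * directly it would look roughly like this, with 'src', 'dst' and
 * 'npages' assumed to be valid GPU addresses and a page count:
 *
 *      struct radeon_fence *fence = NULL;
 *      int r;
 *
 *      r = r600_copy_dma(rdev, src, dst, npages, &fence);
 *      if (!r)
 *              r = radeon_fence_wait(fence, false);
 *      radeon_fence_unref(&fence);
 */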