/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine. The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 */

/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

	return (rptr & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}
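/*
 * Illustrative sketch, not driver code: the hardware rptr/wptr
 * registers hold byte offsets into the ring, while radeon_ring
 * tracks pointers in dwords. The 0x3fffc mask keeps the offset
 * dword aligned (and appears to bound the ring at 256 KiB), and
 * the 2-bit shift converts between the two units, as the three
 * callbacks above do.
 */
#if 0
static inline u32 r600_dma_reg_to_dw_index(u32 reg)	/* hypothetical helper */
{
	return (reg & 0x3fffc) >> 2;	/* byte offset -> dword ring index */
}

static inline u32 r600_dma_dw_index_to_reg(u32 dw)	/* hypothetical helper */
{
	return (dw << 2) & 0x3fffc;	/* dword ring index -> byte offset */
}
#endif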
/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = order_base_2(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}
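/*
 * Illustrative sketch, not driver code: r600_dma_resume() encodes the
 * ring size as a power-of-two exponent in dwords, shifted into the
 * DMA_RB_CNTL size field (the shift by 1 above suggests the field
 * starts at bit 1). For example, a 64 KiB ring would be programmed as
 * 65536 / 4 = 16384 dwords, order_base_2(16384) = 14, so
 * rb_cntl = 14 << 1 = 0x1c before any enable/swap bits are ORed in.
 */
#if 0
static u32 r600_dma_rb_cntl_for_size(u32 ring_size_bytes)	/* hypothetical helper */
{
	u32 rb_bufsz = order_base_2(ring_size_bytes / 4);	/* log2 of size in dwords */

	return rb_bufsz << 1;	/* size field only; caller ORs in enable bits */
}
#endif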
/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	volatile uint32_t *ptr = rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	*ptr = tmp;

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = *ptr;
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}
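/*
 * Illustrative sketch, not driver code: the fence, trap, and semaphore
 * emits in this file all build their headers with the DMA_PACKET()
 * macro from r600d.h. Based on that macro, the header appears to pack
 * the opcode in the top nibble, single-bit t/s flags at bits 23 and 22,
 * and a 16-bit count in the low word; treat the exact field meanings
 * per opcode as an assumption and consult r600d.h for the encoding.
 */
#if 0
static u32 r600_dma_pkt_hdr(u32 cmd, u32 t, u32 s, u32 n)	/* hypothetical mirror of DMA_PACKET() */
{
	return ((cmd & 0xF) << 28) | ((t & 0x1) << 23) |
	       ((s & 0x1) << 22) | (n & 0xFFFF);
}
#endif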
/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	volatile uint32_t *ptr = rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	*ptr = tmp;

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = *ptr;
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
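/*
 * Illustrative sketch, not driver code: the INDIRECT_BUFFER packet
 * emitted above is 3 dwords, so padding with NOPs until
 * (wptr & 7) == 5 places it in dwords 5..7 of an 8-dword group,
 * i.e. it ends exactly on the required 8 DW boundary. The
 * next_rptr computation mirrors this: wptr + 4 skips the 4-dword
 * WRITE packet, the round-up to 5 (mod 8) accounts for the NOP
 * padding, and the final + 3 covers the IB packet itself.
 */
#if 0
static u32 r600_dma_nop_pad_count(u32 wptr)	/* hypothetical helper */
{
	u32 pad = 0;

	while (((wptr + pad) & 7) != 5)	/* same loop as above, just counted */
		pad++;
	return pad;
}
#endif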
/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset, uint64_t dst_offset,
		  unsigned num_gpu_pages,
		  struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
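/*
 * Illustrative sketch, not driver code: r600_copy_dma() splits a copy
 * into chunks of at most 0xFFFE dwords (presumably the COPY packet's
 * 16-bit count field, with 0xFFFF avoided), and each chunk costs
 * 4 ring dwords, hence the num_loops * 4 + 8 reservation above
 * (the extra 8 covering the semaphore sync and fence packets).
 * For a single 4 KiB GPU page: 4096 / 4 = 1024 dwords -> one loop.
 */
#if 0
static unsigned r600_dma_copy_loops(unsigned num_gpu_pages)	/* hypothetical helper */
{
	u32 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;

	return DIV_ROUND_UP(size_in_dw, 0xFFFE);	/* one COPY packet per loop */
}
#endif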