/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "evergreen.h"
#include "evergreend.h"

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq
 * number, and a DMA trap packet to generate an interrupt if
 * needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}

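/*
 * Ring-space sketch (illustrative helper, not called by the driver): the
 * sequence above always costs 8 DWs -- 4 for the FENCE packet, 1 for the
 * TRAP and 3 for the SRBM_WRITE HDP flush -- which is what a caller
 * reserving ring space by hand would need to pass to radeon_ring_lock().
 */
static inline unsigned int evergreen_dma_fence_emit_dw(void)
{
	return 4 + 1 + 3; /* FENCE + TRAP + HDP flush */
}
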
/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		/* Compute the rptr value that will follow the INDIRECT_BUFFER
		 * packet: skip the 4 DW WRITE packet emitted here, mirror the
		 * NOP padding applied below, then add the 3 DW IB packet.
		 */
		u32 next_rptr = ring->wptr + 4;

		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

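/*
 * Alignment sketch (illustrative helper, not called by the driver): the NOP
 * loop above pads until the write pointer is 5 (mod 8) so that the 3 DW
 * INDIRECT_BUFFER packet ends exactly on an 8 DW boundary. The pad count
 * can also be computed in closed form:
 */
static inline unsigned int evergreen_dma_ib_pad_dw(unsigned int wptr)
{
	return (5 - (wptr & 7)) & 7; /* NOPs needed until (wptr & 7) == 5 */
}
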
/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object with embedded fence
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	/* each COPY packet moves at most 0xFFFFF DWs, so split the transfer */
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

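/*
 * Usage sketch (hypothetical caller, not part of the driver): copy one GPU
 * page synchronously and release the fence. Assumes src/dst are valid GPU
 * addresses and resv is a valid reservation object for the buffer.
 */
static inline int evergreen_copy_dma_sync_sketch(struct radeon_device *rdev,
						 uint64_t src, uint64_t dst,
						 struct dma_resv *resv)
{
	struct radeon_fence *fence;
	int r;

	fence = evergreen_copy_dma(rdev, src, dst, 1, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
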
/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
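
/*
 * Usage sketch (hypothetical helper, not part of the driver): poll the DMA
 * engine's lockup state from its ring index. Assumes the evergreen DMA ring
 * lives at R600_RING_TYPE_DMA_INDEX, as elsewhere in the radeon driver.
 */
static inline bool evergreen_dma_hung_sketch(struct radeon_device *rdev)
{
	return evergreen_dma_is_lockup(rdev,
				       &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}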