xref: /dragonfly/sys/dev/drm/radeon/evergreen_dma.c (revision 7dcf36dc)
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	/* write the fence */
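	/* The fence address is split across two dwords: the dword-aligned
	 * low 32 bits and the low byte of the upper half, which presumably
	 * reflects the 40-bit GPU address range the DMA engine decodes.
	 */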
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
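	/* The SRBM_WRITE payload carries the register as a dword offset in
	 * the low bits and (assuming the usual radeon SRBM packet layout) a
	 * byte-enable mask in bits 16-19; writing 1 with all bytes enabled
	 * kicks the HDP coherency flush.
	 */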
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
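		/* Pre-compute where the read pointer will be once the IB
		 * packet has been fetched: skip the 4-dword WRITE packet
		 * below, pad to the (wptr & 7) == 5 alignment the
		 * INDIRECT_BUFFER packet requires, then add its 3 dwords.
		 * That value is written to the write-back buffer so the
		 * driver can track rptr without a register read.
		 */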
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
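	/* The IB packet takes a 32-byte aligned base address (hence the
	 * 0xFFFFFFE0 mask); the final dword packs the IB size in dwords
	 * above bit 12 with the upper address byte (presumably bits 32-39
	 * of a 40-bit GPU address) in the low byte.
	 */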
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU paging using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 * Returns the fence for the copy, or an ERR_PTR on failure.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

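	/* The copy is sized in dwords and split into chunks of at most
	 * 0xFFFFF dwords each (the clamp applied per COPY packet below).
	 * Each chunk takes a 5-dword packet; the extra 11 dwords reserved
	 * here are headroom for the semaphore sync and fence packets
	 * emitted around the copy.
	 */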
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

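	/* Pull the fences to wait on out of the reservation object, then
	 * have this ring synchronize with any of them that were emitted on
	 * other rings before the copy packets execute.
	 */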
	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

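	/* If the soft-reset status does not implicate the DMA engine,
	 * refresh the lockup tracker and report the ring as healthy;
	 * otherwise fall through to the generic check of whether the ring
	 * has made progress since the last update.
	 */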
	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}