/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
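
/*
 * Orientation sketch (not part of this file's code): these callbacks are
 * hooked up through the driver's &drm_driver structure (see amdgpu_drv.c),
 * roughly along these lines for the 4.x PRIME interface; the exact field
 * list may differ between kernel versions:
 *
 *	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export          = amdgpu_gem_prime_export,
 *	.gem_prime_import          = amdgpu_gem_prime_import,
 *	.gem_prime_res_obj         = amdgpu_gem_prime_res_obj,
 *	.gem_prime_get_sg_table    = amdgpu_gem_prime_get_sg_table,
 *	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
 *	.gem_prime_vmap            = amdgpu_gem_prime_vmap,
 *	.gem_prime_vunmap          = amdgpu_gem_prime_vunmap,
 *	.gem_prime_mmap            = amdgpu_gem_prime_mmap,
 */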

#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the buffer object's memory.
 */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
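
/*
 * For orientation (assumed call chain, simplified): the PRIME core's
 * drm_gem_map_dma_buf() uses the hook above to obtain the sg_table and then
 * DMA-maps it for the importing device, roughly:
 *
 *	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 *	dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
 */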

/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the buffer object's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the buffer object's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
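
/*
 * Illustrative importer-side sketch (not code from this file): another
 * in-kernel consumer reaches the two helpers above indirectly through the
 * generic dma-buf interface, e.g.
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);      (lands in amdgpu_gem_prime_vmap)
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		... CPU access through vaddr ...
 *		dma_buf_vunmap(dmabuf, vaddr);   (lands in amdgpu_gem_prime_vunmap)
 *	}
 */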

/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the buffer object's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or negative error code.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
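	/*
	 * CPU mmap of PRIME-shared buffers is not wired up in this DragonFly
	 * port, so the hook simply fails; the original Linux implementation
	 * is kept under "#if 0" below for reference.
	 */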
	return -ENODEV;

#if 0
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
#endif
}

/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: Scatter/gather table
 *
 * Import shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM buffer object of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}
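
/*
 * For orientation (assumed call chain, simplified): userspace never calls
 * this directly.  The PRIME core's drm_gem_prime_import() roughly does
 *
 *	attach = dma_buf_attach(dma_buf, dev->dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 *
 * so by the time we get here the exporter has already provided a mapped
 * scatter/gather table describing the foreign memory.
 */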

#if 0
static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		reservation_object_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		reservation_object_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}

/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * 0 on success or negative error code.
 */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * We only create shared fences for internal use, but importers
		 * of the dmabuf rely on exclusive fences for implicitly
		 * tracking write hazards. As any of the current fences may
		 * correspond to a write, we need to convert all existing
		 * fences on the reservation object into a single exclusive
		 * fence.
		 */
		r = __reservation_object_make_exclusive(bo->tbo.resv);
		if (r)
			goto error_unreserve;
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}

/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * the other device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}
#endif

/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object.
 */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}
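
/*
 * Behavioural note (hedged sketch of the helper side): drm_gem_prime_export()
 * uses this hook to seed the exported dma-buf's reservation object, so the
 * dma-buf and the BO share fences and implicit synchronization stays coherent
 * between the exporter and its importers.
 */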

/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or negative error code.
 */
#if 0
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.unmap = drm_gem_dmabuf_kunmap,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
#endif

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * The main work is done by the &drm_gem_prime_export helper, which in turn
 * uses &amdgpu_gem_prime_res_obj.
 *
 * Returns:
 * Shared DMA buffer representing the GEM buffer object from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
#if 0
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
#endif
	}

	return buf;
}

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * The main work is done by the &drm_gem_prime_import helper, which in turn
 * uses &amdgpu_gem_prime_import_sg_table.
 *
 * Returns:
 * GEM buffer object representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the dmabuf's f_count.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}
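
/*
 * Userspace-side flow, for reference (illustrative sketch, not part of this
 * file): the exporting process turns a GEM handle into a dma-buf file
 * descriptor, passes the fd to another process or device, and the importer
 * turns it back into a handle of its own:
 *
 *	struct drm_prime_handle args = { .handle = handle, .flags = DRM_CLOEXEC };
 *	ioctl(export_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);   (args.fd is the dma-buf fd)
 *	...
 *	args.fd = dmabuf_fd;
 *	ioctl(import_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);   (args.handle is the new handle)
 *
 * Those ioctls reach amdgpu_gem_prime_export() and amdgpu_gem_prime_import()
 * above through the generic PRIME helpers.
 */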