/* $NetBSD: amdgpu_dma_buf.c,v 1.3 2021/12/19 12:01:40 riastradh Exp $ */

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
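
/*
 * Illustrative sketch (not part of this driver): userspace typically
 * shares a buffer exported here through the libdrm PRIME wrappers,
 * e.g.
 *
 *	int prime_fd;
 *	drmPrimeHandleToFD(render_fd, gem_handle, DRM_CLOEXEC, &prime_fd);
 *	// pass prime_fd to the importing process or device, then:
 *	uint32_t handle;
 *	drmPrimeFDToHandle(other_fd, prime_fd, &handle);
 *
 * The importing side lands in amdgpu_gem_prime_import() below.
 */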

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dma_buf.c,v 1.3 2021/12/19 12:01:40 riastradh Exp $");

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>

/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM BO
 * @vaddr: Virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the BO's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
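
/*
 * Importer-side usage sketch (illustrative only; assumes the dma-buf
 * kernel API of this generation, where dma_buf_vmap() returns the
 * address directly):
 *
 *	void *vaddr = dma_buf_vmap(dma_buf);	// reaches .vmap above
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		// ... CPU reads/writes through vaddr ...
 *		dma_buf_vunmap(dma_buf, vaddr);	// reaches .vunmap above
 *	}
 */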

/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
#ifdef __NetBSD__
int
amdgpu_gem_prime_mmap(struct drm_gem_object *obj, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
#endif
{
#ifdef __NetBSD__		/* XXX amdgpu prime */
	return -ENODEV;
#else
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
#ifdef __NetBSD__
	if (asize < size)
#else
	if (asize < vma->vm_end - vma->vm_start)
#endif
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
#endif
}

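/**
 * __dma_resv_make_exclusive - convert shared fences to an exclusive fence
 * @obj: reservation object to convert
 *
 * Collapses all shared fences on @obj into a single exclusive fence
 * (using a &dma_fence_array when there is more than one), so that
 * importers which only observe the exclusive fence still see every
 * pending access.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */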
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf to attach to
 * @attach: attachment to add
 *
 * Adds the attachment as a user of the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

#ifdef __NetBSD__		/* XXX */
	__USE(adev);
#else
	if (attach->dev->driver == adev->dev->driver)
		return 0;
#endif

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r) {
		/* Don't leak the reservation lock on failure. */
		amdgpu_bo_unreserve(bo);
		return r;
	}

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;
}
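
/*
 * The conversion above matters because importers that only honour the
 * exclusive fence typically synchronize like this (illustrative use of
 * the standard reservation-object API):
 *
 *	// wait_all=false: wait for the exclusive fence only
 *	long l = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 *					   false, true,
 *					   MAX_SCHEDULE_TIMEOUT);
 */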

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

#ifdef __NetBSD__
	__USE(adev);
	if (bo->prime_shared_count)
#else
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
#endif
		bo->prime_shared_count--;
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct sg_table *sgt;
	long r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return ERR_PTR(r);

	sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
	if (IS_ERR(sgt))
		return sgt;

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC))
		goto error_free;

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-ENOMEM);
}
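
/*
 * An importer would typically walk the returned sg_table to program its
 * DMA engine; a minimal sketch using the standard scatterlist iterator
 * (program_dma() is a hypothetical placeholder):
 *
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		program_dma(sg_dma_address(sg), sg_dma_len(sg));
 */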

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct drm_gem_object *gem = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gem);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}
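
/*
 * Importer-side sketch of the CPU access protocol that invokes the
 * callback above (standard dma-buf API, shown for illustration):
 *
 *	if (dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE) == 0) {
 *		// ... CPU reads of the buffer contents ...
 *		dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE);
 *	}
 */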

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.dynamic_mapping = true,
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
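
/*
 * For reference, an importing device exercises the table above roughly
 * as follows (core dma-buf entry points; the locking details of the
 * dynamic-mapping path are omitted in this sketch):
 *
 *	attach = dma_buf_attach(dma_buf, dev);			// .attach
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);  // .map_dma_buf
 *	// ... DMA using the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); // .unmap_dma_buf
 *	dma_buf_detach(dma_buf, attach);			// .detach
 */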

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = dma_buf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	dma_resv_lock(resv, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return &bo->tbo.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Imports a dma_buf into the driver, potentially creating a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

#ifdef __NetBSD__
	attach = dma_buf_dynamic_attach(dma_buf, dev->dmat, true);
#else
	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
#endif
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}