/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
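
/*
 * Illustrative sketch, not part of this driver: from userspace, PRIME
 * sharing is normally driven through libdrm. Assuming two already open
 * DRM file descriptors, the exporting side turns a GEM handle into a
 * dma-buf fd and the importing side turns that fd back into a handle on
 * the other device:
 *
 *	uint32_t handle, imported;
 *	int prime_fd;
 *
 *	drmPrimeHandleToFD(exporter_fd, handle,
 *			   DRM_CLOEXEC | DRM_RDWR, &prime_fd);
 *	drmPrimeFDToHandle(importer_fd, prime_fd, &imported);
 *
 * Those two ioctl paths end up in the amdgpu_gem_prime_export() and
 * amdgpu_gem_prime_import() callbacks implemented below.
 */
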
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the buffer object's memory.
 */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the buffer object's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the buffer object's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the buffer object's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or negative error code.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return -ENODEV;

#if 0
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
#endif
}

/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: Scatter/gather table
 *
 * Import shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM buffer object of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}
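
/*
 * For reference, a simplified sketch (error handling omitted) of how the
 * generic PRIME import path reaches the hook above.  The &drm_gem_prime_import
 * helper roughly does:
 *
 *	attach = dma_buf_attach(dma_buf, dev->dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 *	obj->import_attach = attach;
 *
 * so amdgpu_gem_prime_import_sg_table() never sees the exporting device
 * directly, only the attachment and the already mapped scatter/gather table.
 */
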
#if 0
static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		reservation_object_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		reservation_object_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}

/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * 0 on success or negative error code.
 */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * We only create shared fences for internal use, but importers
		 * of the dmabuf rely on exclusive fences for implicitly
		 * tracking write hazards. As any of the current fences may
		 * correspond to a write, we need to convert all existing
		 * fences on the reservation object into a single exclusive
		 * fence.
		 */
		r = __reservation_object_make_exclusive(bo->tbo.resv);
		if (r)
			goto error_unreserve;
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}

/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * the other device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}
#endif
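
/*
 * Background for the exclusive fence conversion in amdgpu_gem_map_attach()
 * above: many importers only wait on the exclusive fence slot before touching
 * the buffer, e.g. (illustrative only, not amdgpu code):
 *
 *	reservation_object_wait_timeout_rcu(bo->tbo.resv,
 *					    false, true,
 *					    MAX_SCHEDULE_TIMEOUT);
 *
 * With wait_all == false only the exclusive fence is honoured, so any pending
 * amdgpu writes tracked as shared fences have to be folded into the exclusive
 * slot before the buffer is handed out.
 */
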
/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object.
 */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or negative error code.
 */
#if 0
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.unmap = drm_gem_dmabuf_kunmap,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
#endif
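
/*
 * Illustrative only: if amdgpu installed the dma_buf_ops table above
 * (currently compiled out), userspace CPU access would reach
 * amdgpu_gem_begin_cpu_access() through the DMA_BUF_IOCTL_SYNC ioctl on the
 * exported fd, roughly:
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *	};
 *	ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	... CPU reads through an mmap() of prime_fd ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *	ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 * The SYNC_START leg is what invokes &dma_buf_ops.begin_cpu_access.
 */
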
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * The main work is done by the &drm_gem_prime_export helper, which in turn
 * uses &amdgpu_gem_prime_res_obj.
 *
 * Returns:
 * Shared DMA buffer representing the GEM buffer object from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
#if 0
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
#endif
	}

	return buf;
}

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * The main work is done by the &drm_gem_prime_import helper, which in turn
 * uses &amdgpu_gem_prime_import_sg_table.
 *
 * Returns:
 * GEM buffer object representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}