/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

#ifdef notyet

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg_attrs(attachment->dev,
			      st->sgl, st->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg_attrs(attachment->dev,
			   sg->sgl, sg->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

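/*
 * Counterpart to i915_gem_begin_cpu_access() above: once the CPU is done
 * with the buffer, the object is moved back to the GTT domain so any CPU
 * writes are flushed and visible to subsequent device access.
 */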
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

#endif /* notyet */

static const struct dma_buf_ops i915_dmabuf_ops = {
#ifdef notyet
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
#endif
	.release = drm_gem_dmabuf_release,
#ifdef notyet
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
#endif
};

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

#ifdef notyet

static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

#endif /* notyet */

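/*
 * Import path: a dma-buf that this driver exported itself is unwrapped
 * straight back to the underlying GEM object (taking a reference on it);
 * a foreign buffer is attached and, in the not-yet-enabled path below,
 * wrapped in a new GEM object backed by the attachment's sg_table.
 */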
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#ifdef notyet
	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
#else
	ret = 0;
	panic(__func__);
#endif
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif