/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), M_DRM, GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}

/*
 * Per-page CPU kmap access to exported pages is not supported: the map
 * hooks report failure by returning NULL and the unmap hooks are no-ops.
 */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					  unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
#if 0
	int ret;
#endif

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

#if 0
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.map_atomic = i915_gem_dmabuf_kmap_atomic,
	.unmap = i915_gem_dmabuf_kunmap,
	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}

static struct sg_table *
i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
#if 0
	return dma_buf_map_attachment(obj->base.import_attach,
				      DMA_BIDIRECTIONAL);
#else
	return NULL;
#endif
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
#if 0
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
#endif
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#if 0
	get_dma_buf(dma_buf);
#endif

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
#if 0
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
#endif

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif
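
/*
 * Illustrative sketch, not compiled (following this file's #if 0
 * convention): how the export and import paths above fit together.
 * Exporting one of our own objects and importing it again does not
 * create a second object; i915_gem_prime_import() recognises the
 * i915_dmabuf_ops vtable and simply takes another reference on the
 * original GEM object. The helper name below is hypothetical and not
 * part of the driver.
 */
#if 0
static int prime_self_import_example(struct drm_device *dev,
				     struct drm_gem_object *gem_obj)
{
	struct dma_buf *buf;
	struct drm_gem_object *imported;

	/* Wrap the GEM object in a dma-buf using i915_dmabuf_ops */
	buf = i915_gem_prime_export(dev, gem_obj, 0);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Importing our own export returns the same object, referenced */
	imported = i915_gem_prime_import(dev, buf);
	if (IS_ERR(imported)) {
		dma_buf_put(buf);
		return PTR_ERR(imported);
	}

	WARN_ON(imported != gem_obj);

	/* Drop the references taken by import and export */
	i915_gem_object_put(to_intel_bo(imported));
	dma_buf_put(buf);
	return 0;
}
#endif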