/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

/* Recover the GEM object stashed in dma_buf->priv at export time. */
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

/*
 * Exporter map callback: pin the object's backing pages and hand the
 * importer an independent, DMA-mapped copy of the scatterlist so the
 * two sides can be torn down separately.
 */
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), M_DRM, M_WAITOK);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

/*
 * Undo i915_gem_map_dma_buf(): unmap for the importer's device, free
 * the copied scatterlist and drop the page pin.
 */
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}
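/*
 * Illustrative sketch, not part of this driver: how a second device
 * driver would exercise the map/unmap callbacks above through the core
 * dma-buf API.  dma_buf_map_attachment() ends up in
 * i915_gem_map_dma_buf(), which pins the object's pages and returns a
 * freshly mapped scatterlist copy.  "example_import_and_map" and
 * "importer_dev" are hypothetical names for this example only.
 */
#if 0
static int example_import_and_map(struct dma_buf *dmabuf,
				  struct device *importer_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the importer's DMA engine with sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}
#endif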
/*
 * Map the whole object into kernel address space for the importer.
 * The mapping stays pinned until i915_gem_dmabuf_vunmap().
 */
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * The per-page kmap interfaces are unimplemented stubs; CPU access is
 * expected to go through vmap or mmap instead.
 */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

/*
 * Validate the requested range; forwarding to the backing file's mmap
 * is compiled out in this port.
 */
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
#if 0
	int ret;
#endif

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

#if 0
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

/* Move the object to the CPU domain before importer CPU access. */
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* Flush CPU writes by returning the object to the GTT domain. */
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
#if 0
	.release = drm_gem_dmabuf_release,
#endif
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

/*
 * Export a GEM object as a dma-buf.  The upstream dma_buf_export()
 * path is compiled out here, so export currently returns NULL.
 */
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
#if 0
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
#else
	return NULL;
#endif
}
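/*
 * Illustrative sketch, not part of this driver: the CPU-access protocol
 * an importer is expected to follow against the dma_buf_ops above.
 * dma_buf_begin_cpu_access() lands in i915_gem_begin_cpu_access(),
 * which moves the object to the CPU domain; dma_buf_vmap() pins a
 * kernel mapping via i915_gem_dmabuf_vmap().  "example_cpu_read" is a
 * hypothetical name for this example only.
 */
#if 0
static int example_cpu_read(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... read the buffer contents through vaddr ... */

	dma_buf_vunmap(dmabuf, vaddr);
out:
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	return ret;
}
#endif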
/*
 * Backing-store hooks for imported objects: pages come from the
 * exporter via the attachment rather than from our own backing store.
 * The attachment map/unmap calls are compiled out in this port.
 */
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
#if 0
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
#endif
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
#if 0
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
#endif
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

/* Import a dma-buf as a GEM object, short-circuiting self-imports. */
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#if 0
	get_dma_buf(dma_buf);
#endif

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
#if 0
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
#endif

	return ERR_PTR(ret);
}
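/*
 * Illustrative sketch, not part of this driver: i915_gem_prime_import()
 * is normally reached from the PRIME fd-to-handle ioctl.  A caller
 * holding a dma-buf fd would resolve and import it roughly as below;
 * "example_import_fd" and "fd" are hypothetical names for this example.
 * Note that upstream keeps its own dmabuf reference via get_dma_buf()
 * inside the import path, which is compiled out in this port, so the
 * lifetime handling here differs from upstream.
 */
#if 0
static struct drm_gem_object *example_import_fd(struct drm_device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct drm_gem_object *gobj;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/*
	 * For a buffer we exported ourselves this takes the fast path
	 * above and just bumps the GEM reference; otherwise it attaches
	 * to the foreign exporter.
	 */
	gobj = i915_gem_prime_import(dev, dmabuf);

	dma_buf_put(dmabuf);
	return gobj;
}
#endif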