/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), M_DRM, GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}

static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
#if 0
	int ret;
#endif

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

#if 0
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
#if 0
	.release = drm_gem_dmabuf_release,
#endif
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

#if 0
static void export_fences(struct drm_i915_gem_object *obj,
			  struct dma_buf *dma_buf)
{
	struct reservation_object *resv = dma_buf->resv;
	struct drm_i915_gem_request *req;
	unsigned long active;
	int idx;

	active = __I915_BO_ACTIVE(obj);
	if (!active)
		return;

	/* Serialise with execbuf to prevent concurrent fence-loops */
	mutex_lock(&obj->base.dev->struct_mutex);

	/* Mark the object for future fences before racily adding old fences */
	obj->base.dma_buf = dma_buf;

	ww_mutex_lock(&resv->lock, NULL);

	for_each_active(active, idx) {
		req = i915_gem_active_get(&obj->last_read[idx],
					  &obj->base.dev->struct_mutex);
		if (!req)
			continue;

		if (reservation_object_reserve_shared(resv) == 0)
			reservation_object_add_shared_fence(resv, &req->fence);

		i915_gem_request_put(req);
	}

	req = i915_gem_active_get(&obj->last_write,
				  &obj->base.dev->struct_mutex);
	if (req) {
		reservation_object_add_excl_fence(resv, &req->fence);
		i915_gem_request_put(req);
	}

	ww_mutex_unlock(&resv->lock);
	mutex_unlock(&obj->base.dev->struct_mutex);
}
#endif

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
#if 0
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dma_buf;

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	export_fences(obj, dma_buf);
	return dma_buf;
#else
	return NULL;
#endif
}

static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
#if 0
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
#endif
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
#if 0
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
#endif
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#if 0
	get_dma_buf(dma_buf);
#endif

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
#if 0
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
#endif

	return ERR_PTR(ret);
}
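
#if 0
/*
 * Illustrative sketch, not part of the original file: the PRIME hooks above
 * only take effect once they are wired into the driver's struct drm_driver.
 * The initializer below follows the usual Linux drm_driver layout of this
 * era and is an assumption for illustration, not a copy of the real
 * i915_drv.c entries; the generic drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() helpers route the PRIME ioctls to the
 * export/import callbacks defined in this file.
 */
static struct drm_driver i915_prime_wiring_sketch = {
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
};
#endif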