xref: /dragonfly/sys/dev/drm/i915/i915_gem_dmabuf.c (revision f0dba201)
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

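/*
 * Map the object's backing pages for an importer: take struct_mutex, pin the
 * pages, copy the object's scatterlist into a fresh sg_table so the importer
 * gets an independent mapping, and DMA-map it for the attachment's device.
 */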
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), M_DRM, GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

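/*
 * Tear down a mapping created by i915_gem_map_dma_buf(): DMA-unmap and free
 * the copied sg_table, then unpin the object's backing pages.
 */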
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

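/*
 * vmap/vunmap: map the entire object into the kernel's address space with
 * write-back caching, and drop that mapping again when the importer is done.
 */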
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}

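/*
 * Per-page kmap access is not implemented; these callbacks are stubs that
 * return NULL or do nothing, so importers needing CPU access must use vmap
 * or mmap instead.
 */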
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

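/*
 * mmap into userspace. Only the size and backing-store checks are performed
 * here; the path that redirects the mapping to the object's shmem file is
 * compiled out in this port (#if 0 below).
 */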
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
#if 0
	int ret;
#endif

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

#if 0
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

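/*
 * begin_cpu_access moves the object into the CPU domain (for writing if the
 * importer intends to write); end_cpu_access moves it back to the GTT domain
 * so any CPU writes are flushed and become visible to the GPU.
 */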
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

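/*
 * Copy the fences of the object's outstanding read and write requests into
 * the dma-buf's reservation object, so that importers can wait for any
 * in-flight rendering to complete before they touch the pages.
 */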
static void export_fences(struct drm_i915_gem_object *obj,
			  struct dma_buf *dma_buf)
{
	struct reservation_object *resv = dma_buf->resv;
	struct drm_i915_gem_request *req;
	unsigned long active;
	int idx;

	active = __I915_BO_ACTIVE(obj);
	if (!active)
		return;

	/* Serialise with execbuf to prevent concurrent fence-loops */
	mutex_lock(&obj->base.dev->struct_mutex);

	/* Mark the object for future fences before racily adding old fences */
	obj->base.dma_buf = dma_buf;

	ww_mutex_lock(&resv->lock, NULL);

	for_each_active(active, idx) {
		req = i915_gem_active_get(&obj->last_read[idx],
					  &obj->base.dev->struct_mutex);
		if (!req)
			continue;

		if (reservation_object_reserve_shared(resv) == 0)
			reservation_object_add_shared_fence(resv, &req->fence);

		i915_gem_request_put(req);
	}

	req = i915_gem_active_get(&obj->last_write,
				  &obj->base.dev->struct_mutex);
	if (req) {
		reservation_object_add_excl_fence(resv, &req->fence);
		i915_gem_request_put(req);
	}

	ww_mutex_unlock(&resv->lock);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

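/*
 * Export a GEM object as a dma-buf (the PRIME handle-to-fd path). Any
 * object-specific export hook runs first, then the dma-buf is created and
 * the object's current fences are attached to its reservation object.
 */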
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dma_buf;

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	export_fences(obj, dma_buf);
	return dma_buf;
}

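/*
 * Backing-page hooks for imported objects: the pages are whatever the
 * exporter provides through the attachment, mapped and unmapped via the
 * dma-buf attachment API rather than allocated locally.
 */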
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

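/*
 * Import a dma-buf as a GEM object (the PRIME fd-to-handle path). A buffer
 * exported by this same device is handled by taking a reference on the
 * original object; anything else is wrapped in a new object that sources
 * its pages through a dma-buf attachment.
 */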
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#if 0
	get_dma_buf(dma_buf);
#endif

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
#if 0
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
#endif

	return ERR_PTR(ret);
}