xref: /openbsd/sys/dev/pci/drm/i915/gem/i915_gem_dmabuf.c (revision d89ec533)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

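/* Recover the exporting GEM object from a dma-buf's private data. */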
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

#ifdef notyet

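/*
 * Map the exporter's pages into the importer's DMA address space: pin the
 * object's backing store, duplicate its scatterlist so each attachment owns
 * an independent mapping, and DMA-map the copy for the attaching device.
 * DMA_ATTR_SKIP_CPU_SYNC skips CPU cache maintenance at map time; coherency
 * is presumably left to the begin/end_cpu_access hooks below.
 */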
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

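/* Undo i915_gem_map_dma_buf(): unmap, free the scatterlist copy, unpin. */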
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

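/* Pin the object and return a write-back kernel virtual mapping of it. */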
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

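/* Flush CPU writes out of the kernel mapping, then release the pin. */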
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

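/*
 * Delegate mmap of the dma-buf to the object's shmem backing file,
 * swapping the vma's file reference over to that filp.
 */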
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

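/*
 * Move the object into the CPU domain before the importer touches it with
 * the CPU; a transfer toward the device implies the CPU will be writing.
 */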
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

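/*
 * Finish CPU access: return the object to the (coherent) GTT domain so
 * that subsequent GPU access sees the CPU's writes.
 */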
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

#endif /* notyet */

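/*
 * Only release is hooked up on this port so far; the mapping, mmap and
 * CPU-access callbacks above remain disabled behind "notyet".
 */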
static const struct dma_buf_ops i915_dmabuf_ops = {
#ifdef notyet
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
#endif
	.release = drm_gem_dmabuf_release,
#ifdef notyet
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
#endif
};

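/*
 * Export a GEM object as a dma-buf sharing the object's reservation
 * object. Backends may refuse or prepare the export via the optional
 * dmabuf_export hook.
 */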
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

#ifdef notyet

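/*
 * Acquire backing pages for an imported object by mapping the dma-buf
 * attachment and adopting the exporter's sg_table as our page set.
 */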
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

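/* Return the exporter's pages by unmapping the attachment. */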
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

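/* Backend ops for GEM objects wrapped around an imported dma-buf. */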
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

#endif /* notyet */

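/*
 * Import a dma-buf as a GEM object. A self-import simply takes another
 * reference on the underlying object; anything foreign is attached and,
 * on Linux, wrapped in a new GEM object treated as GTT-coherent. That
 * path is still disabled here, so a foreign import panics.
 */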
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#ifdef notyet
	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
#else
	/* Foreign dma-buf import is not implemented yet on this port. */
	ret = 0;
	panic(__func__);
#endif
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif