/*	$NetBSD: i915_gem_dmabuf.c,v 1.6 2021/12/19 11:33:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_dmabuf.c,v 1.6 2021/12/19 11:33:30 riastradh Exp $");

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

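/*
 * Recover the exporting GEM object from a dma-buf.  The dma-buf's
 * private pointer is set to the GEM object in i915_gem_prime_export.
 */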
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	struct drm_gem_object *obj = buf->priv;

	return to_intel_bo(obj);
}

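/*
 * Map the exported object's pages for an importer: pin the backing
 * pages, copy the scatterlist so the importer gets an independent
 * mapping, and map it for DMA on the importer's device.
 */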
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

#ifdef __NetBSD__
	__USE(i);
	__USE(src);
	__USE(dst);
	memcpy(st->sgl->sg_pgs, obj->mm.pages->sgl->sg_pgs,
	    obj->mm.pages->nents * sizeof(st->sgl->sg_pgs[0]));
#else

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}
#endif

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

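/*
 * Undo i915_gem_map_dma_buf: unmap the DMA addresses, free the copied
 * scatterlist, and unpin the backing pages.
 */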
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

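/*
 * Return a pinned, write-back cached kernel virtual mapping of the
 * object for the importer.
 */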
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

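/*
 * Flush and release the kernel virtual mapping created by
 * i915_gem_dmabuf_vmap.
 */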
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

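/*
 * Map the object into user space.  On NetBSD this returns the GEM
 * object's uvm_object; on Linux it delegates to the backing shmem
 * file's mmap.
 */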
#ifdef __NetBSD__
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp,
    size_t size, int prot, int *flagsp, int *advicep,
    struct uvm_object **uobjp, int *maxprotp)
#else
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
#endif
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

#ifdef __NetBSD__
	__USE(ret);
	if (obj->base.size < size)
		return -EINVAL;
	if (!obj->base.filp)
		return -ENODEV;
	/* XXX review mmap refcount */
	drm_gem_object_get(&obj->base);
	*advicep = UVM_ADV_RANDOM;
	*uobjp = &obj->base.gemo_uvmobj;
	*maxprotp = prot;
#else
	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

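/*
 * Prepare the object for CPU access by the importer: pin the pages
 * and move the object to the CPU domain, flushing as needed.
 */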
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

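/*
 * Finish CPU access by the importer: flush out of the CPU domain by
 * moving the object back to the GTT domain.
 */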
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

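/*
 * Export an i915 GEM object as a dma-buf.  The dma-buf shares the
 * object's reservation object so fences remain visible to importers.
 */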
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

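/*
 * get_pages backend for imported dma-bufs: map the attachment for
 * bidirectional DMA and install the resulting sg_table as the
 * object's backing pages.
 */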
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

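/*
 * put_pages backend for imported dma-bufs: release the pages by
 * unmapping the attachment.
 */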
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

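/*
 * Import a dma-buf as an i915 GEM object.  If the dma-buf was exported
 * by this device, just take another reference on the original object;
 * otherwise attach to the dma-buf and wrap it in a new GEM object whose
 * pages come from the attachment.
 */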
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dmat);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif