/* xref: /openbsd/sys/dev/pci/drm/i915/gem/i915_gem_phys.c (revision e5dd7070) */
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

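/*
 * Replace the object's scattered backing pages with a single physically
 * contiguous, coherent allocation, copying in the current contents.  The
 * swappable backing store (shmem on Linux, the UVM anonymous object on
 * OpenBSD) is kept around so that dirty contents can be written back when
 * the pages are put.
 */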
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
#ifdef __linux__
	struct address_space *mapping = obj->base.filp->f_mapping;
#else
	struct drm_dma_handle *phys;
#endif
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
#ifdef __linux__
	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;
#else
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;
	vaddr = phys->vaddr;
	dma = phys->busaddr;
#endif

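	/*
	 * The rest of the driver consumes backing store through a
	 * scatterlist, so describe the contiguous allocation with a
	 * single-entry sg_table.  The page slot of the entry stashes the
	 * vaddr (Linux) or the DMA handle (OpenBSD) for put_pages.
	 */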
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

#ifdef __linux__
	sg_assign_page(sg, (struct page *)vaddr);
#else
	sg_assign_page(sg, (struct vm_page *)phys);
#endif
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

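	/*
	 * Copy the object's current contents, one page at a time, from
	 * the swappable backing store into the contiguous buffer.
	 */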
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct vm_page *page;
		void *src;

#ifdef __linux__
		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_st;
#else
		struct pglist plist;
		TAILQ_INIT(&plist);
		if (uvm_objwire(obj->base.uao, i * PAGE_SIZE,
				(i + 1) * PAGE_SIZE, &plist))
			goto err_st;
		page = TAILQ_FIRST(&plist);
#endif

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

#ifdef __linux__
		put_page(page);
#else
		uvm_objunwire(obj->base.uao, i * PAGE_SIZE,
			      (i + 1) * PAGE_SIZE);
#endif
		dst += PAGE_SIZE;
	}

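	/* Flush the chipset write buffers so the copy is visible to the GPU. */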
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_st:
	/*
	 * Safe even when sg_alloc_table() itself failed: on failure the
	 * table is zeroed, so sg_free_table() is a no-op; on the in-loop
	 * failure paths this releases the allocated scatterlist.
	 */
	sg_free_table(st);
	kfree(st);
err_pci:
#ifdef __linux__
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
#else
	drm_pci_free(obj->base.dev, phys);
#endif
	return -ENOMEM;
}

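/*
 * Release the contiguous allocation, first writing any dirty contents
 * back to the swappable backing store so they are not lost.
 */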
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
#ifdef __linux__
	void *vaddr = sg_page(pages->sgl);
#else
	struct drm_dma_handle *phys = (void *)sg_page(pages->sgl);
	void *vaddr = phys->vaddr;
#endif

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
#ifdef __linux__
		struct address_space *mapping = obj->base.filp->f_mapping;
#endif
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct vm_page *page;
			char *dst;

#ifdef __linux__
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				/* Keep src in step with the page index. */
				src += PAGE_SIZE;
				continue;
			}
#else
			struct pglist plist;
			TAILQ_INIT(&plist);
			if (uvm_objwire(obj->base.uao, i * PAGE_SIZE,
					(i + 1) * PAGE_SIZE, &plist)) {
				src += PAGE_SIZE;
				continue;
			}
			page = TAILQ_FIRST(&plist);
#endif

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
#ifdef __linux__
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
#else
			uvm_objunwire(obj->base.uao, i * PAGE_SIZE,
				      (i + 1) * PAGE_SIZE);
#endif

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

#ifdef __linux__
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
#else
	drm_pci_free(obj->base.dev, phys);
#endif
}

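/* Drop the reference on the backing store taken at object creation. */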
static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

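/* Ops installed once an object has been converted to a phys backing. */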
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

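/*
 * Convert a shmem-backed object into a phys object backed by a single
 * contiguous allocation, for hardware that must read from one raw
 * physical address (such as physically addressed cursor planes).  The
 * object must be unmapped, unquirked and marked WILLNEED, and @align
 * may not exceed the object size.
 *
 * Minimal caller sketch (context assumed, not taken from this file):
 *
 *	err = i915_gem_object_attach_phys(obj, align);
 *	if (err)
 *		return err;
 */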
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

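	/*
	 * Swap the backing store: steal the shmem pages, switch over to
	 * the phys ops (which repopulate from the still-intact shmem
	 * contents), then release the old pages.
	 */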
	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);

	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif