xref: /openbsd/sys/dev/pci/drm/i915/gt/shmem_utils.c (revision 0a503ede)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/iosys-map.h>
7 #include <linux/mm.h>
8 #include <linux/pagemap.h>
9 #include <linux/shmem_fs.h>
10 
11 #include "i915_drv.h"
12 #include "gem/i915_gem_object.h"
13 #include "gem/i915_gem_lmem.h"
14 #include "shmem_utils.h"
15 
16 #ifdef __linux__
17 
/*
 * Create an anonymous shmem file named @name, sized (page-aligned) to
 * hold @len bytes, and populate it with a copy of @data.
 *
 * Returns the new file, or an ERR_PTR on failure.  The caller owns the
 * returned reference and must drop it with fput().
 */
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int ret;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	ret = shmem_write(file, 0, data, len);
	if (!ret)
		return file;

	fput(file);
	return ERR_PTR(ret);
}
35 
shmem_create_from_object(struct drm_i915_gem_object * obj)36 struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
37 {
38 	enum i915_map_type map_type;
39 	struct file *file;
40 	void *ptr;
41 
42 	if (i915_gem_object_is_shmem(obj)) {
43 		file = obj->base.filp;
44 		atomic_long_inc(&file->f_count);
45 		return file;
46 	}
47 
48 	map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
49 	ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
50 	if (IS_ERR(ptr))
51 		return ERR_CAST(ptr);
52 
53 	file = shmem_create_from_data("", ptr, obj->base.size);
54 	i915_gem_object_unpin_map(obj);
55 
56 	return file;
57 }
58 
/*
 * Pin every backing page of a shmem file and map them into one
 * contiguous kernel virtual address range.
 *
 * The pages[] array is handed over to vmap() via VM_MAP_PUT_PAGES, so
 * on success both the array and the page references are released later
 * by the vfree() in shmem_unpin_map().  The mapping is also marked
 * unevictable so the pinned pages are not reclaimed/swapped meanwhile.
 *
 * Returns the kernel virtual address of the mapping, or NULL on failure
 * (allocation failure, a page that could not be read in, or vmap
 * failure).
 */
void *shmem_pin_map(struct file *file)
{
	struct page **pages;
	size_t n_pages, i;
	void *vaddr;

	/* File size is page-aligned at creation; derive the page count. */
	n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		/* Takes a reference on each page returned. */
		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
						       GFP_KERNEL);
		if (IS_ERR(pages[i]))
			goto err_page;
	}

	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!vaddr)
		goto err_page;
	mapping_set_unevictable(file->f_mapping);
	return vaddr;
err_page:
	/* Drop the references taken so far; i is the count of valid entries. */
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return NULL;
}
88 
/*
 * Undo shmem_pin_map(): make the mapping evictable again and release
 * the virtual mapping.  Because the mapping was created with
 * VM_MAP_PUT_PAGES, vfree() also drops the page references and frees
 * the pages array.  @ptr must be a value returned by shmem_pin_map().
 */
void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	vfree(ptr);
}
94 
/*
 * Copy @len bytes between @ptr and the shmem file's pages starting at
 * byte offset @off.  When @write is true, data flows from @ptr into the
 * file (dirtying each touched page); otherwise the file contents are
 * copied out into @ptr.
 *
 * Returns 0 on success or a negative errno if a backing page could not
 * be obtained (a partial copy may already have happened by then).
 */
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		/* Clamp this chunk to the remainder of the current page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;	/* every page after the first starts at offset 0 */
	}

	return 0;
}
130 
/*
 * Read @len bytes from @file starting at byte offset @off into @map at
 * offset @map_off.  Same page-at-a-time walk as __shmem_rw(), but the
 * destination is an iosys_map (which may be I/O memory), so the copy
 * goes through iosys_map_memcpy_to() instead of memcpy().
 *
 * Returns 0 on success or a negative errno if a backing page could not
 * be obtained.
 */
int shmem_read_to_iosys_map(struct file *file, loff_t off,
			    struct iosys_map *map, size_t map_off, size_t len)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		/* Clamp this chunk to the remainder of the current page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
				    this);
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		map_off += this;
		off = 0;	/* every page after the first starts at offset 0 */
	}

	return 0;
}
161 
/* Read @len bytes at offset @off from @file into @dst.  Returns 0 or -errno. */
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}
166 
/* Write @len bytes from @src into @file at offset @off.  Returns 0 or -errno. */
int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}
171 
172 #endif /* __linux__ */
173 
174 struct uvm_object *
uao_create_from_data(void * data,size_t len)175 uao_create_from_data(void *data, size_t len)
176 {
177 	struct uvm_object *uao;
178 	int err;
179 
180 	uao = uao_create(PAGE_ALIGN(len), 0);
181 	if (uao == NULL) {
182 		return ERR_PTR(-ENOMEM);
183 	}
184 
185 	err = uao_write(uao, 0, data, len);
186 	if (err) {
187 		uao_detach(uao);
188 		return ERR_PTR(err);
189 	}
190 
191 	return uao;
192 }
193 
194 struct uvm_object *
uao_create_from_object(struct drm_i915_gem_object * obj)195 uao_create_from_object(struct drm_i915_gem_object *obj)
196 {
197 	struct uvm_object *uao;
198 	void *ptr;
199 
200 	if (i915_gem_object_is_shmem(obj)) {
201 		uao_reference(obj->base.uao);
202 		return obj->base.uao;
203 	}
204 
205 	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
206 						I915_MAP_WC : I915_MAP_WB);
207 	if (IS_ERR(ptr))
208 		return ERR_CAST(ptr);
209 
210 	uao = uao_create_from_data(ptr, obj->base.size);
211 	i915_gem_object_unpin_map(obj);
212 
213 	return uao;
214 }
215 
/*
 * Copy @len bytes between @ptr and the UVM object's pages starting at
 * byte offset @off.  When @write is true, data flows from @ptr into the
 * object; otherwise the object's contents are copied out into @ptr.
 *
 * The affected page range is wired up front with uvm_obj_wire() (which
 * fills @plist with the backing pages in order) and unwired again when
 * the copy is done.  Returns 0 on success or -ENOMEM if wiring failed.
 *
 * NOTE(review): assumes uvm_obj_wire() returns the pages on @plist in
 * ascending offset order, matching the sequential copy — confirm
 * against uvm_obj_wire(9).
 */
static int __uao_rw(struct uvm_object *uao, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	struct pglist plist;
	struct vm_page *page;
	vaddr_t pgoff = trunc_page(off);
	size_t olen = round_page(len);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(uao, pgoff, olen, &plist))
		return -ENOMEM;

	TAILQ_FOREACH(page, &plist, pageq) {
		/* Clamp this chunk to the remainder of the current page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		void *vaddr = kmap(page);

		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}

		kunmap_va(vaddr);
		len -= this;
		ptr += this;
		off = 0;	/* every page after the first starts at offset 0 */
	}

	uvm_obj_unwire(uao, pgoff, olen);

	return 0;
}
251 
/*
 * Read @len bytes from @uao starting at byte offset @off into @map at
 * offset @map_off.  Same wire/copy/unwire walk as __uao_rw(), but the
 * destination is an iosys_map (which may be I/O memory), so the copy
 * goes through iosys_map_memcpy_to() instead of memcpy().
 *
 * Returns 0 on success or -ENOMEM if the page range could not be wired.
 */
int uao_read_to_iosys_map(struct uvm_object *uao, loff_t off,
			    struct iosys_map *map, size_t map_off, size_t len)
{
	struct pglist plist;
	struct vm_page *page;
	vaddr_t pgoff = trunc_page(off);
	size_t olen = round_page(len);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(uao, pgoff, olen, &plist))
		return -ENOMEM;

	TAILQ_FOREACH(page, &plist, pageq) {
		/* Clamp this chunk to the remainder of the current page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		void *vaddr;

		vaddr = kmap(page);
		iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
				    this);
		kunmap_va(vaddr);

		len -= this;
		map_off += this;
		off = 0;	/* every page after the first starts at offset 0 */
	}

	uvm_obj_unwire(uao, pgoff, olen);

	return 0;
}
283 
/* Read @len bytes at offset @off from @uao into @dst.  Returns 0 or -errno. */
int uao_read(struct uvm_object *uao, loff_t off, void *dst, size_t len)
{
	return __uao_rw(uao, off, dst, len, false);
}
288 
/* Write @len bytes from @src into @uao at offset @off.  Returns 0 or -errno. */
int uao_write(struct uvm_object *uao, loff_t off, void *src, size_t len)
{
	return __uao_rw(uao, off, src, len, true);
}
293 
294 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
295 #include "st_shmem_utils.c"
296 #endif
297