// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"

#ifdef __linux__

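/*
 * Create an anonymous shmem file of PAGE_ALIGN(len) bytes and copy @data
 * into it.  On success the caller owns the sole reference and releases
 * it with fput(); on failure an ERR_PTR is returned.
 */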
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int err;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	err = shmem_write(file, 0, data, len);
	if (err) {
		fput(file);
		return ERR_PTR(err);
	}

	return file;
}

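/*
 * Expose a GEM object's contents as a shmem file.  A shmem-backed object
 * already has a struct file, so only an extra reference is taken (the
 * raw f_count increment is equivalent to get_file()); any other object
 * is pinned, CPU-mapped and copied into a freshly created file.
 */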
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
	struct file *file;
	void *ptr;

	if (obj->ops == &i915_gem_shmem_ops) {
		file = obj->base.filp;
		atomic_long_inc(&file->f_count);
		return file;
	}

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	file = shmem_create_from_data("", ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return file;
}

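/*
 * Pin every page of the file and return one contiguous kernel mapping
 * of its contents, or NULL on failure.  The mapping is marked
 * unevictable so page reclaim leaves the pages alone while it is live.
 * Because the vmap is created with VM_MAP_PUT_PAGES, the later vfree()
 * both drops the page references and frees the pages[] array, which is
 * why the array is only kvfree()d here on the error path.
 */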
void *shmem_pin_map(struct file *file)
{
	struct page **pages;
	size_t n_pages, i;
	void *vaddr;

	n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
						       GFP_KERNEL);
		if (IS_ERR(pages[i]))
			goto err_page;
	}

	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!vaddr)
		goto err_page;
	mapping_set_unevictable(file->f_mapping);
	return vaddr;
err_page:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return NULL;
}

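/*
 * A minimal usage sketch for the pair above (hypothetical caller, error
 * handling reduced to the essentials):
 *
 *	void *vaddr = shmem_pin_map(file);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	memcpy(dst, vaddr, size);	// whole file is linearly mapped
 *	shmem_unpin_map(file, vaddr);
 */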
void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	vfree(ptr);
}

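/*
 * Copy @len bytes between @ptr and the file, a page at a time: fault in
 * each shmem page, kmap() it, memcpy() through the mapping (dirtying the
 * page on writes), then release it.  @off is only relevant within the
 * first page; every later page is copied from offset 0.
 */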
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;
	}

	return 0;
}

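/*
 * Thin read/write wrappers around __shmem_rw().  A minimal sketch of
 * their use (hypothetical values):
 *
 *	u32 magic = 0xc0ffee;
 *	err = shmem_write(file, 0, &magic, sizeof(magic));
 *	if (!err)
 *		err = shmem_read(file, 0, &magic, sizeof(magic));
 */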
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}

#endif /* __linux__ */

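/*
 * OpenBSD counterparts: the same helpers expressed over UVM anonymous
 * objects (uao) instead of Linux shmem files.
 */

/*
 * Create an anonymous UVM object of PAGE_ALIGN(len) bytes and copy @data
 * into it.  On success the caller owns the reference and drops it with
 * uao_detach(); on failure an ERR_PTR is returned.
 */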
struct uvm_object *
uao_create_from_data(void *data, size_t len)
{
	struct uvm_object *uao;
	int err;

	uao = uao_create(PAGE_ALIGN(len), 0);
	if (uao == NULL) {
		return ERR_PTR(-ENOMEM);
	}

	err = uao_write(uao, 0, data, len);
	if (err) {
		uao_detach(uao);
		return ERR_PTR(err);
	}

	return uao;
}

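/*
 * UVM counterpart of shmem_create_from_object(): a shmem-backed object
 * hands back a new reference to its existing uao; any other object is
 * pinned, CPU-mapped and copied into a fresh one.
 */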
struct uvm_object *
uao_create_from_object(struct drm_i915_gem_object *obj)
{
	struct uvm_object *uao;
	void *ptr;

	if (obj->ops == &i915_gem_shmem_ops) {
		uao_reference(obj->base.uao);
		return obj->base.uao;
	}

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	uao = uao_create_from_data(ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return uao;
}

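/*
 * UVM counterpart of __shmem_rw(): wire every page backing
 * [off, off + len), then walk the returned page list copying one page
 * at a time.  Wiring keeps the pages resident for the duration of the
 * copy; the range is unwired again before returning.
 */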
static int __uao_rw(struct uvm_object *uao, loff_t off,
		    void *ptr, size_t len,
		    bool write)
{
	struct pglist plist;
	struct vm_page *page;
	/* uvm_obj_wire() takes page-aligned start/end offsets */
	voff_t start = trunc_page(off);
	voff_t end = round_page(off + len);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(uao, start, end, &plist))
		return -ENOMEM;

	TAILQ_FOREACH(page, &plist, pageq) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		void *vaddr = kmap(page);

		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}

		kunmap_va(vaddr);
		len -= this;
		ptr += this;
		off = 0;
	}

	uvm_obj_unwire(uao, start, end);

	return 0;
}

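/*
 * Thin read/write wrappers around __uao_rw(), mirroring shmem_read()
 * and shmem_write() on the Linux side.
 */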
int uao_read(struct uvm_object *uao, loff_t off, void *dst, size_t len)
{
	return __uao_rw(uao, off, dst, len, false);
}

int uao_write(struct uvm_object *uao, loff_t off, void *src, size_t len)
{
	return __uao_rw(uao, off, src, len, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif