/*	$OpenBSD: drm_vma_manager.c,v 1.5 2020/06/08 04:47:58 jsg Exp $	*/
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller which can then be used on the address_space of the drm-device. It
 * takes care not to overlap regions, to size them appropriately and not to
 * confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
 * no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
 */
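
/*
 * Illustrative sketch (not part of this file) of how a driver might tie the
 * pieces above together. All driver-side names (my_dev, obj, vma_mgr,
 * vma_node, the size field and the address-space range) are hypothetical:
 *
 *	driver load: one manager per address_space
 *	drm_vma_offset_manager_init(&my_dev->vma_mgr, 0, 0x10000000);
 *
 *	object creation: reserve a page-based offset, allow the opening file
 *	drm_vma_offset_add(&my_dev->vma_mgr, &obj->vma_node,
 *			   obj->size >> PAGE_SHIFT);
 *	drm_vma_node_allow(&obj->vma_node, filp);
 *
 *	hand the byte-based fake offset to user-space for mmap()
 *	offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *	object destruction / driver unload
 *	drm_vma_node_revoke(&obj->vma_node, filp);
 *	drm_vma_offset_remove(&my_dev->vma_mgr, &obj->vma_node);
 *	drm_vma_offset_manager_destroy(&my_dev->vma_mgr);
 */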

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rw_init(&mgr->vm_lock, "drmvmo");
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
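
/*
 * Teardown sketch (illustrative only; my_dev, obj_list and vma_node are
 * hypothetical driver state): every node must be removed before the manager
 * is destroyed, otherwise drm_mm_takedown() will refuse to free the still
 * allocated regions.
 *
 *	list_for_each_entry(obj, &my_dev->obj_list, head)
 *		drm_vma_offset_remove(&my_dev->vma_mgr, &obj->vma_node);
 *	drm_vma_offset_manager_destroy(&my_dev->vma_mgr);
 */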

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
 * manually. See drm_vma_offset_lock_lookup() for an example.
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
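
/*
 * Typical locked-lookup pattern, e.g. from a driver's mmap path (illustrative
 * sketch; my_dev and struct my_obj are hypothetical, and the matching
 * drm_vma_offset_lock_lookup()/drm_vma_offset_unlock_lookup() helpers are
 * assumed to be provided by drm_vma_manager.h):
 *
 *	drm_vma_offset_lock_lookup(&my_dev->vma_mgr);
 *	node = drm_vma_offset_lookup_locked(&my_dev->vma_mgr,
 *					    vma->vm_pgoff, vma_pages(vma));
 *	if (node)
 *		obj = container_of(node, struct my_obj, vma_node);
 *	drm_vma_offset_unlock_lookup(&my_dev->vma_mgr);
 */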

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
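
/*
 * Sketch of reserving an offset at object-creation time (illustrative only;
 * my_dev, obj and its byte-based size field are hypothetical). @pages is
 * page-based, while the value handed back to user-space for mmap() is the
 * byte-based fake offset:
 *
 *	ret = drm_vma_offset_add(&my_dev->vma_mgr, &obj->vma_node,
 *				 obj->size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 */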

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
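
/*
 * Sketch of the usual pairing (illustrative only; the open/close hooks and
 * struct my_obj are hypothetical): grant access when an object is opened on
 * a file and drop it again on close, once per drm_vma_node_allow() call.
 *
 *	my_obj_open(struct my_obj *obj, struct file *filp)
 *		return drm_vma_node_allow(&obj->vma_node, filp);
 *
 *	my_obj_close(struct my_obj *obj, struct file *filp)
 *		drm_vma_node_revoke(&obj->vma_node, filp);
 */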

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node to see whether @filp is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
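
/*
 * Sketch of an access check in a driver's mmap path (illustrative only; obj
 * and its vma_node are hypothetical). Without a prior drm_vma_node_allow()
 * for @filp the mapping attempt is rejected, matching the -EACCES behaviour
 * described in the DOC section above:
 *
 *	if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
 *		return -EACCES;
 */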