/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller which can then be used on the address_space of the drm-device. It
 * takes care not to overlap regions, to size them appropriately and to not
 * confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
 * no longer be linear. Please use VM_NONLINEAR in that case and implement your
 * own offset managers.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
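 *
 * A minimal life-cycle sketch, assuming a hypothetical driver structure
 * my_dev that embeds the manager and an object my_obj that embeds a
 * struct drm_vma_offset_node (names and fields are placeholders, error
 * handling is omitted):
 *
 *	drm_vma_offset_manager_init(&my_dev->vma_mgr, start_page, num_pages);
 *
 *	drm_vma_offset_add(&my_dev->vma_mgr, &my_obj->vma_node,
 *			   obj_size >> PAGE_SHIFT);
 *	offset = drm_vma_node_offset_addr(&my_obj->vma_node);
 *	... hand offset to user-space, which passes it to mmap() ...
 *
 *	drm_vma_offset_remove(&my_dev->vma_mgr, &my_obj->vma_node);
 *	drm_vma_offset_manager_destroy(&my_dev->vma_mgr);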
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are left
 * to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	lockinit(&mgr->vm_lock, "drmvml", 0, LK_CANRECURSE);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	lockmgr(&mgr->vm_lock, LK_EXCLUSIVE);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	lockmgr(&mgr->vm_lock, LK_RELEASE);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given range. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	lockmgr(&mgr->vm_lock, LK_EXCLUSIVE);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	lockmgr(&mgr->vm_lock, LK_RELEASE);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
 * manually. See drm_vma_offset_lock_lookup() for an example.
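 *
 * A minimal sketch of the locked lookup pattern, assuming the node is embedded
 * in a hypothetical reference-counted driver object my_obj (names are
 * placeholders, not part of this API):
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		kref_get(&container_of(node, struct my_obj, vma_node)->refcount);
 *	drm_vma_offset_unlock_lookup(mgr);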
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

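	/*
	 * Walk the rb-tree towards @start and remember the last node whose
	 * start address is not larger than @start; an exact match terminates
	 * the search early.
	 */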
	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
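 *
 * A short sketch of the usual pattern for exposing an mmap offset, assuming a
 * hypothetical driver object my_obj that embeds the node (the error code is
 * simply propagated):
 *
 *	ret = drm_vma_offset_add(mgr, &my_obj->vma_node,
 *				 obj_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&my_obj->vma_node);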
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	lockmgr(&mgr->vm_lock, LK_EXCLUSIVE);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	lockmgr(&mgr->vm_lock, LK_RELEASE);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
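 *
 * A minimal sketch of the usual call site in an object's release path, where
 * my_obj is a hypothetical driver object embedding the node (the call is safe
 * even if the node was never added):
 *
 *	drm_vma_offset_remove(mgr, &my_obj->vma_node);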
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	lockmgr(&mgr->vm_lock, LK_EXCLUSIVE);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	lockmgr(&mgr->vm_lock, LK_RELEASE);
}
EXPORT_SYMBOL(drm_vma_offset_remove);