1 /* $NetBSD: drm_vma_manager.c,v 1.4 2016/04/19 02:52:29 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: drm_vma_manager.c,v 1.4 2016/04/19 02:52:29 riastradh Exp $");
34
35 #include <sys/kmem.h>
36 #include <sys/rbtree.h>
37 #include <sys/vmem.h>
38
39 #include <drm/drm_vma_manager.h>
40
41 static int
drm_vma_node_compare(void * cookie __unused,const void * va,const void * vb)42 drm_vma_node_compare(void *cookie __unused, const void *va, const void *vb)
43 {
44 const struct drm_vma_offset_node *const na = va;
45 const struct drm_vma_offset_node *const nb = vb;
46
47 if (na->von_startpage < nb->von_startpage)
48 return -1;
49 if (na->von_startpage > nb->von_startpage)
50 return +1;
51 return 0;
52 }
53
54 static int
drm_vma_node_compare_key(void * cookie __unused,const void * vn,const void * vk)55 drm_vma_node_compare_key(void *cookie __unused, const void *vn, const void *vk)
56 {
57 const struct drm_vma_offset_node *const n = vn;
58 const vmem_addr_t *const k = vk;
59
60 if (n->von_startpage < *k)
61 return -1;
62 if (n->von_startpage > *k)
63 return +1;
64 return 0;
65 }
66
/*
 * rbtree(9) ops for the per-manager tree of offset nodes, keyed by
 * start page (von_startpage).
 */
static const rb_tree_ops_t drm_vma_node_rb_ops = {
	.rbto_compare_nodes = &drm_vma_node_compare,
	.rbto_compare_key = &drm_vma_node_compare_key,
	.rbto_node_offset = offsetof(struct drm_vma_offset_node, von_rb_node),
	.rbto_context = NULL,
};
73
74 static int
drm_vma_file_compare(void * cookie __unused,const void * va,const void * vb)75 drm_vma_file_compare(void *cookie __unused, const void *va, const void *vb)
76 {
77 const struct drm_vma_offset_file *const fa = va;
78 const struct drm_vma_offset_file *const fb = vb;
79
80 if (fa->vof_file < fb->vof_file)
81 return -1;
82 if (fa->vof_file > fb->vof_file)
83 return +1;
84 return 0;
85 }
86
87 static int
drm_vma_file_compare_key(void * cookie __unused,const void * vf,const void * vk)88 drm_vma_file_compare_key(void *cookie __unused, const void *vf, const void *vk)
89 {
90 const struct drm_vma_offset_file *const f = vf;
91 const struct file *const k = vk;
92
93 if (f->vof_file < k)
94 return -1;
95 if (f->vof_file > k)
96 return +1;
97 return 0;
98 }
99
/*
 * rbtree(9) ops for the per-node tree of files allowed to map the
 * node, keyed by struct file pointer (vof_file).
 */
static const rb_tree_ops_t drm_vma_file_rb_ops = {
	.rbto_compare_nodes = &drm_vma_file_compare,
	.rbto_compare_key = &drm_vma_file_compare_key,
	.rbto_node_offset = offsetof(struct drm_vma_offset_file, vof_rb_node),
	.rbto_context = NULL,
};
106
/*
 * drm_vma_offset_manager_init(mgr, startpage, npages)
 *
 *	Initialize mgr to hand out page offsets from the range
 *	[startpage, startpage + npages).  Offsets are allocated out of
 *	a vmem(9) arena with a quantum of one page; nodes currently
 *	holding an offset are indexed by start page in vom_nodes,
 *	protected by vom_lock.
 *
 *	NOTE(review): VM_SLEEP means this may sleep for memory --
 *	presumably callers are in thread context; confirm.
 */
void
drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{

	rw_init(&mgr->vom_lock);
	rb_tree_init(&mgr->vom_nodes, &drm_vma_node_rb_ops);
	mgr->vom_vmem = vmem_create("drm_vma", startpage, npages, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}
117
118 void
drm_vma_offset_manager_destroy(struct drm_vma_offset_manager * mgr)119 drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
120 {
121
122 vmem_destroy(mgr->vom_vmem);
123 KASSERTMSG((RB_TREE_MIN(&mgr->vom_nodes) == NULL),
124 "drm vma offset manager %p not empty", mgr);
125 #if 0
126 rb_tree_destroy(&mgr->vom_nodes);
127 #endif
128 rw_destroy(&mgr->vom_lock);
129 }
130
131 void
drm_vma_node_init(struct drm_vma_offset_node * node)132 drm_vma_node_init(struct drm_vma_offset_node *node)
133 {
134 static const struct drm_vma_offset_node zero_node;
135
136 *node = zero_node;
137
138 rw_init(&node->von_lock);
139 node->von_startpage = 0;
140 node->von_npages = 0;
141 rb_tree_init(&node->von_files, &drm_vma_file_rb_ops);
142 }
143
/*
 * drm_vma_node_destroy(node)
 *
 *	Tear down node.  The caller must already have removed it from
 *	any manager (von_npages and von_startpage back to zero, as
 *	drm_vma_offset_remove leaves them) and revoked every file's
 *	access so von_files is empty.
 */
void
drm_vma_node_destroy(struct drm_vma_offset_node *node)
{

	KASSERTMSG((RB_TREE_MIN(&node->von_files) == NULL),
	    "drm vma node %p not empty", node);
#if 0				/* rbtree(9) has no tree-destroy operation */
	rb_tree_destroy(&node->von_files);
#endif
	KASSERT(node->von_startpage == 0);
	KASSERT(node->von_npages == 0);
	rw_destroy(&node->von_lock);
}
157
158 int
drm_vma_offset_add(struct drm_vma_offset_manager * mgr,struct drm_vma_offset_node * node,unsigned long npages)159 drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
160 struct drm_vma_offset_node *node, unsigned long npages)
161 {
162 vmem_size_t startpage;
163 struct drm_vma_offset_node *collision __diagused;
164 int error;
165
166 KASSERT(npages != 0);
167
168 if (0 < node->von_npages)
169 return 0;
170
171 error = vmem_alloc(mgr->vom_vmem, npages, VM_NOSLEEP|VM_BESTFIT,
172 &startpage);
173 if (error) {
174 if (error == ENOMEM)
175 error = ENOSPC;
176 /* XXX errno NetBSD->Linux */
177 return -error;
178 }
179
180 node->von_startpage = startpage;
181 node->von_npages = npages;
182
183 rw_enter(&node->von_lock, RW_WRITER);
184 collision = rb_tree_insert_node(&mgr->vom_nodes, node);
185 KASSERT(collision == node);
186 rw_exit(&node->von_lock);
187
188 return 0;
189 }
190
191 void
drm_vma_offset_remove(struct drm_vma_offset_manager * mgr,struct drm_vma_offset_node * node)192 drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
193 struct drm_vma_offset_node *node)
194 {
195
196 if (node->von_npages == 0)
197 return;
198
199 rw_enter(&node->von_lock, RW_WRITER);
200 rb_tree_remove_node(&mgr->vom_nodes, node);
201 rw_exit(&node->von_lock);
202
203 vmem_free(mgr->vom_vmem, node->von_startpage, node->von_npages);
204
205 node->von_npages = 0;
206 node->von_startpage = 0;
207 }
208
/*
 * drm_vma_offset_lock_lookup(mgr)
 *
 *	Take mgr's lock as reader so the caller may make one or more
 *	calls to drm_vma_offset_lookup_locked.  Release with
 *	drm_vma_offset_unlock_lookup.
 */
void
drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{

	rw_enter(&mgr->vom_lock, RW_READER);
}
215
/*
 * drm_vma_offset_unlock_lookup(mgr)
 *
 *	Release the lock taken by drm_vma_offset_lock_lookup.
 */
void
drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{

	rw_exit(&mgr->vom_lock);
}
222
223 struct drm_vma_offset_node *
drm_vma_offset_lookup_locked(struct drm_vma_offset_manager * mgr,unsigned long startpage,unsigned long npages)224 drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
225 unsigned long startpage, unsigned long npages)
226 {
227 const vmem_addr_t key = startpage;
228 struct drm_vma_offset_node *node;
229
230 KASSERT(rw_lock_held(&mgr->vom_lock));
231
232 node = rb_tree_find_node_leq(&mgr->vom_nodes, &key);
233 if (node == NULL)
234 return NULL;
235 KASSERT(node->von_startpage <= startpage);
236 if (npages < node->von_npages)
237 return NULL;
238 if (node->von_npages - npages < startpage - node->von_startpage)
239 return NULL;
240
241 return node;
242 }
243
244 struct drm_vma_offset_node *
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager * mgr,unsigned long startpage,unsigned long npages)245 drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
246 unsigned long startpage, unsigned long npages)
247 {
248 const vmem_addr_t key = startpage;
249 struct drm_vma_offset_node *node;
250
251 rw_enter(&mgr->vom_lock, RW_READER);
252
253 node = rb_tree_find_node(&mgr->vom_nodes, &key);
254 if (node == NULL)
255 goto out;
256 KASSERT(node->von_startpage == startpage);
257 if (node->von_npages != npages) {
258 node = NULL;
259 goto out;
260 }
261
262 out: rw_exit(&mgr->vom_lock);
263 return node;
264 }
265
266 int
drm_vma_node_allow(struct drm_vma_offset_node * node,struct file * file)267 drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *file)
268 {
269 struct drm_vma_offset_file *new, *old;
270
271 new = kmem_alloc(sizeof(*new), KM_NOSLEEP);
272 if (new == NULL)
273 return -ENOMEM;
274 new->vof_file = file;
275
276 rw_enter(&node->von_lock, RW_WRITER);
277 old = rb_tree_insert_node(&node->von_files, new);
278 rw_exit(&node->von_lock);
279
280 if (old != new) /* collision */
281 kmem_free(new, sizeof(*new));
282
283 return 0;
284 }
285
286 void
drm_vma_node_revoke(struct drm_vma_offset_node * node,struct file * file)287 drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *file)
288 {
289
290 rw_enter(&node->von_lock, RW_WRITER);
291 struct drm_vma_offset_file *const found =
292 rb_tree_find_node(&node->von_files, file);
293 if (found != NULL)
294 rb_tree_remove_node(&node->von_files, found);
295 rw_exit(&node->von_lock);
296 if (found != NULL)
297 kmem_free(found, sizeof(*found));
298 }
299
300 bool
drm_vma_node_is_allowed(struct drm_vma_offset_node * node,struct file * file)301 drm_vma_node_is_allowed(struct drm_vma_offset_node *node, struct file *file)
302 {
303
304 rw_enter(&node->von_lock, RW_READER);
305 const bool allowed =
306 (rb_tree_find_node(&node->von_files, file) != NULL);
307 rw_exit(&node->von_lock);
308
309 return allowed;
310 }
311
312 int
drm_vma_node_verify_access(struct drm_vma_offset_node * node,struct file * file)313 drm_vma_node_verify_access(struct drm_vma_offset_node *node, struct file *file)
314 {
315
316 if (!drm_vma_node_is_allowed(node, file))
317 return -EACCES;
318
319 return 0;
320 }
321