xref: /freebsd/sys/kern/subr_kobj.c (revision 0957b409)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000,2003 Doug Rabson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/kobj.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/sysctl.h>
39 #ifndef TEST
40 #include <sys/systm.h>
41 #endif
42 
43 #ifdef TEST
44 #include "usertest.h"
45 #endif
46 
47 static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");
48 
49 #ifdef KOBJ_STATS
50 
51 u_int kobj_lookup_hits;
52 u_int kobj_lookup_misses;
53 
54 SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
55 	   &kobj_lookup_hits, 0, "");
56 SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
57 	   &kobj_lookup_misses, 0, "");
58 
59 #endif
60 
61 static struct mtx kobj_mtx;
62 static int kobj_mutex_inited;
63 static int kobj_next_id = 1;
64 
65 #define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
66 #define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
67 #define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what);
68 
69 SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
70 	   &kobj_next_id, 0, "");
71 
72 static void
73 kobj_init_mutex(void *arg)
74 {
75 	if (!kobj_mutex_inited) {
76 		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
77 		kobj_mutex_inited = 1;
78 	}
79 }
80 
81 SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
82 
83 /*
84  * This method structure is used to initialise new caches. Since the
85  * desc pointer is NULL, it is guaranteed never to match any read
86  * descriptors.
87  */
88 static const struct kobj_method null_method = {
89 	0, 0,
90 };
91 
/*
 * Default implementation installed in method slots that have no
 * explicit default: fail with "device not configured".
 */
int
kobj_error_method(void)
{

	/* Parenthesized per style(9), consistent with the rest of the file. */
	return (ENXIO);
}
98 
99 static void
100 kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
101 {
102 	kobj_method_t *m;
103 	int i;
104 
105 	/*
106 	 * Don't do anything if we are already compiled.
107 	 */
108 	if (cls->ops)
109 		return;
110 
111 	/*
112 	 * First register any methods which need it.
113 	 */
114 	for (i = 0, m = cls->methods; m->desc; i++, m++) {
115 		if (m->desc->id == 0)
116 			m->desc->id = kobj_next_id++;
117 	}
118 
119 	/*
120 	 * Then initialise the ops table.
121 	 */
122 	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
123 		ops->cache[i] = &null_method;
124 	ops->cls = cls;
125 	cls->ops = ops;
126 }
127 
128 static int
129 kobj_class_compile1(kobj_class_t cls, int mflags)
130 {
131 	kobj_ops_t ops;
132 
133 	KOBJ_ASSERT(MA_NOTOWNED);
134 
135 	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, mflags);
136 	if (ops == NULL)
137 		return (ENOMEM);
138 
139 	/*
140 	 * We may have lost a race for kobj_class_compile here - check
141 	 * to make sure someone else hasn't already compiled this
142 	 * class.
143 	 */
144 	KOBJ_LOCK();
145 	if (cls->ops) {
146 		KOBJ_UNLOCK();
147 		free(ops, M_KOBJ);
148 		return (0);
149 	}
150 	kobj_class_compile_common(cls, ops);
151 	KOBJ_UNLOCK();
152 	return (0);
153 }
154 
/*
 * Compile a class, sleeping for memory if necessary.  With M_WAITOK
 * the allocation cannot fail, so a non-zero return from
 * kobj_class_compile1() would indicate a logic error.
 */
void
kobj_class_compile(kobj_class_t cls)
{
	int error;

	error = kobj_class_compile1(cls, M_WAITOK);
	KASSERT(error == 0, ("kobj_class_compile1 returned %d", error));
}
163 
/*
 * Compile a class using a caller-provided (typically statically
 * allocated) ops table.  Only valid before the kobj mutex exists,
 * i.e. during early boot while the system is still single-threaded.
 */
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	cls->refs++;
	kobj_class_compile_common(cls, ops);
}
177 
178 static kobj_method_t*
179 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
180 {
181 	kobj_method_t *methods = cls->methods;
182 	kobj_method_t *ce;
183 
184 	for (ce = methods; ce && ce->desc; ce++) {
185 		if (ce->desc == desc) {
186 			return ce;
187 		}
188 	}
189 
190 	return NULL;
191 }
192 
193 static kobj_method_t*
194 kobj_lookup_method_mi(kobj_class_t cls,
195 		      kobjop_desc_t desc)
196 {
197 	kobj_method_t *ce;
198 	kobj_class_t *basep;
199 
200 	ce = kobj_lookup_method_class(cls, desc);
201 	if (ce)
202 		return ce;
203 
204 	basep = cls->baseclasses;
205 	if (basep) {
206 		for (; *basep; basep++) {
207 			ce = kobj_lookup_method_mi(*basep, desc);
208 			if (ce)
209 				return ce;
210 		}
211 	}
212 
213 	return NULL;
214 }
215 
216 kobj_method_t*
217 kobj_lookup_method(kobj_class_t cls,
218 		   kobj_method_t **cep,
219 		   kobjop_desc_t desc)
220 {
221 	kobj_method_t *ce;
222 
223 	ce = kobj_lookup_method_mi(cls, desc);
224 	if (!ce)
225 		ce = &desc->deflt;
226 	if (cep)
227 		*cep = ce;
228 	return ce;
229 }
230 
231 void
232 kobj_class_free(kobj_class_t cls)
233 {
234 	void* ops = NULL;
235 
236 	KOBJ_ASSERT(MA_NOTOWNED);
237 	KOBJ_LOCK();
238 
239 	/*
240 	 * Protect against a race between kobj_create and
241 	 * kobj_delete.
242 	 */
243 	if (cls->refs == 0) {
244 		/*
245 		 * For now we don't do anything to unregister any methods
246 		 * which are no longer used.
247 		 */
248 
249 		/*
250 		 * Free memory and clean up.
251 		 */
252 		ops = cls->ops;
253 		cls->ops = NULL;
254 	}
255 
256 	KOBJ_UNLOCK();
257 
258 	if (ops)
259 		free(ops, M_KOBJ);
260 }
261 
262 static void
263 kobj_init_common(kobj_t obj, kobj_class_t cls)
264 {
265 
266 	obj->ops = cls->ops;
267 	cls->refs++;
268 }
269 
270 static int
271 kobj_init1(kobj_t obj, kobj_class_t cls, int mflags)
272 {
273 	int error;
274 
275 	KOBJ_LOCK();
276 	while (cls->ops == NULL) {
277 		/*
278 		 * kobj_class_compile doesn't want the lock held
279 		 * because of the call to malloc - we drop the lock
280 		 * and re-try.
281 		 */
282 		KOBJ_UNLOCK();
283 		error = kobj_class_compile1(cls, mflags);
284 		if (error != 0)
285 			return (error);
286 		KOBJ_LOCK();
287 	}
288 	kobj_init_common(obj, cls);
289 	KOBJ_UNLOCK();
290 	return (0);
291 }
292 
293 kobj_t
294 kobj_create(kobj_class_t cls, struct malloc_type *mtype, int mflags)
295 {
296 	kobj_t obj;
297 
298 	obj = malloc(cls->size, mtype, mflags | M_ZERO);
299 	if (obj == NULL)
300 		return (NULL);
301 	if (kobj_init1(obj, cls, mflags) != 0) {
302 		free(obj, mtype);
303 		return (NULL);
304 	}
305 	return (obj);
306 }
307 
308 void
309 kobj_init(kobj_t obj, kobj_class_t cls)
310 {
311 	int error;
312 
313 	error = kobj_init1(obj, cls, M_NOWAIT);
314 	if (error != 0)
315 		panic("kobj_init1 failed: error %d", error);
316 }
317 
/*
 * Initialise a statically allocated instance.  Only valid before the
 * kobj mutex exists, i.e. during single-threaded early boot; the
 * class must already have been compiled (e.g. via
 * kobj_class_compile_static()).
 */
void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	kobj_init_common(obj, cls);
}
327 
328 void
329 kobj_delete(kobj_t obj, struct malloc_type *mtype)
330 {
331 	kobj_class_t cls = obj->ops->cls;
332 	int refs;
333 
334 	/*
335 	 * Consider freeing the compiled method table for the class
336 	 * after its last instance is deleted. As an optimisation, we
337 	 * should defer this for a short while to avoid thrashing.
338 	 */
339 	KOBJ_ASSERT(MA_NOTOWNED);
340 	KOBJ_LOCK();
341 	cls->refs--;
342 	refs = cls->refs;
343 	KOBJ_UNLOCK();
344 
345 	if (!refs)
346 		kobj_class_free(cls);
347 
348 	obj->ops = NULL;
349 	if (mtype)
350 		free(obj, mtype);
351 }
352