FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_kobj.c
1 /*-
2 * Copyright (c) 2000,2003 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/6.3/sys/kern/subr_kobj.c 173886 2007-11-24 19:45:58Z cvs2svn $");
29
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/kobj.h>
33 #include <sys/lock.h>
34 #include <sys/malloc.h>
35 #include <sys/mutex.h>
36 #include <sys/sysctl.h>
37 #ifndef TEST
38 #include <sys/systm.h>
39 #endif
40
41 #ifdef TEST
42 #include "usertest.h"
43 #endif
44
static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

/* Cache hit/miss counters for KOBJOPLOOKUP; exported read-only below. */
u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
	   &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
	   &kobj_lookup_misses, 0, "");

#endif

/* Serializes class compilation, refcounting and method-id assignment. */
static struct mtx kobj_mtx;

/* Next method descriptor id; starts at 1 because 0 means "unassigned". */
static int kobj_next_id = 1;

/*
 * NOTE(review): kobj_next_id is a signed int exported through SYSCTL_UINT;
 * the types disagree, though the value stays small and non-negative here.
 */
SYSCTL_UINT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
	   &kobj_next_id, 0, "");
64
/*
 * Initialise kobj_mtx early in boot (SI_SUB_LOCK); every other entry
 * point in this file either acquires or asserts on this mutex.
 */
static void
kobj_init_mutex(void *arg)
{

	mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
73
/*
 * This method structure is used to initialise new caches.  Since its
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptor, so a fresh cache slot always forces a slow-path lookup
 * via kobj_lookup_method() on first use.
 */
static struct kobj_method null_method = {
	0, 0,
};
82
/*
 * Default implementation installed in method slots that a class does
 * not provide: always fail with "no such device or address".
 */
int
kobj_error_method(void)
{

	return (ENXIO);
}
89
90 static void
91 kobj_register_method(struct kobjop_desc *desc)
92 {
93
94 mtx_assert(&kobj_mtx, MA_OWNED);
95 if (desc->id == 0) {
96 desc->id = kobj_next_id++;
97 }
98 }
99
/*
 * Counterpart of kobj_register_method.  Descriptor ids are never
 * recycled, so there is intentionally nothing to undo here.
 */
static void
kobj_unregister_method(struct kobjop_desc *desc)
{
}
104
105 static void
106 kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
107 {
108 kobj_method_t *m;
109 int i;
110
111 mtx_assert(&kobj_mtx, MA_OWNED);
112
113 /*
114 * Don't do anything if we are already compiled.
115 */
116 if (cls->ops)
117 return;
118
119 /*
120 * First register any methods which need it.
121 */
122 for (i = 0, m = cls->methods; m->desc; i++, m++)
123 kobj_register_method(m->desc);
124
125 /*
126 * Then initialise the ops table.
127 */
128 for (i = 0; i < KOBJ_CACHE_SIZE; i++)
129 ops->cache[i] = &null_method;
130 ops->cls = cls;
131 cls->ops = ops;
132 }
133
134 void
135 kobj_class_compile(kobj_class_t cls)
136 {
137 kobj_ops_t ops;
138
139 mtx_assert(&kobj_mtx, MA_NOTOWNED);
140
141 /*
142 * Allocate space for the compiled ops table.
143 */
144 ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
145 if (!ops)
146 panic("kobj_compile_methods: out of memory");
147
148 mtx_lock(&kobj_mtx);
149
150 /*
151 * We may have lost a race for kobj_class_compile here - check
152 * to make sure someone else hasn't already compiled this
153 * class.
154 */
155 if (cls->ops) {
156 mtx_unlock(&kobj_mtx);
157 free(ops, M_KOBJ);
158 return;
159 }
160
161 kobj_class_compile_common(cls, ops);
162 mtx_unlock(&kobj_mtx);
163 }
164
/*
 * Compile a class using a caller-supplied (typically statically
 * allocated) ops table instead of malloc'd memory.  Safe to call
 * before malloc is available.
 */
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	mtx_assert(&kobj_mtx, MA_NOTOWNED);

	/*
	 * Increment refs to make sure that the ops table is not freed:
	 * the extra reference keeps cls->refs from reaching zero in
	 * kobj_class_free(), so the static table is never passed to free().
	 */
	mtx_lock(&kobj_mtx);
	cls->refs++;
	kobj_class_compile_common(cls, ops);
	mtx_unlock(&kobj_mtx);
}
179
180 static kobj_method_t*
181 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
182 {
183 kobj_method_t *methods = cls->methods;
184 kobj_method_t *ce;
185
186 for (ce = methods; ce && ce->desc; ce++) {
187 if (ce->desc == desc) {
188 return ce;
189 }
190 }
191
192 return 0;
193 }
194
195 static kobj_method_t*
196 kobj_lookup_method_mi(kobj_class_t cls,
197 kobjop_desc_t desc)
198 {
199 kobj_method_t *ce;
200 kobj_class_t *basep;
201
202 ce = kobj_lookup_method_class(cls, desc);
203 if (ce)
204 return ce;
205
206 basep = cls->baseclasses;
207 if (basep) {
208 for (; *basep; basep++) {
209 ce = kobj_lookup_method_mi(*basep, desc);
210 if (ce)
211 return ce;
212 }
213 }
214
215 return 0;
216 }
217
218 kobj_method_t*
219 kobj_lookup_method(kobj_class_t cls,
220 kobj_method_t **cep,
221 kobjop_desc_t desc)
222 {
223 kobj_method_t *ce;
224
225 #ifdef KOBJ_STATS
226 /*
227 * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
228 * a 'miss'.
229 */
230 kobj_lookup_hits--;
231 kobj_lookup_misses--;
232 #endif
233
234 ce = kobj_lookup_method_mi(cls, desc);
235 if (!ce)
236 ce = desc->deflt;
237 *cep = ce;
238 return ce;
239 }
240
/*
 * Tear down a class's compiled state once it has no live instances
 * (refs == 0).  The malloc'd ops table, if any, is released; method
 * descriptors keep their ids (kobj_unregister_method is a no-op).
 * Must be called without kobj_mtx held.
 */
void
kobj_class_free(kobj_class_t cls)
{
	int i;
	kobj_method_t *m;
	void* ops = 0;

	mtx_assert(&kobj_mtx, MA_NOTOWNED);
	mtx_lock(&kobj_mtx);

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete: only tear down if no instance grabbed a
	 * reference after our caller observed the count.
	 */
	if (cls->refs == 0) {
		/*
		 * Unregister any methods which are no longer used.
		 */
		for (i = 0, m = cls->methods; m->desc; i++, m++)
			kobj_unregister_method(m->desc);

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = 0;
	}

	mtx_unlock(&kobj_mtx);

	/* Call free() outside the mutex; ops is non-NULL only on teardown. */
	if (ops)
		free(ops, M_KOBJ);
}
274
275 kobj_t
276 kobj_create(kobj_class_t cls,
277 struct malloc_type *mtype,
278 int mflags)
279 {
280 kobj_t obj;
281
282 /*
283 * Allocate and initialise the new object.
284 */
285 obj = malloc(cls->size, mtype, mflags | M_ZERO);
286 if (!obj)
287 return 0;
288 kobj_init(obj, cls);
289
290 return obj;
291 }
292
/*
 * Bind an object to its class: compile the class's ops table if it
 * hasn't been compiled yet, point the object at it, and take a class
 * reference.  Must be called without kobj_mtx held.
 */
void
kobj_init(kobj_t obj, kobj_class_t cls)
{
	mtx_assert(&kobj_mtx, MA_NOTOWNED);
retry:
	mtx_lock(&kobj_mtx);

	/*
	 * Consider compiling the class' method table.
	 */
	if (!cls->ops) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.  (A concurrent compile is harmless:
		 * kobj_class_compile handles losing that race.)
		 */
		mtx_unlock(&kobj_mtx);
		kobj_class_compile(cls);
		goto retry;
	}

	obj->ops = cls->ops;
	cls->refs++;

	mtx_unlock(&kobj_mtx);
}
319
/*
 * Destroy an object created with kobj_create (or initialised with
 * kobj_init).  Drops the class reference, frees the class's compiled
 * ops table when the last instance goes away, and frees the object
 * itself if mtype is non-NULL (pass NULL for caller-owned storage).
 */
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	mtx_assert(&kobj_mtx, MA_NOTOWNED);
	mtx_lock(&kobj_mtx);
	cls->refs--;
	/* Snapshot the count under the lock; the free happens unlocked. */
	refs = cls->refs;
	mtx_unlock(&kobj_mtx);

	if (!refs)
		kobj_class_free(cls);

	obj->ops = 0;
	if (mtype)
		free(obj, mtype);
}
Cache object: 8b4a1e418a7c3761a8918f3a3ce96b1c
|