FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_kobj.c


/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/kern/subr_kobj.c 227711 2011-11-19 12:55:32Z marius $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
           &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
           &kobj_lookup_misses, 0, "");

#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

#define KOBJ_LOCK()             mtx_lock(&kobj_mtx)
#define KOBJ_UNLOCK()           mtx_unlock(&kobj_mtx)
#define KOBJ_ASSERT(what)       mtx_assert(&kobj_mtx, what);

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
           &kobj_next_id, 0, "");

static void
kobj_init_mutex(void *arg)
{
        if (!kobj_mutex_inited) {
                mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
                kobj_mutex_inited = 1;
        }
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);

/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any read
 * descriptors.
 */
static struct kobj_method null_method = {
        0, 0,
};

int
kobj_error_method(void)
{

        return ENXIO;
}

static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
        kobj_method_t *m;
        int i;

        /*
         * Don't do anything if we are already compiled.
         */
        if (cls->ops)
                return;

        /*
         * First register any methods which need it.
         */
        for (i = 0, m = cls->methods; m->desc; i++, m++) {
                if (m->desc->id == 0)
                        m->desc->id = kobj_next_id++;
        }

        /*
         * Then initialise the ops table.
         */
        for (i = 0; i < KOBJ_CACHE_SIZE; i++)
                ops->cache[i] = &null_method;
        ops->cls = cls;
        cls->ops = ops;
}

void
kobj_class_compile(kobj_class_t cls)
{
        kobj_ops_t ops;

        KOBJ_ASSERT(MA_NOTOWNED);

        /*
         * Allocate space for the compiled ops table.
         */
        ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
        if (!ops)
                panic("%s: out of memory", __func__);

        KOBJ_LOCK();

        /*
         * We may have lost a race for kobj_class_compile here - check
         * to make sure someone else hasn't already compiled this
         * class.
         */
        if (cls->ops) {
                KOBJ_UNLOCK();
                free(ops, M_KOBJ);
                return;
        }

        kobj_class_compile_common(cls, ops);
        KOBJ_UNLOCK();
}

void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

        KASSERT(kobj_mutex_inited == 0,
            ("%s: only supported during early cycles", __func__));

        /*
         * Increment refs to make sure that the ops table is not freed.
         */
        cls->refs++;
        kobj_class_compile_common(cls, ops);
}

static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
        kobj_method_t *methods = cls->methods;
        kobj_method_t *ce;

        for (ce = methods; ce && ce->desc; ce++) {
                if (ce->desc == desc) {
                        return ce;
                }
        }

        return NULL;
}

static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
                      kobjop_desc_t desc)
{
        kobj_method_t *ce;
        kobj_class_t *basep;

        ce = kobj_lookup_method_class(cls, desc);
        if (ce)
                return ce;

        basep = cls->baseclasses;
        if (basep) {
                for (; *basep; basep++) {
                        ce = kobj_lookup_method_mi(*basep, desc);
                        if (ce)
                                return ce;
                }
        }

        return NULL;
}

kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
                   kobj_method_t **cep,
                   kobjop_desc_t desc)
{
        kobj_method_t *ce;

#ifdef KOBJ_STATS
        /*
         * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
         * a 'miss'.
         */
        kobj_lookup_hits--;
        kobj_lookup_misses++;
#endif

        ce = kobj_lookup_method_mi(cls, desc);
        if (!ce)
                ce = desc->deflt;
        *cep = ce;
        return ce;
}
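
/*
 * Editorial sketch (not part of the original file): roughly how the
 * dispatch side is expected to use the per-ops cache before falling back
 * to kobj_lookup_method() above.  The real fast path is the KOBJOPLOOKUP
 * macro in sys/kobj.h; the function below, its name and the cache-indexing
 * expression are illustrative assumptions only.
 */
static kobjop_t
example_kobj_dispatch(kobj_t obj, kobjop_desc_t desc)
{
        kobj_ops_t ops = obj->ops;
        /* Assumed slot choice: descriptor id hashed into the cache. */
        kobj_method_t **cep = &ops->cache[desc->id & (KOBJ_CACHE_SIZE - 1)];
        kobj_method_t *ce = *cep;

        /*
         * A freshly compiled class points every slot at null_method, whose
         * NULL desc can never match, so the first call always takes the
         * slow path, which refills the cache slot through *cep.
         */
        if (ce->desc != desc)
                ce = kobj_lookup_method(ops->cls, cep, desc);
        return (ce->func);
}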

void
kobj_class_free(kobj_class_t cls)
{
        void* ops = NULL;

        KOBJ_ASSERT(MA_NOTOWNED);
        KOBJ_LOCK();

        /*
         * Protect against a race between kobj_create and
         * kobj_delete.
         */
        if (cls->refs == 0) {
                /*
                 * For now we don't do anything to unregister any methods
                 * which are no longer used.
                 */

                /*
                 * Free memory and clean up.
                 */
                ops = cls->ops;
                cls->ops = NULL;
        }

        KOBJ_UNLOCK();

        if (ops)
                free(ops, M_KOBJ);
}

kobj_t
kobj_create(kobj_class_t cls,
            struct malloc_type *mtype,
            int mflags)
{
        kobj_t obj;

        /*
         * Allocate and initialise the new object.
         */
        obj = malloc(cls->size, mtype, mflags | M_ZERO);
        if (!obj)
                return NULL;
        kobj_init(obj, cls);

        return obj;
}

static void
kobj_init_common(kobj_t obj, kobj_class_t cls)
{

        obj->ops = cls->ops;
        cls->refs++;
}

void
kobj_init(kobj_t obj, kobj_class_t cls)
{
        KOBJ_ASSERT(MA_NOTOWNED);
  retry:
        KOBJ_LOCK();

        /*
         * Consider compiling the class' method table.
         */
        if (!cls->ops) {
                /*
                 * kobj_class_compile doesn't want the lock held
                 * because of the call to malloc - we drop the lock
                 * and re-try.
                 */
                KOBJ_UNLOCK();
                kobj_class_compile(cls);
                goto retry;
        }

        kobj_init_common(obj, cls);

        KOBJ_UNLOCK();
}

void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

        KASSERT(kobj_mutex_inited == 0,
            ("%s: only supported during early cycles", __func__));

        kobj_init_common(obj, cls);
}
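
/*
 * Editorial sketch (not part of the original file): one way the static
 * variants above can be used before SI_SUB_LOCK, when kobj_mtx and
 * malloc() are not yet available.  The caller supplies the ops storage
 * and the object; all names here are hypothetical.
 */
static kobj_method_t early_methods[] = {
        KOBJMETHOD_END
};

struct early_softc {
        KOBJ_FIELDS;            /* must come first: holds the ops pointer */
};

DEFINE_CLASS(early, early_methods, sizeof(struct early_softc));

static struct kobj_ops early_ops;
static struct early_softc early_obj;

static void
early_kobj_example(void)
{
        /* No locking or allocation: caller-provided ops table and object. */
        kobj_class_compile_static(&early_class, &early_ops);
        kobj_init_static((kobj_t)&early_obj, &early_class);
}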

void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
        kobj_class_t cls = obj->ops->cls;
        int refs;

        /*
         * Consider freeing the compiled method table for the class
         * after its last instance is deleted. As an optimisation, we
         * should defer this for a short while to avoid thrashing.
         */
        KOBJ_ASSERT(MA_NOTOWNED);
        KOBJ_LOCK();
        cls->refs--;
        refs = cls->refs;
        KOBJ_UNLOCK();

        if (!refs)
                kobj_class_free(cls);

        obj->ops = NULL;
        if (mtype)
                free(obj, mtype);
}
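
/*
 * Editorial sketch (not part of the original file): the ordinary dynamic
 * path, in the spirit of the kobj(9) example.  A "bar" interface with a
 * "doit" method is assumed to have been generated from a .m file by
 * makeobjops; the bar_doit descriptor, the BAR_DOIT() wrapper (from a
 * hypothetical bar_if.h) and all "foo" names are assumptions.
 */
static int
foo_doit(kobj_t obj, int flag)
{

        return (0);
}

static kobj_method_t foo_methods[] = {
        KOBJMETHOD(bar_doit, foo_doit),
        KOBJMETHOD_END
};

struct foo_softc {
        KOBJ_FIELDS;            /* must come first */
        int foo_flags;
};

DEFINE_CLASS(foo, foo_methods, sizeof(struct foo_softc));

static void
foo_kobj_example(void)
{
        kobj_t obj;

        /* Compiles foo_class on first use, then allocates a zeroed instance. */
        obj = kobj_create(&foo_class, M_TEMP, M_WAITOK);

        /* Dispatch through the generated wrapper (hypothetical bar_if.h). */
        (void)BAR_DOIT(obj, 0);

        /* Drops the class reference and frees the object. */
        kobj_delete(obj, M_TEMP);
}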

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.