
FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_kobj.c


    1 /*-
    2  * Copyright (c) 2000,2003 Doug Rabson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/7.4/sys/kern/subr_kobj.c 153844 2005-12-29 18:00:42Z jhb $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/kernel.h>
   32 #include <sys/kobj.h>
   33 #include <sys/lock.h>
   34 #include <sys/malloc.h>
   35 #include <sys/mutex.h>
   36 #include <sys/sysctl.h>
   37 #ifndef TEST
   38 #include <sys/systm.h>
   39 #endif
   40 
   41 #ifdef TEST
   42 #include "usertest.h"
   43 #endif
   44 
   45 static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");
   46 
   47 #ifdef KOBJ_STATS
   48 
   49 u_int kobj_lookup_hits;
   50 u_int kobj_lookup_misses;
   51 
   52 SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
   53            &kobj_lookup_hits, 0, "");
   54 SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
   55            &kobj_lookup_misses, 0, "");
   56 
   57 #endif
   58 
   59 static struct mtx kobj_mtx;
   60 static int kobj_mutex_inited;
   61 static int kobj_next_id = 1;
   62 
   63 SYSCTL_UINT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
   64            &kobj_next_id, 0, "");
   65 
   66 static void
   67 kobj_init_mutex(void *arg)
   68 {
   69         if (!kobj_mutex_inited) {
   70                 mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
   71                 kobj_mutex_inited = 1;
   72         }
   73 }
   74 
   75 SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
   76 
   77 void
   78 kobj_machdep_init(void)
   79 {
   80         kobj_init_mutex(NULL);
   81 }
   82 
   83 /*
   84  * This method structure is used to initialise new caches. Since the
   85  * desc pointer is NULL, it is guaranteed never to match any read
   86  * descriptors.
   87  */
   88 static struct kobj_method null_method = {
   89         0, 0,
   90 };
   91 
   92 int
   93 kobj_error_method(void)
   94 {
   95 
   96         return ENXIO;
   97 }
   98 
   99 static void
  100 kobj_register_method(struct kobjop_desc *desc)
  101 {
  102 
  103         mtx_assert(&kobj_mtx, MA_OWNED);
  104         if (desc->id == 0) {
  105                 desc->id = kobj_next_id++;
  106         }
  107 }
  108 
  109 static void
  110 kobj_unregister_method(struct kobjop_desc *desc)
  111 {
  112 }
  113 
  114 static void
  115 kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
  116 {
  117         kobj_method_t *m;
  118         int i;
  119 
  120         mtx_assert(&kobj_mtx, MA_OWNED);
  121 
  122         /*
  123          * Don't do anything if we are already compiled.
  124          */
  125         if (cls->ops)
  126                 return;
  127 
  128         /*
  129          * First register any methods which need it.
  130          */
  131         for (i = 0, m = cls->methods; m->desc; i++, m++)
  132                 kobj_register_method(m->desc);
  133 
  134         /*
  135          * Then initialise the ops table.
  136          */
  137         for (i = 0; i < KOBJ_CACHE_SIZE; i++)
  138                 ops->cache[i] = &null_method;
  139         ops->cls = cls;
  140         cls->ops = ops;
  141 }
  142 
  143 void
  144 kobj_class_compile(kobj_class_t cls)
  145 {
  146         kobj_ops_t ops;
  147 
  148         mtx_assert(&kobj_mtx, MA_NOTOWNED);
  149 
  150         /*
  151          * Allocate space for the compiled ops table.
  152          */
  153         ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
  154         if (!ops)
  155                 panic("kobj_compile_methods: out of memory");
  156 
  157         mtx_lock(&kobj_mtx);
  158         
  159         /*
  160          * We may have lost a race for kobj_class_compile here - check
  161          * to make sure someone else hasn't already compiled this
  162          * class.
  163          */
  164         if (cls->ops) {
  165                 mtx_unlock(&kobj_mtx);
  166                 free(ops, M_KOBJ);
  167                 return;
  168         }
  169 
  170         kobj_class_compile_common(cls, ops);
  171         mtx_unlock(&kobj_mtx);
  172 }
  173 
  174 void
  175 kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
  176 {
  177 
  178         mtx_assert(&kobj_mtx, MA_NOTOWNED);
  179 
  180         /*
  181          * Increment refs to make sure that the ops table is not freed.
  182          */
  183         mtx_lock(&kobj_mtx);
  184         cls->refs++;
  185         kobj_class_compile_common(cls, ops);
  186         mtx_unlock(&kobj_mtx);
  187 }
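
/*
 * Illustrative note (not part of subr_kobj.c): kobj_class_compile_static()
 * exists so a consumer can supply its own storage for the ops table, for
 * example before malloc(9) is usable.  A hedged sketch, using a
 * hypothetical class "foo_class":
 *
 *      static struct kobj_ops foo_ops;
 *      ...
 *      kobj_class_compile_static(&foo_class, &foo_ops);
 *
 * The reference taken above keeps cls->refs from ever reaching zero, so
 * kobj_class_free() will never hand the statically allocated table to
 * free().
 */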
  188 
  189 static kobj_method_t*
  190 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
  191 {
  192         kobj_method_t *methods = cls->methods;
  193         kobj_method_t *ce;
  194 
  195         for (ce = methods; ce && ce->desc; ce++) {
  196                 if (ce->desc == desc) {
  197                         return ce;
  198                 }
  199         }
  200 
  201         return 0;
  202 }
  203 
  204 static kobj_method_t*
  205 kobj_lookup_method_mi(kobj_class_t cls,
  206                       kobjop_desc_t desc)
  207 {
  208         kobj_method_t *ce;
  209         kobj_class_t *basep;
  210 
  211         ce = kobj_lookup_method_class(cls, desc);
  212         if (ce)
  213                 return ce;
  214 
  215         basep = cls->baseclasses;
  216         if (basep) {
  217                 for (; *basep; basep++) {
  218                         ce = kobj_lookup_method_mi(*basep, desc);
  219                         if (ce)
  220                                 return ce;
  221                 }
  222         }
  223 
  224         return 0;
  225 }
  226 
  227 kobj_method_t*
  228 kobj_lookup_method(kobj_class_t cls,
  229                    kobj_method_t **cep,
  230                    kobjop_desc_t desc)
  231 {
  232         kobj_method_t *ce;
  233 
  234 #ifdef KOBJ_STATS
  235         /*
  236          * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
  237          * a 'miss'.
  238          */
  239         kobj_lookup_hits--;
  240         kobj_lookup_misses++;
  241 #endif
  242 
  243         ce = kobj_lookup_method_mi(cls, desc);
  244         if (!ce)
  245                 ce = desc->deflt;
  246         *cep = ce;
  247         return ce;
  248 }
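
/*
 * Illustrative sketch (not part of subr_kobj.c): a rough paraphrase of how
 * the KOBJOPLOOKUP fast path in <sys/kobj.h> is expected to consult the
 * per-class cache before falling back to kobj_lookup_method() above.  The
 * real macro may differ in detail; this only shows the idea.
 */
static __inline kobjop_t
kobj_dispatch_sketch(kobj_ops_t ops, kobjop_desc_t desc)
{
        kobj_method_t **cep, *ce;

        /* Hash the descriptor's global id into the cache and assume a hit. */
        cep = &ops->cache[desc->id & (KOBJ_CACHE_SIZE - 1)];
        ce = *cep;
        if (ce->desc != desc)
                /* Miss (or the null_method placeholder): refill *cep. */
                ce = kobj_lookup_method(ops->cls, cep, desc);
        return (ce->func);
}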
  249 
  250 void
  251 kobj_class_free(kobj_class_t cls)
  252 {
  253         int i;
  254         kobj_method_t *m;
  255         void* ops = 0;
  256 
  257         mtx_assert(&kobj_mtx, MA_NOTOWNED);
  258         mtx_lock(&kobj_mtx);
  259 
  260         /*
  261          * Protect against a race between kobj_create and
  262          * kobj_delete.
  263          */
  264         if (cls->refs == 0) {
  265                 /*
  266                  * Unregister any methods which are no longer used.
  267                  */
  268                 for (i = 0, m = cls->methods; m->desc; i++, m++)
  269                         kobj_unregister_method(m->desc);
  270 
  271                 /*
  272                  * Free memory and clean up.
  273                  */
  274                 ops = cls->ops;
  275                 cls->ops = 0;
  276         }
  277         
  278         mtx_unlock(&kobj_mtx);
  279 
  280         if (ops)
  281                 free(ops, M_KOBJ);
  282 }
  283 
  284 kobj_t
  285 kobj_create(kobj_class_t cls,
  286             struct malloc_type *mtype,
  287             int mflags)
  288 {
  289         kobj_t obj;
  290 
  291         /*
  292          * Allocate and initialise the new object.
  293          */
  294         obj = malloc(cls->size, mtype, mflags | M_ZERO);
  295         if (!obj)
  296                 return 0;
  297         kobj_init(obj, cls);
  298 
  299         return obj;
  300 }
  301 
  302 void
  303 kobj_init(kobj_t obj, kobj_class_t cls)
  304 {
  305         mtx_assert(&kobj_mtx, MA_NOTOWNED);
  306   retry:
  307         mtx_lock(&kobj_mtx);
  308 
  309         /*
  310          * Consider compiling the class' method table.
  311          */
  312         if (!cls->ops) {
  313                 /*
  314                  * kobj_class_compile doesn't want the lock held
  315                  * because of the call to malloc - we drop the lock
  316                  * and re-try.
  317                  */
  318                 mtx_unlock(&kobj_mtx);
  319                 kobj_class_compile(cls);
  320                 goto retry;
  321         }
  322 
  323         obj->ops = cls->ops;
  324         cls->refs++;
  325 
  326         mtx_unlock(&kobj_mtx);
  327 }
  328 
  329 void
  330 kobj_delete(kobj_t obj, struct malloc_type *mtype)
  331 {
  332         kobj_class_t cls = obj->ops->cls;
  333         int refs;
  334 
  335         /*
  336          * Consider freeing the compiled method table for the class
  337          * after its last instance is deleted. As an optimisation, we
  338          * should defer this for a short while to avoid thrashing.
  339          */
  340         mtx_assert(&kobj_mtx, MA_NOTOWNED);
  341         mtx_lock(&kobj_mtx);
  342         cls->refs--;
  343         refs = cls->refs;
  344         mtx_unlock(&kobj_mtx);
  345 
  346         if (!refs)
  347                 kobj_class_free(cls);
  348 
  349         obj->ops = 0;
  350         if (mtype)
  351                 free(obj, mtype);
  352 }
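
Illustrative usage sketch (not part of subr_kobj.c): the fragment below shows how a consumer might declare a kobj class and create and destroy instances with the routines above. The interface "foo" (its generated foo_if.h header, the foo_doit_desc descriptor, and the FOO_DOIT() dispatch wrapper that a foo_if.m file would produce) and the malloc type M_FOO are hypothetical names used only for illustration.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/malloc.h>

#include "foo_if.h"                     /* hypothetical, generated from foo_if.m */

static MALLOC_DEFINE(M_FOO, "foo", "example kobj instances");

struct foo_softc {
        KOBJ_FIELDS;                    /* must come first: holds the ops pointer */
        int     counter;
};

static int
foo_doit_impl(kobj_t obj)
{
        struct foo_softc *sc = (struct foo_softc *)obj;

        return (sc->counter++);
}

static kobj_method_t foo_methods[] = {
        KOBJMETHOD(foo_doit, foo_doit_impl),
        { 0, 0 }        /* terminator expected by kobj_class_compile_common() */
};

DEFINE_CLASS_0(foo, foo_class, foo_methods, sizeof(struct foo_softc));

static void
foo_example(void)
{
        kobj_t obj;

        /* Compiles foo_class on first use and zeroes the new object. */
        obj = kobj_create(&foo_class, M_FOO, M_WAITOK);
        FOO_DOIT(obj);                  /* dispatches through the ops cache */
        kobj_delete(obj, M_FOO);        /* last instance frees the ops table */
}

The same pattern underlies consumers such as the new-bus device framework and the kernel linker classes, which build their method dispatch on kobj(9).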



This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.