FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/locks_ppc.c


    1 /*
    2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 /*
   23  * @OSF_COPYRIGHT@
   24  */
   25 /* 
   26  * Mach Operating System
   27  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
   28  * All Rights Reserved.
   29  * 
   30  * Permission to use, copy, modify and distribute this software and its
   31  * documentation is hereby granted, provided that both the copyright
   32  * notice and this permission notice appear in all copies of the
   33  * software, derivative works or modified versions, and any portions
   34  * thereof, and that both notices appear in supporting documentation.
   35  * 
   36  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   37  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   38  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   39  * 
   40  * Carnegie Mellon requests users of this software to return to
   41  * 
   42  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   43  *  School of Computer Science
   44  *  Carnegie Mellon University
   45  *  Pittsburgh PA 15213-3890
   46  * 
   47  * any improvements or extensions that they make and grant Carnegie Mellon
   48  * the rights to redistribute these changes.
   49  */
   50 /*
   51  *      File:   kern/lock.c
   52  *      Author: Avadis Tevanian, Jr., Michael Wayne Young
   53  *      Date:   1985
   54  *
   55  *      Locking primitives implementation
   56  */
   57 
   58 #include <mach_kdb.h>
   59 #include <mach_ldebug.h>
   60 
   61 #include <kern/kalloc.h>
   62 #include <kern/lock.h>
   63 #include <kern/locks.h>
   64 #include <kern/misc_protos.h>
   65 #include <kern/thread.h>
   66 #include <kern/processor.h>
   67 #include <kern/sched_prim.h>
   68 #include <kern/xpr.h>
   69 #include <kern/debug.h>
   70 #include <string.h>
   71 
   72 #if     MACH_KDB
   73 #include <ddb/db_command.h>
   74 #include <ddb/db_output.h>
   75 #include <ddb/db_sym.h>
   76 #include <ddb/db_print.h>
   77 #endif  /* MACH_KDB */
   78 
   79 #ifdef __ppc__
   80 #include <ppc/Firmware.h>
   81 #endif
   82 
   83 #include <sys/kdebug.h>
   84 
   85 #define LCK_RW_LCK_EXCLUSIVE_CODE       0x100
   86 #define LCK_RW_LCK_EXCLUSIVE1_CODE      0x101
   87 #define LCK_RW_LCK_SHARED_CODE          0x102
   88 #define LCK_RW_LCK_SH_TO_EX_CODE        0x103
   89 #define LCK_RW_LCK_SH_TO_EX1_CODE       0x104
   90 #define LCK_RW_LCK_EX_TO_SH_CODE        0x105
   91 
   92 
   93 #define ANY_LOCK_DEBUG  (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
   94 
   95 unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 } ;
   96 
   97 /* Forwards */
   98 
   99 
  100 #if     USLOCK_DEBUG
  101 /*
  102  *      Perform simple lock checks.
  103  */
  104 int     uslock_check = 1;
  105 int     max_lock_loops  = 100000000;
  106 decl_simple_lock_data(extern , printf_lock)
  107 decl_simple_lock_data(extern , panic_lock)
  108 #if     MACH_KDB
  109 decl_simple_lock_data(extern , kdb_lock)
  110 #endif  /* MACH_KDB */
  111 #endif  /* USLOCK_DEBUG */
  112 
  113 
  114 /*
  115  *      We often want to know the addresses of the callers
  116  *      of the various lock routines.  However, this information
  117  *      is only used for debugging and statistics.
  118  */
  119 typedef void    *pc_t;
  120 #define INVALID_PC      ((void *) VM_MAX_KERNEL_ADDRESS)
  121 #define INVALID_THREAD  ((void *) VM_MAX_KERNEL_ADDRESS)
  122 #if     ANY_LOCK_DEBUG
  123 #define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
  124 #else   /* ANY_LOCK_DEBUG */
  125 #ifdef  lint
  126 /*
  127  *      Eliminate lint complaints about unused local pc variables.
  128  */
  129 #define OBTAIN_PC(pc,l) ++pc
  130 #else   /* lint */
  131 #define OBTAIN_PC(pc,l)
  132 #endif  /* lint */
   133 #endif  /* ANY_LOCK_DEBUG */
  134 
  135 
  136 /*
  137  *      Portable lock package implementation of usimple_locks.
  138  */
  139 
  140 #if     USLOCK_DEBUG
  141 #define USLDBG(stmt)    stmt
  142 void            usld_lock_init(usimple_lock_t, unsigned short);
  143 void            usld_lock_pre(usimple_lock_t, pc_t);
  144 void            usld_lock_post(usimple_lock_t, pc_t);
  145 void            usld_unlock(usimple_lock_t, pc_t);
  146 void            usld_lock_try_pre(usimple_lock_t, pc_t);
  147 void            usld_lock_try_post(usimple_lock_t, pc_t);
  148 int             usld_lock_common_checks(usimple_lock_t, char *);
  149 #else   /* USLOCK_DEBUG */
  150 #define USLDBG(stmt)
  151 #endif  /* USLOCK_DEBUG */
  152 
  153 /*
  154  *      Routine:        lck_spin_alloc_init
  155  */
  156 lck_spin_t *
  157 lck_spin_alloc_init(
  158         lck_grp_t       *grp,
  159         lck_attr_t      *attr) {
  160         lck_spin_t      *lck;
  161 
  162         if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
  163                 lck_spin_init(lck, grp, attr);
  164                 
  165         return(lck);
  166 }
  167 
  168 /*
  169  *      Routine:        lck_spin_free
  170  */
  171 void
  172 lck_spin_free(
  173         lck_spin_t      *lck,
  174         lck_grp_t       *grp) {
  175         lck_spin_destroy(lck, grp);
  176         kfree((void *)lck, sizeof(lck_spin_t));
  177 }
  178 
  179 /*
  180  *      Routine:        lck_spin_init
  181  */
  182 void
  183 lck_spin_init(
  184         lck_spin_t              *lck,
  185         lck_grp_t               *grp,
  186         __unused lck_attr_t     *attr) {
  187 
  188         lck->interlock = 0;
  189         lck_grp_reference(grp);
  190         lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
  191 }
  192 
  193 /*
  194  *      Routine:        lck_spin_destroy
  195  */
  196 void
  197 lck_spin_destroy(
  198         lck_spin_t      *lck,
  199         lck_grp_t       *grp) {
  200         if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
  201                 return;
  202         lck->interlock = LCK_SPIN_TAG_DESTROYED;
  203         lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
  204         lck_grp_deallocate(grp);
  205 }
  206 
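      /*
       *      Typical spin lock lifecycle (illustrative sketch, not part of this
       *      file; it assumes a lck_grp_t created elsewhere, e.g. with
       *      lck_grp_alloc_init(), and the lck_spin_lock()/lck_spin_unlock()
       *      entry points supplied by the machine-dependent layer):
       *
       *              lck_spin_t *slck = lck_spin_alloc_init(my_grp, LCK_ATTR_NULL);
       *
       *              lck_spin_lock(slck);
       *              ... short critical section, preemption disabled ...
       *              lck_spin_unlock(slck);
       *
       *              lck_spin_free(slck, my_grp);
       */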
  207 /*
  208  *      Initialize a usimple_lock.
  209  *
  210  *      No change in preemption state.
  211  */
  212 void
  213 usimple_lock_init(
  214         usimple_lock_t  l,
  215         unsigned short  tag)
  216 {
  217 #ifndef MACHINE_SIMPLE_LOCK
  218         USLDBG(usld_lock_init(l, tag));
  219         hw_lock_init(&l->interlock);
  220 #else
  221         simple_lock_init((simple_lock_t)l,tag);
  222 #endif
  223 }
  224 
  225 
  226 /*
  227  *      Acquire a usimple_lock.
  228  *
  229  *      Returns with preemption disabled.  Note
  230  *      that the hw_lock routines are responsible for
  231  *      maintaining preemption state.
  232  */
  233 void
  234 usimple_lock(
  235         usimple_lock_t  l)
  236 {
  237 #ifndef MACHINE_SIMPLE_LOCK
  238         int i;
  239         pc_t            pc;
  240 #if     USLOCK_DEBUG
  241         int             count = 0;
  242 #endif  /* USLOCK_DEBUG */
  243 
  244         OBTAIN_PC(pc, l);
  245         USLDBG(usld_lock_pre(l, pc));
  246 
  247         if(!hw_lock_to(&l->interlock, LockTimeOut))     /* Try to get the lock with a timeout */ 
  248                 panic("simple lock deadlock detection - l=0x%08X, cpu=%d, ret=0x%08X", l, cpu_number(), pc);
  249 
  250         USLDBG(usld_lock_post(l, pc));
  251 #else
  252         simple_lock((simple_lock_t)l);
  253 #endif
  254 }
  255 
  256 
  257 /*
  258  *      Release a usimple_lock.
  259  *
  260  *      Returns with preemption enabled.  Note
  261  *      that the hw_lock routines are responsible for
  262  *      maintaining preemption state.
  263  */
  264 void
  265 usimple_unlock(
  266         usimple_lock_t  l)
  267 {
  268 #ifndef MACHINE_SIMPLE_LOCK
  269         pc_t    pc;
  270 
  271         OBTAIN_PC(pc, l);
  272         USLDBG(usld_unlock(l, pc));
  273         sync();
  274         hw_lock_unlock(&l->interlock);
  275 #else
  276         simple_unlock_rwmb((simple_lock_t)l);
  277 #endif
  278 }
  279 
  280 
  281 /*
  282  *      Conditionally acquire a usimple_lock.
  283  *
  284  *      On success, returns with preemption disabled.
  285  *      On failure, returns with preemption in the same state
  286  *      as when first invoked.  Note that the hw_lock routines
  287  *      are responsible for maintaining preemption state.
  288  *
  289  *      XXX No stats are gathered on a miss; I preserved this
  290  *      behavior from the original assembly-language code, but
  291  *      doesn't it make sense to log misses?  XXX
  292  */
  293 unsigned int
  294 usimple_lock_try(
  295         usimple_lock_t  l)
  296 {
  297 #ifndef MACHINE_SIMPLE_LOCK
  298         pc_t            pc;
  299         unsigned int    success;
  300 
  301         OBTAIN_PC(pc, l);
  302         USLDBG(usld_lock_try_pre(l, pc));
   303         if ((success = hw_lock_try(&l->interlock))) {
  304                 USLDBG(usld_lock_try_post(l, pc));
  305         }
  306         return success;
  307 #else
  308         return(simple_lock_try((simple_lock_t)l));
  309 #endif
  310 }
  311 
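      /*
       *      Usage sketch (illustrative only): a usimple_lock guards a short
       *      critical section and leaves preemption disabled while it is held.
       *      The declaration macro and tag value below are assumptions, not
       *      taken from this file:
       *
       *              decl_simple_lock_data(static, my_lock)
       *
       *              usimple_lock_init(&my_lock, 0);
       *
       *              usimple_lock(&my_lock);
       *              ... critical section ...
       *              usimple_unlock(&my_lock);
       *
       *      usimple_lock_try() is the non-blocking variant: on failure no hold
       *      is acquired and preemption is left as the caller had it.
       */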
  312 #if     USLOCK_DEBUG
  313 /*
  314  *      States of a usimple_lock.  The default when initializing
  315  *      a usimple_lock is setting it up for debug checking.
  316  */
  317 #define USLOCK_CHECKED          0x0001          /* lock is being checked */
  318 #define USLOCK_TAKEN            0x0002          /* lock has been taken */
  319 #define USLOCK_INIT             0xBAA0          /* lock has been initialized */
  320 #define USLOCK_INITIALIZED      (USLOCK_INIT|USLOCK_CHECKED)
  321 #define USLOCK_CHECKING(l)      (uslock_check &&                        \
  322                                  ((l)->debug.state & USLOCK_CHECKED))
  323 
  324 /*
  325  *      Trace activities of a particularly interesting lock.
  326  */
  327 void    usl_trace(usimple_lock_t, int, pc_t, const char *);
  328 
  329 
  330 /*
  331  *      Initialize the debugging information contained
  332  *      in a usimple_lock.
  333  */
  334 void
  335 usld_lock_init(
  336         usimple_lock_t  l,
  337         unsigned short  tag)
  338 {
  339         if (l == USIMPLE_LOCK_NULL)
  340                 panic("lock initialization:  null lock pointer");
  341         l->lock_type = USLOCK_TAG;
  342         l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
  343         l->debug.lock_cpu = l->debug.unlock_cpu = 0;
  344         l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
  345         l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
  346         l->debug.duration[0] = l->debug.duration[1] = 0;
  347         l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
  348         l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
  349         l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
  350 }
  351 
  352 
  353 /*
  354  *      These checks apply to all usimple_locks, not just
  355  *      those with USLOCK_CHECKED turned on.
  356  */
  357 int
  358 usld_lock_common_checks(
  359         usimple_lock_t  l,
  360         char            *caller)
  361 {
  362         if (l == USIMPLE_LOCK_NULL)
  363                 panic("%s:  null lock pointer", caller);
  364         if (l->lock_type != USLOCK_TAG)
  365                 panic("%s:  0x%x is not a usimple lock", caller, (integer_t) l);
  366         if (!(l->debug.state & USLOCK_INIT))
  367                 panic("%s:  0x%x is not an initialized lock",
  368                       caller, (integer_t) l);
  369         return USLOCK_CHECKING(l);
  370 }
  371 
  372 
  373 /*
  374  *      Debug checks on a usimple_lock just before attempting
  375  *      to acquire it.
  376  */
  377 /* ARGSUSED */
  378 void
  379 usld_lock_pre(
  380         usimple_lock_t  l,
  381         pc_t            pc)
  382 {
  383         char            *caller = "usimple_lock";
  384 
  385 
  386         if (!usld_lock_common_checks(l, caller))
  387                 return;
  388 
  389 /*
   390  *      Note that we have a weird case where we are getting a lock when we are
  391  *      in the process of putting the system to sleep. We are running with no
  392  *      current threads, therefore we can't tell if we are trying to retake a lock
  393  *      we have or someone on the other processor has it.  Therefore we just
  394  *      ignore this test if the locking thread is 0.
  395  */
  396 
  397         if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
  398             l->debug.lock_thread == (void *) current_thread()) {
  399                 printf("%s:  lock 0x%x already locked (at 0x%x) by",
  400                       caller, (integer_t) l, l->debug.lock_pc);
  401                 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
  402                        l->debug.lock_thread, pc);
  403                 panic(caller);
  404         }
  405         mp_disable_preemption();
  406         usl_trace(l, cpu_number(), pc, caller);
  407         mp_enable_preemption();
  408 }
  409 
  410 
  411 /*
  412  *      Debug checks on a usimple_lock just after acquiring it.
  413  *
  414  *      Pre-emption has been disabled at this point,
  415  *      so we are safe in using cpu_number.
  416  */
  417 void
  418 usld_lock_post(
  419         usimple_lock_t  l,
  420         pc_t            pc)
  421 {
  422         register int    mycpu;
  423         char            *caller = "successful usimple_lock";
  424 
  425 
  426         if (!usld_lock_common_checks(l, caller))
  427                 return;
  428 
  429         if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
  430                 panic("%s:  lock 0x%x became uninitialized",
  431                       caller, (integer_t) l);
  432         if ((l->debug.state & USLOCK_TAKEN))
  433                 panic("%s:  lock 0x%x became TAKEN by someone else",
  434                       caller, (integer_t) l);
  435 
  436         mycpu = cpu_number();
  437         l->debug.lock_thread = (void *)current_thread();
  438         l->debug.state |= USLOCK_TAKEN;
  439         l->debug.lock_pc = pc;
  440         l->debug.lock_cpu = mycpu;
  441 
  442         usl_trace(l, mycpu, pc, caller);
  443 }
  444 
  445 
  446 /*
  447  *      Debug checks on a usimple_lock just before
  448  *      releasing it.  Note that the caller has not
  449  *      yet released the hardware lock.
  450  *
  451  *      Preemption is still disabled, so there's
  452  *      no problem using cpu_number.
  453  */
  454 void
  455 usld_unlock(
  456         usimple_lock_t  l,
  457         pc_t            pc)
  458 {
  459         register int    mycpu;
  460         char            *caller = "usimple_unlock";
  461 
  462 
  463         if (!usld_lock_common_checks(l, caller))
  464                 return;
  465 
  466         mycpu = cpu_number();
  467 
  468         if (!(l->debug.state & USLOCK_TAKEN))
  469                 panic("%s:  lock 0x%x hasn't been taken",
  470                       caller, (integer_t) l);
  471         if (l->debug.lock_thread != (void *) current_thread())
  472                 panic("%s:  unlocking lock 0x%x, owned by thread 0x%x",
  473                       caller, (integer_t) l, l->debug.lock_thread);
  474         if (l->debug.lock_cpu != mycpu) {
  475                 printf("%s:  unlocking lock 0x%x on cpu 0x%x",
  476                        caller, (integer_t) l, mycpu);
  477                 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
  478                 panic(caller);
  479         }
  480         usl_trace(l, mycpu, pc, caller);
  481 
  482         l->debug.unlock_thread = l->debug.lock_thread;
   483         l->debug.lock_thread = INVALID_THREAD;
  484         l->debug.state &= ~USLOCK_TAKEN;
  485         l->debug.unlock_pc = pc;
  486         l->debug.unlock_cpu = mycpu;
  487 }
  488 
  489 
  490 /*
  491  *      Debug checks on a usimple_lock just before
  492  *      attempting to acquire it.
  493  *
  494  *      Preemption isn't guaranteed to be disabled.
  495  */
  496 void
  497 usld_lock_try_pre(
  498         usimple_lock_t  l,
  499         pc_t            pc)
  500 {
  501         char            *caller = "usimple_lock_try";
  502 
  503         if (!usld_lock_common_checks(l, caller))
  504                 return;
  505         mp_disable_preemption();
  506         usl_trace(l, cpu_number(), pc, caller);
  507         mp_enable_preemption();
  508 }
  509 
  510 
  511 /*
  512  *      Debug checks on a usimple_lock just after
  513  *      successfully attempting to acquire it.
  514  *
  515  *      Preemption has been disabled by the
  516  *      lock acquisition attempt, so it's safe
  517  *      to use cpu_number.
  518  */
  519 void
  520 usld_lock_try_post(
  521         usimple_lock_t  l,
  522         pc_t            pc)
  523 {
  524         register int    mycpu;
  525         char            *caller = "successful usimple_lock_try";
  526 
  527         if (!usld_lock_common_checks(l, caller))
  528                 return;
  529 
  530         if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
  531                 panic("%s:  lock 0x%x became uninitialized",
  532                       caller, (integer_t) l);
  533         if ((l->debug.state & USLOCK_TAKEN))
  534                 panic("%s:  lock 0x%x became TAKEN by someone else",
  535                       caller, (integer_t) l);
  536 
  537         mycpu = cpu_number();
  538         l->debug.lock_thread = (void *) current_thread();
  539         l->debug.state |= USLOCK_TAKEN;
  540         l->debug.lock_pc = pc;
  541         l->debug.lock_cpu = mycpu;
  542 
  543         usl_trace(l, mycpu, pc, caller);
  544 }
  545 
  546 
  547 /*
  548  *      For very special cases, set traced_lock to point to a
  549  *      specific lock of interest.  The result is a series of
  550  *      XPRs showing lock operations on that lock.  The lock_seq
  551  *      value is used to show the order of those operations.
  552  */
  553 usimple_lock_t          traced_lock;
  554 unsigned int            lock_seq;
  555 
  556 void
  557 usl_trace(
  558         usimple_lock_t  l,
  559         int             mycpu,
  560         pc_t            pc,
  561         const char *    op_name)
  562 {
  563         if (traced_lock == l) {
  564                 XPR(XPR_SLOCK,
  565                     "seq %d, cpu %d, %s @ %x\n",
  566                     (integer_t) lock_seq, (integer_t) mycpu,
  567                     (integer_t) op_name, (integer_t) pc, 0);
  568                 lock_seq++;
  569         }
  570 }
  571 
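      /*
       *      To exercise the tracing hook above, point traced_lock at the
       *      usimple_lock of interest (my_suspect_lock is a hypothetical
       *      example); every subsequent operation on that lock then emits an
       *      XPR_SLOCK record tagged with the current lock_seq value:
       *
       *              traced_lock = &my_suspect_lock;
       */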
  572 
  573 #endif  /* USLOCK_DEBUG */
  574 
  575 /*
  576  * The C portion of the shared/exclusive locks package.
  577  */
  578 
  579 /*
  580  * Forward definition 
  581  */
  582 
  583 void lck_rw_lock_exclusive_gen(
  584         lck_rw_t        *lck);
  585 
  586 lck_rw_type_t lck_rw_done_gen(
  587         lck_rw_t        *lck);
  588 
  589 void
  590 lck_rw_lock_shared_gen(
  591         lck_rw_t        *lck);
  592 
  593 boolean_t
  594 lck_rw_lock_shared_to_exclusive_gen(
  595         lck_rw_t        *lck);
  596 
  597 void
  598 lck_rw_lock_exclusive_to_shared_gen(
  599         lck_rw_t        *lck);
  600 
  601 boolean_t
  602 lck_rw_try_lock_exclusive_gen(
  603         lck_rw_t        *lck);
  604 
  605 boolean_t
  606 lck_rw_try_lock_shared_gen(
  607         lck_rw_t        *lck);
  608 
  609 void lck_rw_ext_init(
  610         lck_rw_ext_t    *lck,
  611         lck_grp_t       *grp,
  612         lck_attr_t      *attr);
  613 
  614 void lck_rw_ext_backtrace(
  615         lck_rw_ext_t    *lck);
  616 
  617 void lck_rw_lock_exclusive_ext(
  618         lck_rw_ext_t    *lck,
  619         lck_rw_t        *rlck);
  620 
  621 lck_rw_type_t lck_rw_done_ext(
  622         lck_rw_ext_t    *lck,
  623         lck_rw_t        *rlck);
  624 
  625 void
  626 lck_rw_lock_shared_ext(
  627         lck_rw_ext_t    *lck,
  628         lck_rw_t        *rlck);
  629 
  630 boolean_t
  631 lck_rw_lock_shared_to_exclusive_ext(
  632         lck_rw_ext_t    *lck,
  633         lck_rw_t        *rlck);
  634 
  635 void
  636 lck_rw_lock_exclusive_to_shared_ext(
  637         lck_rw_ext_t    *lck,
  638         lck_rw_t        *rlck);
  639 
  640 boolean_t
  641 lck_rw_try_lock_exclusive_ext(
  642         lck_rw_ext_t    *lck,
  643         lck_rw_t        *rlck);
  644 
  645 boolean_t
  646 lck_rw_try_lock_shared_ext(
  647         lck_rw_ext_t    *lck,
  648         lck_rw_t        *rlck);
  649 
  650 void
  651 lck_rw_ilk_lock(
  652         lck_rw_t        *lck);
  653 
  654 void
  655 lck_rw_ilk_unlock(
  656         lck_rw_t        *lck);
  657 
  658 void
  659 lck_rw_check_type(
  660         lck_rw_ext_t    *lck,
  661         lck_rw_t        *rlck);
  662 
  663 /*
  664  *      Routine:        lock_alloc
  665  *      Function:
  666  *              Allocate a lock for external users who cannot
  667  *              hard-code the structure definition into their
  668  *              objects.
  669  *              For now just use kalloc, but a zone is probably
  670  *              warranted.
  671  */
  672 lock_t *
  673 lock_alloc(
  674         boolean_t               can_sleep,
  675         __unused unsigned short tag,
  676         __unused unsigned short tag1)
  677 {
  678         lock_t          *lck;
  679 
  680         if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
  681           lock_init(lck, can_sleep, tag, tag1);
  682         return(lck);
  683 }
  684 
  685 /*
  686  *      Routine:        lock_init
  687  *      Function:
  688  *              Initialize a lock; required before use.
  689  *              Note that clients declare the "struct lock"
  690  *              variables and then initialize them, rather
  691  *              than getting a new one from this module.
  692  */
  693 void
  694 lock_init(
  695         lock_t                  *lck,
  696         boolean_t               can_sleep,
  697         __unused unsigned short tag,
  698         __unused unsigned short tag1)
  699 {
  700         if (!can_sleep)
  701                 panic("lock_init: sleep mode must be set to TRUE\n");
  702 
  703         (void) memset((void *) lck, 0, sizeof(lock_t));
  704 #if     MACH_LDEBUG
  705         lck->lck_rw_deb.type = RW_TAG;
  706         lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
  707 #endif
  708 
  709 }
  710 
  711 
  712 /*
  713  *      Routine:        lock_free
  714  *      Function:
  715  *              Free a lock allocated for external users.
  716  *              For now just use kfree, but a zone is probably
  717  *              warranted.
  718  */
  719 void
  720 lock_free(
  721         lock_t  *lck)
  722 {
  723         kfree((void *)lck, sizeof(lock_t));
  724 }
  725 
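      /*
       *      Sketch of the external lock_t interface built from the routines
       *      above (map_lock is a hypothetical example; the lock_read()/
       *      lock_write()/lock_done() entry points used here are the
       *      MACH_LDEBUG versions defined below):
       *
       *              lock_t *map_lock = lock_alloc(TRUE, 0, 0);
       *
       *              lock_read(map_lock);            -- shared hold
       *              lock_done(map_lock);
       *
       *              lock_write(map_lock);           -- exclusive hold
       *              lock_done(map_lock);
       *
       *              lock_free(map_lock);
       */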
  726 #if     MACH_LDEBUG
  727 void
  728 lock_write(
  729         lock_t  *lck)
  730 {
  731         lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
  732 }
  733 
  734 void
  735 lock_done(
  736         lock_t  *lck)
  737 {
  738         (void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
  739 }
  740 
  741 void
  742 lock_read(
  743         lock_t  *lck)
  744 {
  745         lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
  746 }
  747 
  748 boolean_t
  749 lock_read_to_write(
  750         lock_t  *lck)
  751 {
  752         return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
  753 }
  754 
  755 void
  756 lock_write_to_read(
  757         register lock_t *lck)
  758 {
  759         lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
  760 }
  761 #endif
  762 
  763 /*
  764  *      Routine:        lck_rw_alloc_init
  765  */
  766 lck_rw_t *
  767 lck_rw_alloc_init(
  768         lck_grp_t       *grp,
  769         lck_attr_t      *attr) {
  770         lck_rw_t        *lck;
  771 
  772         if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
  773                 lck_rw_init(lck, grp, attr);
  774                 
  775         return(lck);
  776 }
  777 
  778 /*
  779  *      Routine:        lck_rw_free
  780  */
  781 void
  782 lck_rw_free(
  783         lck_rw_t        *lck,
  784         lck_grp_t       *grp) {
  785         lck_rw_destroy(lck, grp);
  786         kfree((void *)lck, sizeof(lck_rw_t));
  787 }
  788 
  789 /*
  790  *      Routine:        lck_rw_init
  791  */
  792 void
  793 lck_rw_init(
  794         lck_rw_t                *lck,
  795         lck_grp_t               *grp,
  796         lck_attr_t              *attr) {
  797         lck_rw_ext_t    *lck_ext;
  798         lck_attr_t      *lck_attr;
  799 
  800         if (attr != LCK_ATTR_NULL)
  801                 lck_attr = attr;
  802         else
  803                 lck_attr = &LockDefaultLckAttr;
  804 
  805         if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
  806                 if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
  807                         lck_rw_ext_init(lck_ext, grp, lck_attr);        
  808                         lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
  809                         lck->lck_rw_ptr = lck_ext;
  810                 }
  811         } else {
  812                 (void) memset((void *) lck, 0, sizeof(lck_rw_t));
  813         }
  814 
  815         lck_grp_reference(grp);
  816         lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
  817 }
  818 
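      /*
       *      Sketch of a setup that takes the LCK_ATTR_DEBUG (indirect) path
       *      above, assuming the usual group/attribute helpers from
       *      kern/locks.c:
       *
       *              lck_grp_t  *grp  = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
       *              lck_attr_t *attr = lck_attr_alloc_init();
       *              lck_attr_setdebug(attr);        -- selects the LCK_RW_TAG_INDIRECT path
       *              lck_rw_t   *lck  = lck_rw_alloc_init(grp, attr);
       *
       *      The debug state then lives in a separately kalloc'ed lck_rw_ext_t
       *      reached through lck_rw_ptr.
       */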
  819 /*
  820  *      Routine:        lck_rw_ext_init
  821  */
  822 void
  823 lck_rw_ext_init(
  824         lck_rw_ext_t    *lck,
  825         lck_grp_t       *grp,
  826         lck_attr_t      *attr) {
  827 
  828         bzero((void *)lck, sizeof(lck_rw_ext_t));
  829 
  830         if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
  831                 lck->lck_rw_deb.type = RW_TAG;
  832                 lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
  833         }
  834 
  835         lck->lck_rw_grp = grp;
  836 
  837         if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
  838                  lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
  839 }
  840 
  841 /*
  842  *      Routine:        lck_rw_destroy
  843  */
  844 void
  845 lck_rw_destroy(
  846         lck_rw_t        *lck,
  847         lck_grp_t       *grp) {
  848         boolean_t lck_is_indirect;
  849         
  850         if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
  851                 return;
  852         lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
  853         lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
  854         if (lck_is_indirect)
  855                 kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));
  856 
  857         lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
  858         lck_grp_deallocate(grp);
  859         return;
  860 }
  861 
  862 /*
  863  *      Routine:        lck_rw_lock
  864  */
  865 void
  866 lck_rw_lock(
  867         lck_rw_t        *lck,
  868         lck_rw_type_t   lck_rw_type)
  869 {
  870         if (lck_rw_type == LCK_RW_TYPE_SHARED)
  871                 lck_rw_lock_shared(lck);
  872         else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
  873                 lck_rw_lock_exclusive(lck);
  874         else
  875                 panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
  876 }
  877 
  878 
  879 /*
  880  *      Routine:        lck_rw_unlock
  881  */
  882 void
  883 lck_rw_unlock(
  884         lck_rw_t        *lck,
  885         lck_rw_type_t   lck_rw_type)
  886 {
  887         if (lck_rw_type == LCK_RW_TYPE_SHARED)
  888                 lck_rw_unlock_shared(lck);
  889         else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
  890                 lck_rw_unlock_exclusive(lck);
  891         else
  892                 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
  893 }
  894 
  895 
  896 /*
  897  *      Routine:        lck_rw_unlock_shared
  898  */
  899 void
  900 lck_rw_unlock_shared(
  901         lck_rw_t        *lck)
  902 {
  903         lck_rw_type_t   ret;
  904 
  905         ret = lck_rw_done(lck);
  906 
  907         if (ret != LCK_RW_TYPE_SHARED)
   908                 panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
  909 }
  910 
  911 
  912 /*
  913  *      Routine:        lck_rw_unlock_exclusive
  914  */
  915 void
  916 lck_rw_unlock_exclusive(
  917         lck_rw_t        *lck)
  918 {
  919         lck_rw_type_t   ret;
  920 
  921         ret = lck_rw_done(lck);
  922 
  923         if (ret != LCK_RW_TYPE_EXCLUSIVE)
  924                 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
  925 }
  926 
  927 
  928 /*
  929  *      Routine:        lck_rw_try_lock
  930  */
  931 boolean_t
  932 lck_rw_try_lock(
  933         lck_rw_t        *lck,
  934         lck_rw_type_t   lck_rw_type)
  935 {
  936         if (lck_rw_type == LCK_RW_TYPE_SHARED)
  937                 return(lck_rw_try_lock_shared(lck));
  938         else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
  939                 return(lck_rw_try_lock_exclusive(lck));
  940         else
  941                 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
  942         return(FALSE);
  943 }
  944 
  945 
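      /*
       *      Putting the front-end routines together (illustrative sequence;
       *      grp is a hypothetical lck_grp_t set up elsewhere):
       *
       *              lck_rw_t *rwl = lck_rw_alloc_init(grp, LCK_ATTR_NULL);
       *
       *              lck_rw_lock(rwl, LCK_RW_TYPE_SHARED);           -- read side
       *              ... read-only work ...
       *              lck_rw_unlock(rwl, LCK_RW_TYPE_SHARED);
       *
       *              if (lck_rw_try_lock(rwl, LCK_RW_TYPE_EXCLUSIVE)) {
       *                      ... writer work ...
       *                      (void) lck_rw_done(rwl);
       *              }
       *
       *              lck_rw_free(rwl, grp);
       */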
  946 
  947 /*
  948  *      Routine:        lck_rw_lock_exclusive_gen
  949  */
  950 void
  951 lck_rw_lock_exclusive_gen(
  952         lck_rw_t        *lck)
  953 {
  954         int        i;
  955         boolean_t               lock_miss = FALSE;
  956         wait_result_t   res;
  957 
  958         lck_rw_ilk_lock(lck);
  959 
  960         /*
  961          *      Try to acquire the lck_rw_want_excl bit.
  962          */
  963         while (lck->lck_rw_want_excl) {
  964                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
  965 
  966                 if (!lock_miss) {
  967                         lock_miss = TRUE;
  968                 }
  969 
  970                 i = lock_wait_time[1];
  971                 if (i != 0) {
  972                         lck_rw_ilk_unlock(lck);
  973                         while (--i != 0 && lck->lck_rw_want_excl)
  974                                 continue;
  975                         lck_rw_ilk_lock(lck);
  976                 }
  977 
  978                 if (lck->lck_rw_want_excl) {
  979                         lck->lck_rw_waiting = TRUE;
  980                         res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
  981                         if (res == THREAD_WAITING) {
  982                                 lck_rw_ilk_unlock(lck);
  983                                 res = thread_block(THREAD_CONTINUE_NULL);
  984                                 lck_rw_ilk_lock(lck);
  985                         }
  986                 }
  987                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
  988         }
  989         lck->lck_rw_want_excl = TRUE;
  990 
  991         /* Wait for readers (and upgrades) to finish */
  992 
  993         while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
  994                 if (!lock_miss) {
  995                         lock_miss = TRUE;
  996                 }
  997 
  998                 i = lock_wait_time[1];
  999 
 1000                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
 1001                              (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);
 1002 
 1003                 if (i != 0) {
 1004                         lck_rw_ilk_unlock(lck);
 1005                         while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
 1006                                             lck->lck_rw_want_upgrade))
 1007                                 continue;
 1008                         lck_rw_ilk_lock(lck);
 1009                 }
 1010 
 1011                 if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
 1012                         lck->lck_rw_waiting = TRUE;
 1013                         res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1014                         if (res == THREAD_WAITING) {
 1015                                 lck_rw_ilk_unlock(lck);
 1016                                 res = thread_block(THREAD_CONTINUE_NULL);
 1017                                 lck_rw_ilk_lock(lck);
 1018                         }
 1019                 }
 1020                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
 1021                              (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
 1022         }
 1023 
 1024         lck_rw_ilk_unlock(lck);
 1025 }
 1026 
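      /*
       *      Note on the wait channel used above (and throughout this file):
       *      the event passed to assert_wait()/thread_wakeup() is
       *
       *              (event_t)(((unsigned int *)lck) +
       *                        ((sizeof(lck_rw_t) - 1) / sizeof(unsigned int)))
       *
       *      i.e. the address of the last 32-bit word inside the lck_rw_t
       *      itself, so sleepers and wakers on the same lock always name the
       *      same wait event without needing any extra storage.
       */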
 1027 
 1028 /*
 1029  *      Routine:        lck_rw_done_gen
 1030  */
 1031 lck_rw_type_t
 1032 lck_rw_done_gen(
 1033         lck_rw_t        *lck)
 1034 {
 1035         boolean_t       do_wakeup = FALSE;
 1036         lck_rw_type_t   lck_rw_type;
 1037 
 1038 
 1039         lck_rw_ilk_lock(lck);
 1040 
 1041         if (lck->lck_rw_shared_cnt != 0) {
 1042                 lck_rw_type = LCK_RW_TYPE_SHARED;
 1043                 lck->lck_rw_shared_cnt--;
 1044         }
 1045         else {  
 1046                 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
 1047                 if (lck->lck_rw_want_upgrade) 
 1048                         lck->lck_rw_want_upgrade = FALSE;
 1049                 else 
 1050                         lck->lck_rw_want_excl = FALSE;
 1051         }
 1052 
 1053         /*
 1054          *      There is no reason to wakeup a lck_rw_waiting thread
 1055          *      if the read-count is non-zero.  Consider:
 1056          *              we must be dropping a read lock
 1057          *              threads are waiting only if one wants a write lock
 1058          *              if there are still readers, they can't proceed
 1059          */
 1060 
 1061         if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
 1062                 lck->lck_rw_waiting = FALSE;
 1063                 do_wakeup = TRUE;
 1064         }
 1065 
 1066         lck_rw_ilk_unlock(lck);
 1067 
 1068         if (do_wakeup)
 1069                 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
 1070         return(lck_rw_type);
 1071 }
 1072 
 1073 
 1074 /*
 1075  *      Routine:        lck_rw_lock_shared_gen
 1076  */
 1077 void
 1078 lck_rw_lock_shared_gen(
 1079         lck_rw_t        *lck)
 1080 {
 1081         int             i;
 1082         wait_result_t      res;
 1083 
 1084         lck_rw_ilk_lock(lck);
 1085 
 1086         while (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
 1087                 i = lock_wait_time[1];
 1088 
 1089                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
 1090                              (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);
 1091 
 1092                 if (i != 0) {
 1093                         lck_rw_ilk_unlock(lck);
 1094                         while (--i != 0 && (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade))
 1095                                 continue;
 1096                         lck_rw_ilk_lock(lck);
 1097                 }
 1098 
 1099                 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
 1100                         lck->lck_rw_waiting = TRUE;
 1101                         res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1102                         if (res == THREAD_WAITING) {
 1103                                 lck_rw_ilk_unlock(lck);
 1104                                 res = thread_block(THREAD_CONTINUE_NULL);
 1105                                 lck_rw_ilk_lock(lck);
 1106                         }
 1107                 }
 1108                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
 1109                              (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
 1110         }
 1111 
 1112         lck->lck_rw_shared_cnt++;
 1113 
 1114         lck_rw_ilk_unlock(lck);
 1115 }
 1116 
 1117 
 1118 /*
 1119  *      Routine:        lck_rw_lock_shared_to_exclusive_gen
 1120  *      Function:
 1121  *              Improves a read-only lock to one with
 1122  *              write permission.  If another reader has
 1123  *              already requested an upgrade to a write lock,
 1124  *              no lock is held upon return.
 1125  *
 1126  *              Returns TRUE if the upgrade *failed*.
 1127  */
 1128 
 1129 boolean_t
 1130 lck_rw_lock_shared_to_exclusive_gen(
 1131         lck_rw_t        *lck)
 1132 {
 1133         int         i;
 1134         boolean_t           do_wakeup = FALSE;
 1135         wait_result_t      res;
 1136 
 1137         lck_rw_ilk_lock(lck);
 1138 
 1139         lck->lck_rw_shared_cnt--;       
 1140 
 1141         if (lck->lck_rw_want_upgrade) {
 1142                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
 1143                              (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
 1144 
 1145                 /*
 1146                  *      Someone else has requested upgrade.
 1147                  *      Since we've released a read lock, wake
 1148                  *      him up.
 1149                  */
 1150                 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
 1151                         lck->lck_rw_waiting = FALSE;
 1152                         do_wakeup = TRUE;
 1153                 }
 1154 
 1155                 lck_rw_ilk_unlock(lck);
 1156 
 1157                 if (do_wakeup)
 1158                         thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
 1159 
 1160                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
 1161                              (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
 1162 
 1163                 return (TRUE);
 1164         }
 1165 
 1166         lck->lck_rw_want_upgrade = TRUE;
 1167 
 1168         while (lck->lck_rw_shared_cnt != 0) {
 1169                 i = lock_wait_time[1];
 1170 
 1171                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
 1172                              (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);
 1173 
 1174                 if (i != 0) {
 1175                         lck_rw_ilk_unlock(lck);
 1176                         while (--i != 0 && lck->lck_rw_shared_cnt != 0)
 1177                                 continue;
 1178                         lck_rw_ilk_lock(lck);
 1179                 }
 1180 
 1181                 if (lck->lck_rw_shared_cnt != 0) {
 1182                         lck->lck_rw_waiting = TRUE;
 1183                         res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1184                         if (res == THREAD_WAITING) {
 1185                                 lck_rw_ilk_unlock(lck);
 1186                                 res = thread_block(THREAD_CONTINUE_NULL);
 1187                                 lck_rw_ilk_lock(lck);
 1188                         }
 1189                 }
 1190                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
 1191                              (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
 1192         }
 1193 
 1194         lck_rw_ilk_unlock(lck);
 1195 
 1196         return (FALSE);
 1197 }
 1198 
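      /*
       *      Caller pattern implied by the TRUE-on-failure convention above
       *      (sketch, written against the _gen routines defined in this file):
       *      when the upgrade fails the shared hold has already been dropped,
       *      so the caller must take the lock exclusively from scratch and
       *      re-validate whatever it learned under the shared hold:
       *
       *              lck_rw_lock_shared_gen(lck);
       *              ... inspect state ...
       *              if (lck_rw_lock_shared_to_exclusive_gen(lck)) {
       *                      -- upgrade failed, no lock held here
       *                      lck_rw_lock_exclusive_gen(lck);
       *                      ... re-check state ...
       *              }
       *              ... modify state ...
       *              (void) lck_rw_done_gen(lck);
       */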
 1199 /*
 1200  *      Routine:        lck_rw_lock_exclusive_to_shared_gen
 1201  */
 1202 void
 1203 lck_rw_lock_exclusive_to_shared_gen(
 1204         lck_rw_t        *lck)
 1205 {
 1206         boolean_t          do_wakeup = FALSE;
 1207 
 1208         lck_rw_ilk_lock(lck);
 1209 
 1210         lck->lck_rw_shared_cnt++;
 1211         if (lck->lck_rw_want_upgrade)
 1212                 lck->lck_rw_want_upgrade = FALSE;
 1213         else
 1214                 lck->lck_rw_want_excl = FALSE;
 1215 
 1216         if (lck->lck_rw_waiting) {
 1217                 lck->lck_rw_waiting = FALSE;
 1218                 do_wakeup = TRUE;
 1219         }
 1220 
 1221         lck_rw_ilk_unlock(lck);
 1222 
 1223         if (do_wakeup)
 1224                 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
 1225 
 1226 }
 1227 
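      /*
       *      Downgrade sketch (assuming the lck_rw_lock_exclusive_to_shared()
       *      front end reaches the routine above): a writer that has finished
       *      mutating can trade its exclusive hold for a shared one without
       *      ever dropping the lock:
       *
       *              lck_rw_lock_exclusive(lck);
       *              ... update ...
       *              lck_rw_lock_exclusive_to_shared(lck);
       *              ... read under the shared hold ...
       *              (void) lck_rw_done(lck);
       */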
 1228 
 1229 /*
 1230  *      Routine:        lck_rw_try_lock_exclusive_gen
 1231  *      Function:
 1232  *              Tries to get a write lock.
 1233  *
 1234  *              Returns FALSE if the lock is not held on return.
 1235  */
 1236 
 1237 boolean_t
 1238 lck_rw_try_lock_exclusive_gen(
 1239         lck_rw_t        *lck)
 1240 {
 1241         lck_rw_ilk_lock(lck);
 1242 
 1243         if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
 1244                 /*
 1245                  *      Can't get lock.
 1246                  */
 1247                 lck_rw_ilk_unlock(lck);
 1248                 return(FALSE);
 1249         }
 1250 
 1251         /*
 1252          *      Have lock.
 1253          */
 1254 
 1255         lck->lck_rw_want_excl = TRUE;
 1256 
 1257         lck_rw_ilk_unlock(lck);
 1258 
 1259         return(TRUE);
 1260 }
 1261 
 1262 /*
 1263  *      Routine:        lck_rw_try_lock_shared_gen
 1264  *      Function:
 1265  *              Tries to get a read lock.
 1266  *
 1267  *              Returns FALSE if the lock is not held on return.
 1268  */
 1269 
 1270 boolean_t
 1271 lck_rw_try_lock_shared_gen(
 1272         lck_rw_t        *lck)
 1273 {
 1274         lck_rw_ilk_lock(lck);
 1275 
 1276         if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
 1277                 lck_rw_ilk_unlock(lck);
 1278                 return(FALSE);
 1279         }
 1280 
 1281         lck->lck_rw_shared_cnt++;
 1282 
 1283         lck_rw_ilk_unlock(lck);
 1284 
 1285         return(TRUE);
 1286 }
 1287 
 1288 
 1289 /*
 1290  *      Routine:        lck_rw_ext_backtrace
 1291  */
 1292 void
 1293 lck_rw_ext_backtrace(
 1294         lck_rw_ext_t    *lck)
 1295 {
 1296         unsigned int *stackptr, *stackptr_prev;
 1297         unsigned int frame;
 1298 
 1299         __asm__ volatile("mr %0,r1" : "=r" (stackptr)); 
 1300         frame = 0;
 1301         while (frame < LCK_FRAMES_MAX) {
 1302                 stackptr_prev = stackptr;
 1303                 stackptr = ( unsigned int *)*stackptr;
 1304                 if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
 1305                         break;
 1306                 lck->lck_rw_deb.stack[frame] = *(stackptr+2); 
 1307                 frame++;
 1308         }
 1309         while (frame < LCK_FRAMES_MAX) {
 1310                 lck->lck_rw_deb.stack[frame] = 0;
 1311                 frame++;
 1312         }
 1313 }
 1314 
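      /*
       *      The walk above follows the PowerPC stack back-chain: r1 points at
       *      the current frame, *stackptr is the caller's frame, and the saved
       *      LR sits two words in (stackptr + 2, i.e. offset 8 in the 32-bit
       *      Darwin ABI).  The 8192 XOR test is a cheap sanity check that
       *      successive frames stay within the same stack region.
       */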
 1315 
 1316 /*
 1317  *      Routine:        lck_rw_lock_exclusive_ext
 1318  */
 1319 void
 1320 lck_rw_lock_exclusive_ext(
 1321         lck_rw_ext_t    *lck,
 1322         lck_rw_t        *rlck)
 1323 {
 1324         int                             i;
 1325         wait_result_t   res;
 1326         boolean_t               lock_miss = FALSE;
 1327         boolean_t               lock_wait = FALSE;
 1328         boolean_t               lock_stat;
 1329 
 1330         lck_rw_check_type(lck, rlck);
 1331 
 1332         if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG) 
 1333              && (lck->lck_rw_deb.thread == current_thread()))
 1334                 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
 1335 
 1336         lck_rw_ilk_lock(&lck->lck_rw);
 1337 
 1338         lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
 1339 
 1340         if (lock_stat)
 1341                 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
 1342 
 1343         /*
 1344          *      Try to acquire the lck_rw.lck_rw_want_excl bit.
 1345          */
 1346         while (lck->lck_rw.lck_rw_want_excl) {
 1347                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);
 1348 
 1349                 if (lock_stat && !lock_miss) {
 1350                         lock_miss = TRUE;
 1351                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
 1352                 }
 1353 
 1354                 i = lock_wait_time[1];
 1355                 if (i != 0) {
 1356                         lck_rw_ilk_unlock(&lck->lck_rw);
 1357                         while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
 1358                                 continue;
 1359                         lck_rw_ilk_lock(&lck->lck_rw);
 1360                 }
 1361 
 1362                 if (lck->lck_rw.lck_rw_want_excl) {
 1363                         lck->lck_rw.lck_rw_waiting = TRUE;
 1364                         res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1365                         if (res == THREAD_WAITING) {
 1366                                 if (lock_stat && !lock_wait) {
 1367                                         lock_wait = TRUE;
 1368                                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
 1369                                 }
 1370                                 lck_rw_ilk_unlock(&lck->lck_rw);
 1371                                 res = thread_block(THREAD_CONTINUE_NULL);
 1372                                 lck_rw_ilk_lock(&lck->lck_rw);
 1373                         }
 1374                 }
 1375                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
 1376         }
 1377         lck->lck_rw.lck_rw_want_excl = TRUE;
 1378 
 1379         /* Wait for readers (and upgrades) to finish */
 1380 
 1381         while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
 1382                 i = lock_wait_time[1];
 1383 
 1384                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
 1385                              (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);
 1386 
 1387                 if (lock_stat && !lock_miss) {
 1388                         lock_miss = TRUE;
 1389                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
 1390                 }
 1391 
 1392                 if (i != 0) {
 1393                         lck_rw_ilk_unlock(&lck->lck_rw);
 1394                         while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
 1395                                             lck->lck_rw.lck_rw_want_upgrade))
 1396                                 continue;
 1397                         lck_rw_ilk_lock(&lck->lck_rw);
 1398                 }
 1399 
 1400                 if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
 1401                         lck->lck_rw.lck_rw_waiting = TRUE;
 1402                         res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1403                         if (res == THREAD_WAITING) {
 1404                                 if (lock_stat && !lock_wait) {
 1405                                         lock_wait = TRUE;
 1406                                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
 1407                                 }
 1408                                 lck_rw_ilk_unlock(&lck->lck_rw);
 1409                                 res = thread_block(THREAD_CONTINUE_NULL);
 1410                                 lck_rw_ilk_lock(&lck->lck_rw);
 1411                         }
 1412                 }
 1413                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
 1414                              (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
 1415         }
 1416 
 1417         lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
 1418         if (LcksOpts & enaLkExtStck)
 1419                 lck_rw_ext_backtrace(lck);
 1420         lck->lck_rw_deb.thread = current_thread();
 1421 
 1422         lck_rw_ilk_unlock(&lck->lck_rw);
 1423 }
 1424 
 1425 
 1426 /*
 1427  *      Routine:        lck_rw_done_ext
 1428  */
 1429 lck_rw_type_t
 1430 lck_rw_done_ext(
 1431         lck_rw_ext_t    *lck,
 1432         lck_rw_t        *rlck)
 1433 {
 1434         boolean_t       do_wakeup = FALSE;
 1435         lck_rw_type_t   lck_rw_type;
 1436 
 1437 
 1438         lck_rw_check_type(lck, rlck);
 1439 
 1440         lck_rw_ilk_lock(&lck->lck_rw);
 1441 
 1442         if (lck->lck_rw.lck_rw_shared_cnt != 0) {
 1443                 lck_rw_type = LCK_RW_TYPE_SHARED;
 1444                 lck->lck_rw.lck_rw_shared_cnt--;
 1445         }
 1446         else {  
 1447                 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
 1448                 if (lck->lck_rw.lck_rw_want_upgrade) 
 1449                         lck->lck_rw.lck_rw_want_upgrade = FALSE;
 1450                 else if (lck->lck_rw.lck_rw_want_excl)
 1451                         lck->lck_rw.lck_rw_want_excl = FALSE;
 1452                 else
  1453                         panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
 1454                                   rlck, lck->lck_rw);
 1455                 if (lck->lck_rw_deb.thread == THREAD_NULL)
 1456                         panic("rw lock (0x%08X) not held\n",
 1457                               rlck);
 1458                 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG) 
 1459                          && (lck->lck_rw_deb.thread != current_thread()))
 1460                         panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
 1461                                   rlck, current_thread(), lck->lck_rw_deb.thread);
 1462                 lck->lck_rw_deb.thread = THREAD_NULL;
 1463         }
 1464 
 1465         if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
 1466                 lck->lck_rw_deb.pc_done = __builtin_return_address(0);
 1467 
 1468         /*
 1469          *      There is no reason to wakeup a waiting thread
 1470          *      if the read-count is non-zero.  Consider:
 1471          *              we must be dropping a read lock
 1472          *              threads are waiting only if one wants a write lock
 1473          *              if there are still readers, they can't proceed
 1474          */
 1475 
 1476         if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
 1477                 lck->lck_rw.lck_rw_waiting = FALSE;
 1478                 do_wakeup = TRUE;
 1479         }
 1480 
 1481         lck_rw_ilk_unlock(&lck->lck_rw);
 1482 
 1483         if (do_wakeup)
 1484                 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
 1485         return(lck_rw_type);
 1486 }
 1487 
 1488 
 1489 /*
 1490  *      Routine:        lck_rw_lock_shared_ext
 1491  */
 1492 void
 1493 lck_rw_lock_shared_ext(
 1494         lck_rw_ext_t    *lck,
 1495         lck_rw_t        *rlck)
 1496 {
 1497         int                             i;
 1498         wait_result_t   res;
 1499         boolean_t               lock_miss = FALSE;
 1500         boolean_t               lock_wait = FALSE;
 1501         boolean_t               lock_stat;
 1502 
 1503         lck_rw_check_type(lck, rlck);
 1504 
 1505         lck_rw_ilk_lock(&lck->lck_rw);
 1506 
 1507         lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
 1508 
 1509         if (lock_stat)
 1510                 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
 1511 
 1512         while (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
 1513                 i = lock_wait_time[1];
 1514 
 1515                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
 1516                              (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);
 1517 
 1518                 if (lock_stat && !lock_miss) {
 1519                         lock_miss = TRUE;
 1520                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
 1521                 }
 1522 
 1523                 if (i != 0) {
 1524                         lck_rw_ilk_unlock(&lck->lck_rw);
 1525                         while (--i != 0 && (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade))
 1526                                 continue;
 1527                         lck_rw_ilk_lock(&lck->lck_rw);
 1528                 }
 1529 
 1530                 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
 1531                         lck->lck_rw.lck_rw_waiting = TRUE;
 1532                         res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1533                         if (res == THREAD_WAITING) {
 1534                                 if (lock_stat && !lock_wait) {
 1535                                         lock_wait = TRUE;
 1536                                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
 1537                                 }
 1538                                 lck_rw_ilk_unlock(&lck->lck_rw);
 1539                                 res = thread_block(THREAD_CONTINUE_NULL);
 1540                                 lck_rw_ilk_lock(&lck->lck_rw);
 1541                         }
 1542                 }
 1543                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
 1544                              (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
 1545         }
 1546 
 1547         lck->lck_rw.lck_rw_shared_cnt++;
 1548 
 1549         lck_rw_ilk_unlock(&lck->lck_rw);
 1550 }
 1551 
 1552 
 1553 /*
 1554  *      Routine:        lck_rw_lock_shared_to_exclusive_ext
 1555  *      Function:
 1556  *              Improves a read-only lock to one with
 1557  *              write permission.  If another reader has
 1558  *              already requested an upgrade to a write lock,
 1559  *              no lock is held upon return.
 1560  *
 1561  *              Returns TRUE if the upgrade *failed*.
 1562  */
 1563 
 1564 boolean_t
 1565 lck_rw_lock_shared_to_exclusive_ext(
 1566         lck_rw_ext_t    *lck,
 1567         lck_rw_t        *rlck)
 1568 {
 1569         int         i;
 1570         boolean_t           do_wakeup = FALSE;
 1571         wait_result_t      res;
 1572         boolean_t               lock_miss = FALSE;
 1573         boolean_t               lock_wait = FALSE;
 1574         boolean_t               lock_stat;
 1575 
 1576         lck_rw_check_type(lck, rlck);
 1577 
 1578         if (lck->lck_rw_deb.thread == current_thread())
 1579                 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
 1580 
 1581         lck_rw_ilk_lock(&lck->lck_rw);
 1582 
 1583         lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
 1584 
 1585         if (lock_stat)
 1586                 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
 1587 
 1588         lck->lck_rw.lck_rw_shared_cnt--;        
 1589 
 1590         if (lck->lck_rw.lck_rw_want_upgrade) {
 1591                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
 1592                              (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
 1593 
 1594                 /*
 1595                  *      Someone else has requested upgrade.
 1596                  *      Since we've released a read lock, wake
 1597                  *      him up.
 1598                  */
 1599                 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
 1600                         lck->lck_rw.lck_rw_waiting = FALSE;
 1601                         do_wakeup = TRUE;
 1602                 }
 1603 
 1604                 lck_rw_ilk_unlock(&lck->lck_rw);
 1605 
 1606                 if (do_wakeup)
 1607                         thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
 1608 
 1609                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
 1610                              (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
 1611 
 1612                 return (TRUE);
 1613         }
 1614 
 1615         lck->lck_rw.lck_rw_want_upgrade = TRUE;
 1616 
 1617         while (lck->lck_rw.lck_rw_shared_cnt != 0) {
 1618                 i = lock_wait_time[1];
 1619 
 1620                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
 1621                              (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);
 1622 
 1623                 if (lock_stat && !lock_miss) {
 1624                         lock_miss = TRUE;
 1625                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
 1626                 }
 1627 
 1628                 if (i != 0) {
 1629                         lck_rw_ilk_unlock(&lck->lck_rw);
 1630                         while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
 1631                                 continue;
 1632                         lck_rw_ilk_lock(&lck->lck_rw);
 1633                 }
 1634 
 1635                 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
 1636                         lck->lck_rw.lck_rw_waiting = TRUE;
 1637                         res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
 1638                         if (res == THREAD_WAITING) {
 1639                                 if (lock_stat && !lock_wait) {
 1640                                         lock_wait = TRUE;
 1641                                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
 1642                                 }
 1643                                 lck_rw_ilk_unlock(&lck->lck_rw);
 1644                                 res = thread_block(THREAD_CONTINUE_NULL);
 1645                                 lck_rw_ilk_lock(&lck->lck_rw);
 1646                         }
 1647                 }
 1648                 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
 1649                              (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
 1650         }
 1651 
 1652         lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
 1653         if (LcksOpts & enaLkExtStck)
 1654                 lck_rw_ext_backtrace(lck);
 1655         lck->lck_rw_deb.thread = current_thread();
 1656 
 1657         lck_rw_ilk_unlock(&lck->lck_rw);
 1658 
 1659         return (FALSE);
 1660 }
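
Note the inverted return convention: TRUE means the upgrade failed, and in that case the routine has already dropped the caller's shared hold, so nothing is locked on return. A caller therefore has to re-acquire the lock exclusively and re-validate whatever it observed under the shared hold. The caller-side sketch below is an editor's illustration, not part of this file; struct my_obj and my_obj_refresh are hypothetical, and lck_rw_lock_exclusive_ext()/lck_rw_done_ext() are assumed to be the acquire/release routines defined earlier in this file.

struct my_obj {                         /* hypothetical caller data */
        int needs_update;
};

void
my_obj_refresh(lck_rw_ext_t *lck, lck_rw_t *rlck, struct my_obj *obj)
{
        lck_rw_lock_shared_ext(lck, rlck);

        if (obj->needs_update) {
                /* TRUE: the upgrade failed and the shared hold is already gone. */
                if (lck_rw_lock_shared_to_exclusive_ext(lck, rlck)) {
                        lck_rw_lock_exclusive_ext(lck, rlck);   /* assumed, earlier in this file */
                        if (!obj->needs_update) {
                                /* Another thread did the work while we were unlocked. */
                                lck_rw_done_ext(lck, rlck);     /* assumed release routine */
                                return;
                        }
                }
                obj->needs_update = 0;          /* now held exclusively */
        }

        /* ... remaining work under whichever mode is now held ... */
        lck_rw_done_ext(lck, rlck);
}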
 1661 
 1662 /*
 1663  *      Routine:        lck_rw_lock_exclusive_to_shared_ext
 1664  */
 1665 void
 1666 lck_rw_lock_exclusive_to_shared_ext(
 1667         lck_rw_ext_t    *lck,
 1668         lck_rw_t        *rlck)
 1669 {
 1670         boolean_t          do_wakeup = FALSE;
 1671 
 1672         lck_rw_check_type(lck, rlck);
 1673 
 1674         KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
 1675                              (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
 1676 
 1677         lck_rw_ilk_lock(&lck->lck_rw);
 1678 
 1679         lck->lck_rw.lck_rw_shared_cnt++;
 1680         if (lck->lck_rw.lck_rw_want_upgrade)
 1681                 lck->lck_rw.lck_rw_want_upgrade = FALSE;
 1682         else if (lck->lck_rw.lck_rw_want_excl)
 1683                 lck->lck_rw.lck_rw_want_excl = FALSE;
 1684         else
 1685                 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
 1686                           rlck, lck->lck_rw);
 1687         if (lck->lck_rw_deb.thread == THREAD_NULL)
 1688                 panic("rw lock (0x%08X) not held\n",
 1689                       rlck);
 1690         else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG) 
 1691                   && (lck->lck_rw_deb.thread != current_thread()))
 1692                 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
 1693                           rlck, current_thread(), lck->lck_rw_deb.thread);
 1694 
 1695         lck->lck_rw_deb.thread = THREAD_NULL;
 1696 
 1697         if (lck->lck_rw.lck_rw_waiting) {
 1698                 lck->lck_rw.lck_rw_waiting = FALSE;
 1699                 do_wakeup = TRUE;
 1700         }
 1701 
 1702         lck_rw_ilk_unlock(&lck->lck_rw);
 1703 
 1704         if (do_wakeup)
 1705                 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
 1706 
 1707         KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
 1708                              (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);
 1709 
 1710 }
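
The downgrade routine above converts an exclusive (or upgrade) hold into a shared hold atomically under the interlock, so the caller keeps read access while waiting readers are woken. A common pattern, sketched below as an editor's illustration (publish_entry, table_insert and table_scan are hypothetical; the exclusive-acquire and release routines are assumed from earlier in this file), is to mutate under the exclusive hold and then downgrade for a longer read-only phase:

struct entry;                           /* hypothetical caller types */
struct table;
void table_insert(struct table *, struct entry *);
void table_scan(struct table *);

void
publish_entry(lck_rw_ext_t *lck, lck_rw_t *rlck, struct entry *e, struct table *t)
{
        lck_rw_lock_exclusive_ext(lck, rlck);   /* assumed, earlier in this file */
        table_insert(t, e);                     /* writer-side work */

        /* Keep reading e/t, but let other readers in from this point on. */
        lck_rw_lock_exclusive_to_shared_ext(lck, rlck);

        table_scan(t);                          /* read-only work */
        lck_rw_done_ext(lck, rlck);             /* assumed release routine */
}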
 1711 
 1712 
 1713 /*
 1714  *      Routine:        lck_rw_try_lock_exclusive_ext
 1715  *      Function:
 1716  *              Tries to get a write lock.
 1717  *
 1718  *              Returns FALSE if the lock is not held on return.
 1719  */
 1720 
 1721 boolean_t
 1722 lck_rw_try_lock_exclusive_ext(
 1723         lck_rw_ext_t    *lck,
 1724         lck_rw_t        *rlck)
 1725 {
 1726         boolean_t               lock_stat;
 1727 
 1728         lck_rw_check_type(lck, rlck);
 1729 
 1730         lck_rw_ilk_lock(&lck->lck_rw);
 1731 
 1732         lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
 1733 
 1734         if (lock_stat)
 1735                 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
 1736 
 1737         if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
 1738                 /*
 1739                  *      Can't get lock.
 1740                  */
 1741                 if (lock_stat) {
 1742                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
 1743                 }
 1744                 lck_rw_ilk_unlock(&lck->lck_rw);
 1745                 return(FALSE);
 1746         }
 1747 
 1748         /*
 1749          *      Have lock.
 1750          */
 1751 
 1752         lck->lck_rw.lck_rw_want_excl = TRUE;
 1753         lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
 1754         if (LcksOpts & enaLkExtStck)
 1755                 lck_rw_ext_backtrace(lck);
 1756         lck->lck_rw_deb.thread = current_thread();
 1757 
 1758         lck_rw_ilk_unlock(&lck->lck_rw);
 1759 
 1760         return(TRUE);
 1761 }
 1762 
 1763 /*
 1764  *      Routine:        lck_rw_try_lock_shared_ext
 1765  *      Function:
 1766  *              Tries to get a read lock.
 1767  *
 1768  *              Returns FALSE if the lock is not held on return.
 1769  */
 1770 
 1771 boolean_t
 1772 lck_rw_try_lock_shared_ext(
 1773         lck_rw_ext_t    *lck,
 1774         lck_rw_t        *rlck)
 1775 {
 1776         boolean_t               lock_stat;
 1777 
 1778         lck_rw_check_type(lck, rlck);
 1779 
 1780         lck_rw_ilk_lock(&lck->lck_rw);
 1781 
 1782         lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
 1783 
 1784         if (lock_stat)
 1785                 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
 1786 
 1787         if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
 1788                 if (lock_stat) {
 1789                         lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
 1790                 }
 1791                 lck_rw_ilk_unlock(&lck->lck_rw);
 1792                 return(FALSE);
 1793         }
 1794 
 1795         lck->lck_rw.lck_rw_shared_cnt++;
 1796 
 1797         lck_rw_ilk_unlock(&lck->lck_rw);
 1798 
 1799         return(TRUE);
 1800 }
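
Both try routines take only the interlock, never block, and return FALSE without holding anything, which makes them suitable where sleeping is forbidden or where a blocking acquire would violate lock ordering. A hedged sketch of the usual back-off pattern follows (editor's illustration; lock_pair and the a/b locks are hypothetical, and the blocking acquire/release routines are assumed from earlier in this file):

/*
 * Take lck_b exclusively while already holding lck_a shared.  The assumed
 * documented order is b-before-a, so a blocking acquire here could deadlock;
 * try first, and on failure drop lck_a and retake both in the documented order.
 */
boolean_t
lock_pair(lck_rw_ext_t *lck_a, lck_rw_t *rlck_a,
          lck_rw_ext_t *lck_b, lck_rw_t *rlck_b)
{
        if (lck_rw_try_lock_exclusive_ext(lck_b, rlck_b))
                return (TRUE);                  /* fast path: no ordering issue */

        lck_rw_done_ext(lck_a, rlck_a);         /* back off (assumed release routine) */
        lck_rw_lock_exclusive_ext(lck_b, rlck_b);
        lck_rw_lock_shared_ext(lck_a, rlck_a);  /* re-acquire in documented order */
        return (FALSE);                         /* caller must re-validate its state */
}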
 1801 
 1802 void
 1803 lck_rw_check_type(
 1804         lck_rw_ext_t    *lck,
 1805         lck_rw_t                *rlck)
 1806 {
 1807         if (lck->lck_rw_deb.type != RW_TAG)
 1808                 panic("rw lock (0x%08X) not a rw lock type (0x%08X)\n",rlck, lck->lck_rw_deb.type);
 1809 }
 1810 
 1811 /*
 1812  * The C portion of the mutex package.  These routines are only invoked
 1813  * if the optimized assembler routines can't do the work.
 1814  */
 1815 
 1816 /*
 1817  * Forward declaration
 1818  */
 1819 
 1820 void lck_mtx_ext_init(
 1821         lck_mtx_ext_t   *lck,
 1822         lck_grp_t       *grp,
 1823         lck_attr_t      *attr);
 1824 
 1825 /*
 1826  *      Routine:        mutex_alloc
 1827  *      Function:
 1828  *              Allocate a mutex for external users who cannot
 1829  *              hard-code the structure definition into their
 1830  *              objects.
 1831  *              For now just use kalloc, but a zone is probably
 1832  *              warranted.
 1833  */
 1834 mutex_t *
 1835 mutex_alloc(
 1836         unsigned short  tag)
 1837 {
 1838         mutex_t         *m;
 1839 
 1840         if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
 1841           mutex_init(m, tag);
 1842         return(m);
 1843 }
 1844 
 1845 /*
 1846  *      Routine:        mutex_free
 1847  */
 1848 void
 1849 mutex_free(
 1850         mutex_t *m)
 1851 {
 1852         kfree((void *)m, sizeof(mutex_t));
 1853 }
 1854 
 1855 /*
 1856  *      Routine:        lck_mtx_alloc_init
 1857  */
 1858 lck_mtx_t *
 1859 lck_mtx_alloc_init(
 1860         lck_grp_t       *grp,
 1861         lck_attr_t      *attr) {
 1862         lck_mtx_t       *lck;
 1863 
 1864         if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
 1865                 lck_mtx_init(lck, grp, attr);
 1866                 
 1867         return(lck);
 1868 }
 1869 
 1870 /*
 1871  *      Routine:        lck_mtx_free
 1872  */
 1873 void
 1874 lck_mtx_free(
 1875         lck_mtx_t       *lck,
 1876         lck_grp_t       *grp) {
 1877         lck_mtx_destroy(lck, grp);
 1878         kfree((void *)lck, sizeof(lck_mtx_t));
 1879 }
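
Taken together, lck_mtx_alloc_init() and lck_mtx_free() give external users the same allocate/initialize and destroy/free pairing that mutex_alloc()/mutex_free() provide for the older interface. A lifecycle sketch follows (editor's illustration; my_subsystem_* names are hypothetical, and lck_grp_alloc_init(), lck_attr_alloc_init(), lck_attr_free() and lck_grp_free() are assumed to be the standard lock-group API declared in kern/locks.h):

static lck_grp_t        *my_grp;
static lck_mtx_t        *my_mtx;

void
my_subsystem_start(void)
{
        lck_attr_t      *attr;

        my_grp = lck_grp_alloc_init("my_subsystem", LCK_GRP_ATTR_NULL);
        attr   = lck_attr_alloc_init();

        /* Allocates with kalloc and runs lck_mtx_init() as shown above. */
        my_mtx = lck_mtx_alloc_init(my_grp, attr);
        lck_attr_free(attr);
}

void
my_subsystem_stop(void)
{
        /* lck_mtx_destroy() + kfree(), then drop the group references. */
        lck_mtx_free(my_mtx, my_grp);
        lck_grp_free(my_grp);
}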
 1880 
 1881 /*
 1882  *      Routine:        lck_mtx_init
 1883  */
 1884 void
 1885 lck_mtx_init(
 1886         lck_mtx_t       *lck,
 1887         lck_grp_t       *grp,
 1888         lck_attr_t      *attr) {
 1889         lck_mtx_ext_t   *lck_ext;
 1890         lck_attr_t      *lck_attr;
 1891 
 1892         if (attr != LCK_ATTR_NULL)
 1893                 lck_attr = attr;
 1894         else
 1895                 lck_attr = &LockDefaultLckAttr;
 1896 
 1897         if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
 1898                 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
 1899                         lck_mtx_ext_init(lck_ext, grp, lck_attr);       
 1900                         lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
 1901                         lck->lck_mtx_ptr = lck_ext;
 1902                 }
 1903         } else {
 1904                 lck->lck_mtx_data = 0;
 1905                 lck->lck_mtx_waiters = 0;
 1906                 lck->lck_mtx_pri = 0;
 1907         }
 1908         lck_grp_reference(grp);
 1909         lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
 1910 }
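
When the DEBUG attribute is set, the mutex body becomes a tag/pointer pair: lck_mtx_tag is set to LCK_MTX_TAG_INDIRECT and lck_mtx_ptr points at a separately allocated lck_mtx_ext_t carrying the debug and statistics state. A minimal sketch of how a caller might follow that indirection (editor's illustration only; lck_mtx_get_ext is hypothetical, and the real fast path lives in the optimized assembler mentioned above):

static lck_mtx_ext_t *
lck_mtx_get_ext(lck_mtx_t *lck)
{
        /* Debug-enabled locks are only a handle; the real state is external. */
        if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
                return ((lck_mtx_ext_t *)lck->lck_mtx_ptr);
        return (NULL);                  /* inline, non-debug mutex */
}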
 1911 
 1912 /*
 1913  *      Routine:        lck_mtx_ext_init
 1914  */
 1915 void
 1916 lck_mtx_ext_init(
 1917         lck_mtx_ext_t   *lck,
 1918         lck_grp_t       *grp,
 1919         lck_attr_t      *attr) {
 1920 
 1921         bzero((void *)lck, sizeof(lck_mtx_ext_t));
 1922 
 1923         if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
 1924                 lck->lck_mtx_deb.type = MUTEX_TAG;
 1925                 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
 1926         }
 1927 
 1928         lck->lck_mtx_grp = grp;
 1929 
 1930         if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
 1931                  lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
 1932 }
 1933 
 1934 /*
 1935  *      Routine:        lck_mtx_destroy
 1936  */
 1937 void
 1938 lck_mtx_destroy(
 1939         lck_mtx_t       *lck,
 1940         lck_grp_t       *grp) {
 1941         boolean_t lck_is_indirect;
 1942         
 1943         if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
 1944                 return;
 1945         lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
 1946         lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
 1947         if (lck_is_indirect)
 1948                 kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
 1949 
 1950         lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
 1951         lck_grp_deallocate(grp);
 1952         return;
 1953 }
 1954 
 1955 
 1956 #if     MACH_KDB
 1957 /*
 1958  * Routines to print out simple_locks and mutexes in a nicely-formatted
 1959  * fashion.
 1960  */
 1961 
 1962 char *simple_lock_labels =      "ENTRY    ILK THREAD   DURATION CALLER";
 1963 char *mutex_labels =            "ENTRY    LOCKED WAITERS   THREAD CALLER";
 1964 
 1965 void    db_print_simple_lock(
 1966                         simple_lock_t   addr);
 1967 
 1968 void    db_print_mutex(
 1969                         mutex_t         * addr);
 1970 
 1971 void
 1972 db_show_one_simple_lock (
 1973         db_expr_t       addr,
 1974         boolean_t       have_addr,
 1975         db_expr_t       count,
 1976         char            * modif)
 1977 {
 1978         simple_lock_t   saddr = (simple_lock_t)addr;
 1979 
 1980         if (saddr == (simple_lock_t)0 || !have_addr) {
 1981                 db_error ("No simple_lock\n");
 1982         }
 1983 #if     USLOCK_DEBUG
 1984         else if (saddr->lock_type != USLOCK_TAG)
 1985                 db_error ("Not a simple_lock\n");
 1986 #endif  /* USLOCK_DEBUG */
 1987 
 1988         db_printf ("%s\n", simple_lock_labels);
 1989         db_print_simple_lock (saddr);
 1990 }
 1991 
 1992 void
 1993 db_print_simple_lock (
 1994         simple_lock_t   addr)
 1995 {
 1996 
 1997         db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
 1998 #if     USLOCK_DEBUG
 1999         db_printf (" %08x", addr->debug.lock_thread);
 2000         db_printf (" %08x ", addr->debug.duration[1]);
 2001         db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
 2002 #endif  /* USLOCK_DEBUG */
 2003         db_printf ("\n");
 2004 }
 2005 
 2006 void
 2007 db_show_one_mutex (
 2008         db_expr_t       addr,
 2009         boolean_t       have_addr,
 2010         db_expr_t       count,
 2011         char            * modif)
 2012 {
 2013         mutex_t         * maddr = (mutex_t *)addr;
 2014 
 2015         if (maddr == (mutex_t *)0 || !have_addr)
 2016                 db_error ("No mutex\n");
 2017 #if     MACH_LDEBUG
 2018         else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
 2019                 db_error ("Not a mutex\n");
 2020 #endif  /* MACH_LDEBUG */
 2021 
 2022         db_printf ("%s\n", mutex_labels);
 2023         db_print_mutex (maddr);
 2024 }
 2025 
 2026 void
 2027 db_print_mutex (
 2028         mutex_t         * addr)
 2029 {
 2030         db_printf ("%08x %6d %7d",
 2031                    addr, *addr, addr->lck_mtx.lck_mtx_waiters);
 2032 #if     MACH_LDEBUG
 2033         db_printf (" %08x ", addr->lck_mtx_deb.thread);
 2034         db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
 2035 #endif  /* MACH_LDEBUG */
 2036         db_printf ("\n");
 2037 }
 2038 
 2039 void
 2040 db_show_one_lock(
 2041         lock_t  *lock)
 2042 {
 2043         db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
 2044                   lock->lck_rw.lck_rw_shared_cnt,
 2045                   lock->lck_rw.lck_rw_want_upgrade ? "" : "!",
 2046                   lock->lck_rw.lck_rw_want_excl ? "" : "!");
 2047         db_printf("%swaiting\n", 
 2048                   lock->lck_rw.lck_rw_waiting ? "" : "!");
 2049         db_printf("%sInterlock\n",
 2050                   lock->lck_rw.lck_rw_interlock ? "" : "!");
 2051 }
 2052 
 2053 #endif  /* MACH_KDB */
 2054 
