FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*      $NetBSD: kern_lock.c,v 1.75.2.1 2004/08/23 05:59:27 tron Exp $  */
    2 
    3 /*-
    4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    9  * NASA Ames Research Center.
   10  *
   11  * This code is derived from software contributed to The NetBSD Foundation
   12  * by Ross Harvey.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. All advertising materials mentioning features or use of this software
   23  *    must display the following acknowledgement:
   24  *      This product includes software developed by the NetBSD
   25  *      Foundation, Inc. and its contributors.
   26  * 4. Neither the name of The NetBSD Foundation nor the names of its
   27  *    contributors may be used to endorse or promote products derived
   28  *    from this software without specific prior written permission.
   29  *
   30  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   32  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   33  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   34  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   35  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   36  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   37  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   38  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   40  * POSSIBILITY OF SUCH DAMAGE.
   41  */
   42 
   43 /* 
   44  * Copyright (c) 1995
   45  *      The Regents of the University of California.  All rights reserved.
   46  *
   47  * This code contains ideas from software contributed to Berkeley by
   48  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
   49  * System project at Carnegie-Mellon University.
   50  *
   51  * Redistribution and use in source and binary forms, with or without
   52  * modification, are permitted provided that the following conditions
   53  * are met:
   54  * 1. Redistributions of source code must retain the above copyright
   55  *    notice, this list of conditions and the following disclaimer.
   56  * 2. Redistributions in binary form must reproduce the above copyright
   57  *    notice, this list of conditions and the following disclaimer in the
   58  *    documentation and/or other materials provided with the distribution.
   59  * 3. Neither the name of the University nor the names of its contributors
   60  *    may be used to endorse or promote products derived from this software
   61  *    without specific prior written permission.
   62  *
   63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   73  * SUCH DAMAGE.
   74  *
   75  *      @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.75.2.1 2004/08/23 05:59:27 tron Exp $");
   80 
   81 #include "opt_multiprocessor.h"
   82 #include "opt_lockdebug.h"
   83 #include "opt_ddb.h"
   84 
   85 #include <sys/param.h>
   86 #include <sys/proc.h>
   87 #include <sys/lock.h>
   88 #include <sys/systm.h>
   89 #include <machine/cpu.h>
   90 
   91 #if defined(LOCKDEBUG)
   92 #include <sys/syslog.h>
   93 /*
    94  * note that stdarg.h and the ansi-style va_start macro are used for both
    95  * ansi and traditional c compiles.
   96  * XXX: this requires that stdarg.h define: va_alist and va_dcl
   97  */
   98 #include <machine/stdarg.h>
   99 
  100 void    lock_printf(const char *fmt, ...)
  101     __attribute__((__format__(__printf__,1,2)));
  102 
  103 static int acquire(__volatile struct lock *, int *, int, int, int);
  104 
  105 int     lock_debug_syslog = 0;  /* defaults to printf, but can be patched */
  106 
  107 #ifdef DDB
  108 #include <ddb/ddbvar.h>
  109 #include <machine/db_machdep.h>
  110 #include <ddb/db_command.h>
  111 #include <ddb/db_interface.h>
  112 #endif
  113 #endif
  114 
  115 /*
  116  * Locking primitives implementation.
  117  * Locks provide shared/exclusive synchronization.
  118  */
  119 
  120 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
  121 #if defined(MULTIPROCESSOR) /* { */
  122 #define COUNT_CPU(cpu_id, x)                                            \
  123         curcpu()->ci_spin_locks += (x)
  124 #else
  125 u_long  spin_locks;
  126 #define COUNT_CPU(cpu_id, x)    spin_locks += (x)
  127 #endif /* MULTIPROCESSOR */ /* } */
  128 
  129 #define COUNT(lkp, l, cpu_id, x)                                        \
  130 do {                                                                    \
  131         if ((lkp)->lk_flags & LK_SPIN)                                  \
  132                 COUNT_CPU((cpu_id), (x));                               \
  133         else                                                            \
  134                 (l)->l_locks += (x);                                    \
  135 } while (/*CONSTCOND*/0)
  136 #else
  137 #define COUNT(lkp, p, cpu_id, x)
  138 #define COUNT_CPU(cpu_id, x)
  139 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
  140 
  141 #ifndef SPINLOCK_SPIN_HOOK              /* from <machine/lock.h> */
  142 #define SPINLOCK_SPIN_HOOK              /* nothing */
  143 #endif
  144 
  145 #define INTERLOCK_ACQUIRE(lkp, flags, s)                                \
  146 do {                                                                    \
  147         if ((flags) & LK_SPIN)                                          \
  148                 s = spllock();                                          \
  149         simple_lock(&(lkp)->lk_interlock);                              \
  150 } while (/*CONSTCOND*/ 0)
  151 
  152 #define INTERLOCK_RELEASE(lkp, flags, s)                                \
  153 do {                                                                    \
  154         simple_unlock(&(lkp)->lk_interlock);                            \
  155         if ((flags) & LK_SPIN)                                          \
  156                 splx(s);                                                \
  157 } while (/*CONSTCOND*/ 0)
  158 
  159 #ifdef DDB /* { */
  160 #ifdef MULTIPROCESSOR
  161 int simple_lock_debugger = 1;   /* more serious on MP */
  162 #else
  163 int simple_lock_debugger = 0;
  164 #endif
  165 #define SLOCK_DEBUGGER()        if (simple_lock_debugger) Debugger()
  166 #define SLOCK_TRACE()                                                   \
  167         db_stack_trace_print((db_expr_t)__builtin_frame_address(0),     \
  168             TRUE, 65535, "", lock_printf);
  169 #else
  170 #define SLOCK_DEBUGGER()        /* nothing */
  171 #define SLOCK_TRACE()           /* nothing */
  172 #endif /* } */
  173 
  174 #if defined(LOCKDEBUG)
  175 #if defined(DDB)
  176 #define SPINLOCK_SPINCHECK_DEBUGGER     Debugger()
  177 #else
  178 #define SPINLOCK_SPINCHECK_DEBUGGER     /* nothing */
  179 #endif
  180 
  181 #define SPINLOCK_SPINCHECK_DECL                                         \
  182         /* 32-bits of count -- wrap constitutes a "spinout" */          \
  183         uint32_t __spinc = 0
  184 
  185 #define SPINLOCK_SPINCHECK                                              \
  186 do {                                                                    \
  187         if (++__spinc == 0) {                                           \
  188                 lock_printf("LK_SPIN spinout, excl %d, share %d\n",     \
  189                     lkp->lk_exclusivecount, lkp->lk_sharecount);        \
  190                 if (lkp->lk_exclusivecount)                             \
  191                         lock_printf("held by CPU %lu\n",                \
  192                             (u_long) lkp->lk_cpu);                      \
  193                 if (lkp->lk_lock_file)                                  \
  194                         lock_printf("last locked at %s:%d\n",           \
  195                             lkp->lk_lock_file, lkp->lk_lock_line);      \
  196                 if (lkp->lk_unlock_file)                                \
  197                         lock_printf("last unlocked at %s:%d\n",         \
  198                             lkp->lk_unlock_file, lkp->lk_unlock_line);  \
  199                 SLOCK_TRACE();                                          \
  200                 SPINLOCK_SPINCHECK_DEBUGGER;                            \
  201         }                                                               \
  202 } while (/*CONSTCOND*/ 0)
  203 #else
  204 #define SPINLOCK_SPINCHECK_DECL                 /* nothing */
  205 #define SPINLOCK_SPINCHECK                      /* nothing */
  206 #endif /* LOCKDEBUG && DDB */
  207 
  208 /*
  209  * Acquire a resource.
  210  */
  211 static int
  212 acquire(__volatile struct lock *lkp, int *s, int extflags,
  213     int drain, int wanted)
  214 {
  215         int error;
  216 
  217         KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
  218 
  219         if (extflags & LK_SPIN) {
  220                 int interlocked;
  221 
  222                 SPINLOCK_SPINCHECK_DECL;
  223 
  224                 if (!drain) {
  225                         lkp->lk_waitcount++;
  226                         lkp->lk_flags |= LK_WAIT_NONZERO;
  227                 }
  228                 for (interlocked = 1;;) {
  229                         SPINLOCK_SPINCHECK;
  230                         if ((lkp->lk_flags & wanted) != 0) {
  231                                 if (interlocked) {
  232                                         INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
  233                                         interlocked = 0;
  234                                 }
  235                                 SPINLOCK_SPIN_HOOK;
  236                         } else if (interlocked) {
  237                                 break;
  238                         } else {
  239                                 INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
  240                                 interlocked = 1;
  241                         }
  242                 }
  243                 if (!drain) {
  244                         lkp->lk_waitcount--;
  245                         if (lkp->lk_waitcount == 0)
  246                                 lkp->lk_flags &= ~LK_WAIT_NONZERO;
  247                 }
  248                 KASSERT((lkp->lk_flags & wanted) == 0);
  249                 error = 0;      /* sanity */
  250         } else {
  251                 for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
  252                         if (drain)
  253                                 lkp->lk_flags |= LK_WAITDRAIN;
  254                         else {
  255                                 lkp->lk_waitcount++;
  256                                 lkp->lk_flags |= LK_WAIT_NONZERO;
  257                         }
  258                         /* XXX Cast away volatile. */
  259                         error = ltsleep(drain ?
  260                             (void *)&lkp->lk_flags :
  261                             (void *)lkp, lkp->lk_prio,
  262                             lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
  263                         if (!drain) {
  264                                 lkp->lk_waitcount--;
  265                                 if (lkp->lk_waitcount == 0)
  266                                         lkp->lk_flags &= ~LK_WAIT_NONZERO;
  267                         }
  268                         if (error)
  269                                 break;
  270                         if (extflags & LK_SLEEPFAIL) {
  271                                 error = ENOLCK;
  272                                 break;
  273                         }
  274                 }
  275         }
  276 
  277         return error;
  278 }
  279 
  280 #define SETHOLDER(lkp, pid, lid, cpu_id)                                \
  281 do {                                                                    \
  282         if ((lkp)->lk_flags & LK_SPIN)                                  \
  283                 (lkp)->lk_cpu = cpu_id;                                 \
  284         else {                                                          \
  285                 (lkp)->lk_lockholder = pid;                             \
  286                 (lkp)->lk_locklwp = lid;                                \
  287         }                                                               \
  288 } while (/*CONSTCOND*/0)
  289 
  290 #define WEHOLDIT(lkp, pid, lid, cpu_id)                                 \
  291         (((lkp)->lk_flags & LK_SPIN) != 0 ?                             \
  292          ((lkp)->lk_cpu == (cpu_id)) :                                  \
  293          ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
  294 
  295 #define WAKEUP_WAITER(lkp)                                              \
  296 do {                                                                    \
  297         if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==          \
  298             LK_WAIT_NONZERO) {                                          \
  299                 /* XXX Cast away volatile. */                           \
  300                 wakeup((void *)(lkp));                                  \
  301         }                                                               \
  302 } while (/*CONSTCOND*/0)
  303 
  304 #if defined(LOCKDEBUG) /* { */
  305 #if defined(MULTIPROCESSOR) /* { */
  306 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
  307 
  308 #define SPINLOCK_LIST_LOCK()                                            \
  309         __cpu_simple_lock(&spinlock_list_slock.lock_data)
  310 
  311 #define SPINLOCK_LIST_UNLOCK()                                          \
  312         __cpu_simple_unlock(&spinlock_list_slock.lock_data)
  313 #else
  314 #define SPINLOCK_LIST_LOCK()    /* nothing */
  315 
  316 #define SPINLOCK_LIST_UNLOCK()  /* nothing */
  317 #endif /* MULTIPROCESSOR */ /* } */
  318 
  319 TAILQ_HEAD(, lock) spinlock_list =
  320     TAILQ_HEAD_INITIALIZER(spinlock_list);
  321 
  322 #define HAVEIT(lkp)                                                     \
  323 do {                                                                    \
  324         if ((lkp)->lk_flags & LK_SPIN) {                                \
  325                 int s = spllock();                                      \
  326                 SPINLOCK_LIST_LOCK();                                   \
  327                 /* XXX Cast away volatile. */                           \
  328                 TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
  329                     lk_list);                                           \
  330                 SPINLOCK_LIST_UNLOCK();                                 \
  331                 splx(s);                                                \
  332         }                                                               \
  333 } while (/*CONSTCOND*/0)
  334 
  335 #define DONTHAVEIT(lkp)                                                 \
  336 do {                                                                    \
  337         if ((lkp)->lk_flags & LK_SPIN) {                                \
  338                 int s = spllock();                                      \
  339                 SPINLOCK_LIST_LOCK();                                   \
  340                 /* XXX Cast away volatile. */                           \
  341                 TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),      \
  342                     lk_list);                                           \
  343                 SPINLOCK_LIST_UNLOCK();                                 \
  344                 splx(s);                                                \
  345         }                                                               \
  346 } while (/*CONSTCOND*/0)
  347 #else
  348 #define HAVEIT(lkp)             /* nothing */
  349 
  350 #define DONTHAVEIT(lkp)         /* nothing */
  351 #endif /* LOCKDEBUG */ /* } */
  352 
  353 #if defined(LOCKDEBUG)
  354 /*
  355  * Lock debug printing routine; can be configured to print to console
  356  * or log to syslog.
  357  */
  358 void
  359 lock_printf(const char *fmt, ...)
  360 {
  361         char b[150];
  362         va_list ap;
  363 
  364         va_start(ap, fmt);
  365         if (lock_debug_syslog)
  366                 vlog(LOG_DEBUG, fmt, ap);
  367         else {
  368                 vsnprintf(b, sizeof(b), fmt, ap);
  369                 printf_nolog("%s", b);
  370         }
  371         va_end(ap);
  372 }
  373 #endif /* LOCKDEBUG */
  374 
  375 /*
  376  * Initialize a lock; required before use.
  377  */
  378 void
  379 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
  380 {
  381 
  382         memset(lkp, 0, sizeof(struct lock));
  383         simple_lock_init(&lkp->lk_interlock);
  384         lkp->lk_flags = flags & LK_EXTFLG_MASK;
  385         if (flags & LK_SPIN)
  386                 lkp->lk_cpu = LK_NOCPU;
  387         else {
  388                 lkp->lk_lockholder = LK_NOPROC;
  389                 lkp->lk_prio = prio;
  390                 lkp->lk_timo = timo;
  391         }
  392         lkp->lk_wmesg = wmesg;  /* just a name for spin locks */
  393 #if defined(LOCKDEBUG)
  394         lkp->lk_lock_file = NULL;
  395         lkp->lk_unlock_file = NULL;
  396 #endif
  397 }
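/*
 * A minimal initialization sketch (illustrative only; the softc, the
 * "mydev" wait message, and the choice of PRIBIO as the sleep priority
 * are assumptions, not part of this file).  Passing LK_SPIN in the flags
 * argument instead would make this a spin lock, in which case the prio
 * and timo arguments are not used.
 */
#if 0
struct mydev_softc {
        struct lock sc_lock;
        /* ... other per-device state ... */
};

static void
mydev_attach(struct mydev_softc *sc)
{

        lockinit(&sc->sc_lock, PRIBIO, "mydev", 0, 0);
}
#endif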
  398 
  399 /*
  400  * Determine the status of a lock.
  401  */
  402 int
  403 lockstatus(struct lock *lkp)
  404 {
  405         int s = 0, lock_type = 0;
  406 
  407         INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
  408         if (lkp->lk_exclusivecount != 0)
  409                 lock_type = LK_EXCLUSIVE;
  410         else if (lkp->lk_sharecount != 0)
  411                 lock_type = LK_SHARED;
  412         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  413         return (lock_type);
  414 }
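/*
 * Usage sketch (hypothetical caller, reusing the mydev_softc assumed
 * above): lockstatus() is convenient in diagnostic assertions, e.g.
 * checking that a lock is held exclusively before touching the state
 * it protects.
 */
#if 0
static void
mydev_modify(struct mydev_softc *sc)
{

        KASSERT(lockstatus(&sc->sc_lock) == LK_EXCLUSIVE);
        /* ... modify state protected by sc_lock ... */
}
#endif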
  415 
  416 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
  417 /*
  418  * Make sure no spin locks are held by a CPU that is about
  419  * to context switch.
  420  */
  421 void
  422 spinlock_switchcheck(void)
  423 {
  424         u_long cnt;
  425         int s;
  426 
  427         s = spllock();
  428 #if defined(MULTIPROCESSOR)
  429         cnt = curcpu()->ci_spin_locks;
  430 #else
  431         cnt = spin_locks;
  432 #endif
  433         splx(s);
  434 
  435         if (cnt != 0)
  436                 panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
  437                     (u_long) cpu_number(), cnt);
  438 }
  439 #endif /* LOCKDEBUG || DIAGNOSTIC */
  440 
  441 /*
  442  * Locks and IPLs (interrupt priority levels):
  443  *
  444  * Locks which may be taken from interrupt context must be handled
  445  * very carefully; you must spl to the highest IPL where the lock
  446  * is needed before acquiring the lock.
  447  *
  448  * It is also important to avoid deadlock, since certain (very high
  449  * priority) interrupts are often needed to keep the system as a whole
  450  * from deadlocking, and must not be blocked while you are spinning
  451  * waiting for a lower-priority lock.
  452  *
  453  * In addition, the lock-debugging hooks themselves need to use locks!
  454  *
   455  * A raw __cpu_simple_lock may be used from interrupts as long as it
  456  * is acquired and held at a single IPL.
  457  *
  458  * A simple_lock (which is a __cpu_simple_lock wrapped with some
  459  * debugging hooks) may be used at or below spllock(), which is
  460  * typically at or just below splhigh() (i.e. blocks everything
  461  * but certain machine-dependent extremely high priority interrupts).
  462  *
  463  * spinlockmgr spinlocks should be used at or below splsched().
  464  *
  465  * Some platforms may have interrupts of higher priority than splsched(),
  466  * including hard serial interrupts, inter-processor interrupts, and
  467  * kernel debugger traps.
  468  */
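/*
 * Illustrative sketch of the rule above (the lock, the function, and the
 * choice of splbio() are hypothetical): a simple_lock shared with a
 * block-I/O interrupt handler must be taken at splbio() or above from
 * process context, so the handler cannot interrupt the holder and spin
 * forever on the same CPU.
 */
#if 0
struct simplelock mydev_intr_slock;

static void
mydev_kick(void)
{
        int s;

        s = splbio();                   /* block the interrupt first */
        simple_lock(&mydev_intr_slock);
        /* ... touch state shared with the interrupt handler ... */
        simple_unlock(&mydev_intr_slock);
        splx(s);
}
#endif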
  469 
  470 /*
  471  * XXX XXX kludge around another kludge..
  472  *
  473  * vfs_shutdown() may be called from interrupt context, either as a result
  474  * of a panic, or from the debugger.   It proceeds to call
   476  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
  476  *
  477  * We would like to make an attempt to sync the filesystems in this case, so
  478  * if this happens, we treat attempts to acquire locks specially.
  479  * All locks are acquired on behalf of proc0.
  480  *
   481  * If we've already panicked, we don't block waiting for locks, but
  482  * just barge right ahead since we're already going down in flames.
  483  */
  484 
  485 /*
  486  * Set, change, or release a lock.
  487  *
  488  * Shared requests increment the shared count. Exclusive requests set the
  489  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
  490  * accepted shared locks and shared-to-exclusive upgrades to go away.
  491  */
  492 int
  493 #if defined(LOCKDEBUG)
  494 _lockmgr(__volatile struct lock *lkp, u_int flags,
  495     struct simplelock *interlkp, const char *file, int line)
  496 #else
  497 lockmgr(__volatile struct lock *lkp, u_int flags,
  498     struct simplelock *interlkp)
  499 #endif
  500 {
  501         int error;
  502         pid_t pid;
  503         lwpid_t lid;
  504         int extflags;
  505         cpuid_t cpu_id;
  506         struct lwp *l = curlwp;
  507         int lock_shutdown_noblock = 0;
  508         int s = 0;
  509 
  510         error = 0;
  511 
  512         INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
  513         if (flags & LK_INTERLOCK)
  514                 simple_unlock(interlkp);
  515         extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
  516 
  517 #ifdef DIAGNOSTIC /* { */
  518         /*
  519          * Don't allow spins on sleep locks and don't allow sleeps
  520          * on spin locks.
  521          */
  522         if ((flags ^ lkp->lk_flags) & LK_SPIN)
  523                 panic("lockmgr: sleep/spin mismatch");
  524 #endif /* } */
  525 
  526         if (extflags & LK_SPIN) {
  527                 pid = LK_KERNPROC;
  528                 lid = 0;
  529         } else {
  530                 if (l == NULL) {
  531                         if (!doing_shutdown) {
  532                                 panic("lockmgr: no context");
  533                         } else {
  534                                 l = &lwp0;
  535                                 if (panicstr && (!(flags & LK_NOWAIT))) {
  536                                         flags |= LK_NOWAIT;
  537                                         lock_shutdown_noblock = 1;
  538                                 }
  539                         }
  540                 }
  541                 lid = l->l_lid;
  542                 pid = l->l_proc->p_pid;
  543         }
  544         cpu_id = cpu_number();
  545 
  546         /*
  547          * Once a lock has drained, the LK_DRAINING flag is set and an
  548          * exclusive lock is returned. The only valid operation thereafter
  549          * is a single release of that exclusive lock. This final release
  550          * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
  551          * further requests of any sort will result in a panic. The bits
  552          * selected for these two flags are chosen so that they will be set
  553          * in memory that is freed (freed memory is filled with 0xdeadbeef).
  554          * The final release is permitted to give a new lease on life to
  555          * the lock by specifying LK_REENABLE.
  556          */
  557         if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
  558 #ifdef DIAGNOSTIC /* { */
  559                 if (lkp->lk_flags & LK_DRAINED)
  560                         panic("lockmgr: using decommissioned lock");
  561                 if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
  562                     WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
  563                         panic("lockmgr: non-release on draining lock: %d",
  564                             flags & LK_TYPE_MASK);
  565 #endif /* DIAGNOSTIC */ /* } */
  566                 lkp->lk_flags &= ~LK_DRAINING;
  567                 if ((flags & LK_REENABLE) == 0)
  568                         lkp->lk_flags |= LK_DRAINED;
  569         }
  570 
  571         switch (flags & LK_TYPE_MASK) {
  572 
  573         case LK_SHARED:
  574                 if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
  575                         /*
  576                          * If just polling, check to see if we will block.
  577                          */
  578                         if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  579                             (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
  580                                 error = EBUSY;
  581                                 break;
  582                         }
  583                         /*
  584                          * Wait for exclusive locks and upgrades to clear.
  585                          */
  586                         error = acquire(lkp, &s, extflags, 0,
  587                             LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
  588                         if (error)
  589                                 break;
  590                         lkp->lk_sharecount++;
  591                         lkp->lk_flags |= LK_SHARE_NONZERO;
  592                         COUNT(lkp, l, cpu_id, 1);
  593                         break;
  594                 }
  595                 /*
  596                  * We hold an exclusive lock, so downgrade it to shared.
  597                  * An alternative would be to fail with EDEADLK.
  598                  */
  599                 lkp->lk_sharecount++;
  600                 lkp->lk_flags |= LK_SHARE_NONZERO;
  601                 COUNT(lkp, l, cpu_id, 1);
  602                 /* fall into downgrade */
  603 
  604         case LK_DOWNGRADE:
  605                 if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
  606                     lkp->lk_exclusivecount == 0)
  607                         panic("lockmgr: not holding exclusive lock");
  608                 lkp->lk_sharecount += lkp->lk_exclusivecount;
  609                 lkp->lk_flags |= LK_SHARE_NONZERO;
  610                 lkp->lk_exclusivecount = 0;
  611                 lkp->lk_recurselevel = 0;
  612                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  613                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  614 #if defined(LOCKDEBUG)
  615                 lkp->lk_unlock_file = file;
  616                 lkp->lk_unlock_line = line;
  617 #endif
  618                 DONTHAVEIT(lkp);
  619                 WAKEUP_WAITER(lkp);
  620                 break;
  621 
  622         case LK_EXCLUPGRADE:
  623                 /*
  624                  * If another process is ahead of us to get an upgrade,
  625                  * then we want to fail rather than have an intervening
  626                  * exclusive access.
  627                  */
  628                 if (lkp->lk_flags & LK_WANT_UPGRADE) {
  629                         lkp->lk_sharecount--;
  630                         if (lkp->lk_sharecount == 0)
  631                                 lkp->lk_flags &= ~LK_SHARE_NONZERO;
  632                         COUNT(lkp, l, cpu_id, -1);
  633                         error = EBUSY;
  634                         break;
  635                 }
  636                 /* fall into normal upgrade */
  637 
  638         case LK_UPGRADE:
  639                 /*
  640                  * Upgrade a shared lock to an exclusive one. If another
  641                  * shared lock has already requested an upgrade to an
  642                  * exclusive lock, our shared lock is released and an
  643                  * exclusive lock is requested (which will be granted
  644                  * after the upgrade). If we return an error, the file
  645                  * will always be unlocked.
  646                  */
  647                 if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
  648                         panic("lockmgr: upgrade exclusive lock");
  649                 lkp->lk_sharecount--;
  650                 if (lkp->lk_sharecount == 0)
  651                         lkp->lk_flags &= ~LK_SHARE_NONZERO;
  652                 COUNT(lkp, l, cpu_id, -1);
  653                 /*
  654                  * If we are just polling, check to see if we will block.
  655                  */
  656                 if ((extflags & LK_NOWAIT) &&
  657                     ((lkp->lk_flags & LK_WANT_UPGRADE) ||
  658                      lkp->lk_sharecount > 1)) {
  659                         error = EBUSY;
  660                         break;
  661                 }
  662                 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
  663                         /*
   664                          * We are the first shared lock to request an upgrade, so
  665                          * request upgrade and wait for the shared count to
  666                          * drop to zero, then take exclusive lock.
  667                          */
  668                         lkp->lk_flags |= LK_WANT_UPGRADE;
  669                         error = acquire(lkp, &s, extflags, 0, LK_SHARE_NONZERO);
  670                         lkp->lk_flags &= ~LK_WANT_UPGRADE;
  671                         if (error) {
  672                                 WAKEUP_WAITER(lkp);
  673                                 break;
  674                         }
  675                         lkp->lk_flags |= LK_HAVE_EXCL;
  676                         SETHOLDER(lkp, pid, lid, cpu_id);
  677 #if defined(LOCKDEBUG)
  678                         lkp->lk_lock_file = file;
  679                         lkp->lk_lock_line = line;
  680 #endif
  681                         HAVEIT(lkp);
  682                         if (lkp->lk_exclusivecount != 0)
  683                                 panic("lockmgr: non-zero exclusive count");
  684                         lkp->lk_exclusivecount = 1;
  685                         if (extflags & LK_SETRECURSE)
  686                                 lkp->lk_recurselevel = 1;
  687                         COUNT(lkp, l, cpu_id, 1);
  688                         break;
  689                 }
  690                 /*
  691                  * Someone else has requested upgrade. Release our shared
  692                  * lock, awaken upgrade requestor if we are the last shared
  693                  * lock, then request an exclusive lock.
  694                  */
  695                 if (lkp->lk_sharecount == 0)
  696                         WAKEUP_WAITER(lkp);
  697                 /* fall into exclusive request */
  698 
  699         case LK_EXCLUSIVE:
  700                 if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
  701                         /*
  702                          * Recursive lock.
  703                          */
  704                         if ((extflags & LK_CANRECURSE) == 0 &&
  705                              lkp->lk_recurselevel == 0) {
  706                                 if (extflags & LK_RECURSEFAIL) {
  707                                         error = EDEADLK;
  708                                         break;
  709                                 } else
  710                                         panic("lockmgr: locking against myself");
  711                         }
  712                         lkp->lk_exclusivecount++;
  713                         if (extflags & LK_SETRECURSE &&
  714                             lkp->lk_recurselevel == 0)
  715                                 lkp->lk_recurselevel = lkp->lk_exclusivecount;
  716                         COUNT(lkp, l, cpu_id, 1);
  717                         break;
  718                 }
  719                 /*
  720                  * If we are just polling, check to see if we will sleep.
  721                  */
  722                 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  723                      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  724                      LK_SHARE_NONZERO))) {
  725                         error = EBUSY;
  726                         break;
  727                 }
  728                 /*
  729                  * Try to acquire the want_exclusive flag.
  730                  */
  731                 error = acquire(lkp, &s, extflags, 0,
  732                     LK_HAVE_EXCL | LK_WANT_EXCL);
  733                 if (error)
  734                         break;
  735                 lkp->lk_flags |= LK_WANT_EXCL;
  736                 /*
  737                  * Wait for shared locks and upgrades to finish.
  738                  */
  739                 error = acquire(lkp, &s, extflags, 0,
  740                     LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
  741                 lkp->lk_flags &= ~LK_WANT_EXCL;
  742                 if (error) {
  743                         WAKEUP_WAITER(lkp);
  744                         break;
  745                 }
  746                 lkp->lk_flags |= LK_HAVE_EXCL;
  747                 SETHOLDER(lkp, pid, lid, cpu_id);
  748 #if defined(LOCKDEBUG)
  749                 lkp->lk_lock_file = file;
  750                 lkp->lk_lock_line = line;
  751 #endif
  752                 HAVEIT(lkp);
  753                 if (lkp->lk_exclusivecount != 0)
  754                         panic("lockmgr: non-zero exclusive count");
  755                 lkp->lk_exclusivecount = 1;
  756                 if (extflags & LK_SETRECURSE)
  757                         lkp->lk_recurselevel = 1;
  758                 COUNT(lkp, l, cpu_id, 1);
  759                 break;
  760 
  761         case LK_RELEASE:
  762                 if (lkp->lk_exclusivecount != 0) {
  763                         if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
  764                                 if (lkp->lk_flags & LK_SPIN) {
  765                                         panic("lockmgr: processor %lu, not "
  766                                             "exclusive lock holder %lu "
  767                                             "unlocking", cpu_id, lkp->lk_cpu);
  768                                 } else {
  769                                         panic("lockmgr: pid %d, not "
  770                                             "exclusive lock holder %d "
  771                                             "unlocking", pid,
  772                                             lkp->lk_lockholder);
  773                                 }
  774                         }
  775                         if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
  776                                 lkp->lk_recurselevel = 0;
  777                         lkp->lk_exclusivecount--;
  778                         COUNT(lkp, l, cpu_id, -1);
  779                         if (lkp->lk_exclusivecount == 0) {
  780                                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  781                                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  782 #if defined(LOCKDEBUG)
  783                                 lkp->lk_unlock_file = file;
  784                                 lkp->lk_unlock_line = line;
  785 #endif
  786                                 DONTHAVEIT(lkp);
  787                         }
  788                 } else if (lkp->lk_sharecount != 0) {
  789                         lkp->lk_sharecount--;
  790                         if (lkp->lk_sharecount == 0)
  791                                 lkp->lk_flags &= ~LK_SHARE_NONZERO;
  792                         COUNT(lkp, l, cpu_id, -1);
  793                 }
  794 #ifdef DIAGNOSTIC
  795                 else
  796                         panic("lockmgr: release of unlocked lock!");
  797 #endif
  798                 WAKEUP_WAITER(lkp);
  799                 break;
  800 
  801         case LK_DRAIN:
  802                 /*
  803                  * Check that we do not already hold the lock, as it can 
  804                  * never drain if we do. Unfortunately, we have no way to
  805                  * check for holding a shared lock, but at least we can
  806                  * check for an exclusive one.
  807                  */
  808                 if (WEHOLDIT(lkp, pid, lid, cpu_id))
  809                         panic("lockmgr: draining against myself");
  810                 /*
  811                  * If we are just polling, check to see if we will sleep.
  812                  */
  813                 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  814                      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  815                      LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
  816                         error = EBUSY;
  817                         break;
  818                 }
  819                 error = acquire(lkp, &s, extflags, 1,
  820                     LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  821                     LK_SHARE_NONZERO | LK_WAIT_NONZERO);
  822                 if (error)
  823                         break;
  824                 lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
  825                 SETHOLDER(lkp, pid, lid, cpu_id);
  826 #if defined(LOCKDEBUG)
  827                 lkp->lk_lock_file = file;
  828                 lkp->lk_lock_line = line;
  829 #endif
  830                 HAVEIT(lkp);
  831                 lkp->lk_exclusivecount = 1;
  832                 /* XXX unlikely that we'd want this */
  833                 if (extflags & LK_SETRECURSE)
  834                         lkp->lk_recurselevel = 1;
  835                 COUNT(lkp, l, cpu_id, 1);
  836                 break;
  837 
  838         default:
  839                 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  840                 panic("lockmgr: unknown locktype request %d",
  841                     flags & LK_TYPE_MASK);
  842                 /* NOTREACHED */
  843         }
  844         if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
  845             ((lkp->lk_flags &
  846               (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  847               LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
  848                 lkp->lk_flags &= ~LK_WAITDRAIN;
  849                 wakeup((void *)&lkp->lk_flags);
  850         }
  851         /*
  852          * Note that this panic will be a recursive panic, since
  853          * we only set lock_shutdown_noblock above if panicstr != NULL.
  854          */
  855         if (error && lock_shutdown_noblock)
  856                 panic("lockmgr: deadlock (see previous panic)");
  857         
  858         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  859         return (error);
  860 }
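/*
 * Usage sketch for the request types handled above (the lock and the
 * calling context are hypothetical; error handling is abbreviated).
 * Under LOCKDEBUG the lockmgr() macro expands to _lockmgr() with the
 * caller's file and line, matching the alternate prototype above.
 */
#if 0
static void
lockmgr_example(void)
{
        struct lock lk;

        lockinit(&lk, PRIBIO, "example", 0, 0);

        /* Shared (read) access. */
        (void) lockmgr(&lk, LK_SHARED, NULL);
        (void) lockmgr(&lk, LK_RELEASE, NULL);

        /* Exclusive access, failing with EBUSY rather than sleeping. */
        if (lockmgr(&lk, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
                (void) lockmgr(&lk, LK_RELEASE, NULL);

        /* Upgrade a shared hold; on failure the shared hold is gone. */
        (void) lockmgr(&lk, LK_SHARED, NULL);
        if (lockmgr(&lk, LK_UPGRADE, NULL) == 0)
                (void) lockmgr(&lk, LK_RELEASE, NULL);
}
#endif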
  861 
  862 /*
  863  * For a recursive spinlock held one or more times by the current CPU,
  864  * release all N locks, and return N.
  865  * Intended for use in mi_switch() shortly before context switching.
  866  */
  867 
  868 int
  869 #if defined(LOCKDEBUG)
  870 _spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
  871 #else
  872 spinlock_release_all(__volatile struct lock *lkp)
  873 #endif
  874 {
  875         int s, count;
  876         cpuid_t cpu_id;
  877         
  878         KASSERT(lkp->lk_flags & LK_SPIN);
  879         
  880         INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
  881 
  882         cpu_id = cpu_number();
  883         count = lkp->lk_exclusivecount;
  884         
  885         if (count != 0) {
  886 #ifdef DIAGNOSTIC               
  887                 if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
  888                         panic("spinlock_release_all: processor %lu, not "
  889                             "exclusive lock holder %lu "
  890                             "unlocking", (long)cpu_id, lkp->lk_cpu);
  891                 }
  892 #endif
  893                 lkp->lk_recurselevel = 0;
  894                 lkp->lk_exclusivecount = 0;
  895                 COUNT_CPU(cpu_id, -count);
  896                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  897                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  898 #if defined(LOCKDEBUG)
  899                 lkp->lk_unlock_file = file;
  900                 lkp->lk_unlock_line = line;
  901 #endif
  902                 DONTHAVEIT(lkp);
  903         }
  904 #ifdef DIAGNOSTIC
  905         else if (lkp->lk_sharecount != 0)
  906                 panic("spinlock_release_all: release of shared lock!");
  907         else
  908                 panic("spinlock_release_all: release of unlocked lock!");
  909 #endif
  910         INTERLOCK_RELEASE(lkp, LK_SPIN, s);     
  911 
  912         return (count);
  913 }
  914 
  915 /*
   916  * For a recursive spinlock previously released with spinlock_release_all(),
   917  * re-acquire the lock N times, where N is the count that call returned.
  918  * Intended for use in mi_switch() right after resuming execution.
  919  */
  920 
  921 void
  922 #if defined(LOCKDEBUG)
  923 _spinlock_acquire_count(__volatile struct lock *lkp, int count,
  924     const char *file, int line)
  925 #else
  926 spinlock_acquire_count(__volatile struct lock *lkp, int count)
  927 #endif
  928 {
  929         int s, error;
  930         cpuid_t cpu_id;
  931         
  932         KASSERT(lkp->lk_flags & LK_SPIN);
  933         
  934         INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
  935 
  936         cpu_id = cpu_number();
  937 
  938 #ifdef DIAGNOSTIC
  939         if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
  940                 panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
  941 #endif
  942         /*
  943          * Try to acquire the want_exclusive flag.
  944          */
  945         error = acquire(lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
  946         lkp->lk_flags |= LK_WANT_EXCL;
  947         /*
  948          * Wait for shared locks and upgrades to finish.
  949          */
  950         error = acquire(lkp, &s, LK_SPIN, 0,
  951             LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE);
  952         lkp->lk_flags &= ~LK_WANT_EXCL;
  953         lkp->lk_flags |= LK_HAVE_EXCL;
  954         SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
  955 #if defined(LOCKDEBUG)
  956         lkp->lk_lock_file = file;
  957         lkp->lk_lock_line = line;
  958 #endif
  959         HAVEIT(lkp);
  960         if (lkp->lk_exclusivecount != 0)
  961                 panic("lockmgr: non-zero exclusive count");
  962         lkp->lk_exclusivecount = count;
  963         lkp->lk_recurselevel = 1;
  964         COUNT_CPU(cpu_id, count);
  965 
  966         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);       
  967 }
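/*
 * Sketch of the intended pairing (hypothetical and simplified from the
 * mi_switch() usage mentioned above): drop every recursive hold of a
 * spinlock such as the kernel_lock before switching away, then restore
 * the same hold count after resuming.
 */
#if 0
static void
switch_example(__volatile struct lock *lkp)
{
        int holds;

        holds = spinlock_release_all(lkp);
        /* ... the context switch happens here ... */
        if (holds != 0)
                spinlock_acquire_count(lkp, holds);
}
#endif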
  968 
  969 
  970 
  971 /*
   972  * Print out information about the state of a lock. Used by VOP_PRINT
   973  * routines to display the status of contained locks.
  974  */
  975 void
  976 lockmgr_printinfo(__volatile struct lock *lkp)
  977 {
  978 
  979         if (lkp->lk_sharecount)
  980                 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
  981                     lkp->lk_sharecount);
  982         else if (lkp->lk_flags & LK_HAVE_EXCL) {
  983                 printf(" lock type %s: EXCL (count %d) by ",
  984                     lkp->lk_wmesg, lkp->lk_exclusivecount);
  985                 if (lkp->lk_flags & LK_SPIN)
  986                         printf("processor %lu", lkp->lk_cpu);
  987                 else
  988                         printf("pid %d.%d", lkp->lk_lockholder,
  989                             lkp->lk_locklwp);
  990         } else
  991                 printf(" not locked");
  992         if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
  993                 printf(" with %d pending", lkp->lk_waitcount);
  994 }
  995 
  996 #if defined(LOCKDEBUG) /* { */
  997 TAILQ_HEAD(, simplelock) simplelock_list =
  998     TAILQ_HEAD_INITIALIZER(simplelock_list);
  999 
 1000 #if defined(MULTIPROCESSOR) /* { */
 1001 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
 1002 
 1003 #define SLOCK_LIST_LOCK()                                               \
 1004         __cpu_simple_lock(&simplelock_list_slock.lock_data)
 1005 
 1006 #define SLOCK_LIST_UNLOCK()                                             \
 1007         __cpu_simple_unlock(&simplelock_list_slock.lock_data)
 1008 
 1009 #define SLOCK_COUNT(x)                                                  \
 1010         curcpu()->ci_simple_locks += (x)
 1011 #else
 1012 u_long simple_locks;
 1013 
 1014 #define SLOCK_LIST_LOCK()       /* nothing */
 1015 
 1016 #define SLOCK_LIST_UNLOCK()     /* nothing */
 1017 
 1018 #define SLOCK_COUNT(x)          simple_locks += (x)
 1019 #endif /* MULTIPROCESSOR */ /* } */
 1020 
 1021 #ifdef MULTIPROCESSOR
 1022 #define SLOCK_MP()              lock_printf("on CPU %ld\n",             \
 1023                                     (u_long) cpu_number())
 1024 #else
 1025 #define SLOCK_MP()              /* nothing */
 1026 #endif
 1027 
 1028 #define SLOCK_WHERE(str, alp, id, l)                                    \
 1029 do {                                                                    \
 1030         lock_printf("\n");                                              \
 1031         lock_printf(str);                                               \
 1032         lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
 1033         SLOCK_MP();                                                     \
 1034         if ((alp)->lock_file != NULL)                                   \
 1035                 lock_printf("last locked: %s:%d\n", (alp)->lock_file,   \
 1036                     (alp)->lock_line);                                  \
 1037         if ((alp)->unlock_file != NULL)                                 \
 1038                 lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
 1039                     (alp)->unlock_line);                                \
 1040         SLOCK_TRACE()                                                   \
 1041         SLOCK_DEBUGGER();                                               \
 1042 } while (/*CONSTCOND*/0)
 1043 
 1044 /*
 1045  * Simple lock functions so that the debugger can see from whence
 1046  * they are being called.
 1047  */
 1048 void
 1049 simple_lock_init(struct simplelock *alp)
 1050 {
 1051 
 1052 #if defined(MULTIPROCESSOR) /* { */
 1053         __cpu_simple_lock_init(&alp->lock_data);
 1054 #else
 1055         alp->lock_data = __SIMPLELOCK_UNLOCKED;
 1056 #endif /* } */
 1057         alp->lock_file = NULL;
 1058         alp->lock_line = 0;
 1059         alp->unlock_file = NULL;
 1060         alp->unlock_line = 0;
 1061         alp->lock_holder = LK_NOCPU;
 1062 }
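/*
 * Minimal usage sketch (the lock and the function are hypothetical).
 * Callers normally use the simple_lock()/simple_unlock() macros from
 * <sys/lock.h>; with LOCKDEBUG enabled they expand to the _simple_lock()
 * and _simple_unlock() functions in this file, passing the caller's file
 * and line so problems can be reported with the offending call site.
 */
#if 0
struct simplelock mydata_slock;

static void
mydata_init(void)
{

        simple_lock_init(&mydata_slock);
        simple_lock(&mydata_slock);
        /* ... set up state protected by mydata_slock ... */
        simple_unlock(&mydata_slock);
}
#endif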
 1063 
 1064 void
 1065 _simple_lock(__volatile struct simplelock *alp, const char *id, int l)
 1066 {
 1067         cpuid_t cpu_id = cpu_number();
 1068         int s;
 1069 
 1070         s = spllock();
 1071 
 1072         /*
 1073          * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1074          * don't take any action, and just fall into the normal spin case.
 1075          */
 1076         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1077 #if defined(MULTIPROCESSOR) /* { */
 1078                 if (alp->lock_holder == cpu_id) {
 1079                         SLOCK_WHERE("simple_lock: locking against myself\n",
 1080                             alp, id, l);
 1081                         goto out;
 1082                 }
 1083 #else
 1084                 SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
 1085                 goto out;
 1086 #endif /* MULTIPROCESSOR */ /* } */
 1087         }
 1088 
 1089 #if defined(MULTIPROCESSOR) /* { */
 1090         /* Acquire the lock before modifying any fields. */
 1091         splx(s);
 1092         __cpu_simple_lock(&alp->lock_data);
 1093         s = spllock();
 1094 #else
 1095         alp->lock_data = __SIMPLELOCK_LOCKED;
 1096 #endif /* } */
 1097 
 1098         if (alp->lock_holder != LK_NOCPU) {
 1099                 SLOCK_WHERE("simple_lock: uninitialized lock\n",
 1100                     alp, id, l);
 1101         }
 1102         alp->lock_file = id;
 1103         alp->lock_line = l;
 1104         alp->lock_holder = cpu_id;
 1105 
 1106         SLOCK_LIST_LOCK();
 1107         /* XXX Cast away volatile */
 1108         TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
 1109         SLOCK_LIST_UNLOCK();
 1110 
 1111         SLOCK_COUNT(1);
 1112 
 1113  out:
 1114         splx(s);
 1115 }
 1116 
 1117 int
 1118 _simple_lock_held(__volatile struct simplelock *alp)
 1119 {
 1120 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
 1121         cpuid_t cpu_id = cpu_number();
 1122 #endif
 1123         int s, locked = 0;
 1124 
 1125         s = spllock();
 1126 
 1127 #if defined(MULTIPROCESSOR)
 1128         if (__cpu_simple_lock_try(&alp->lock_data) == 0)
 1129                 locked = (alp->lock_holder == cpu_id);
 1130         else
 1131                 __cpu_simple_unlock(&alp->lock_data);
 1132 #else
 1133         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1134                 locked = 1;
 1135                 KASSERT(alp->lock_holder == cpu_id);
 1136         }
 1137 #endif
 1138 
 1139         splx(s);
 1140 
 1141         return (locked);
 1142 }
 1143 
 1144 int
 1145 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
 1146 {
 1147         cpuid_t cpu_id = cpu_number();
 1148         int s, rv = 0;
 1149 
 1150         s = spllock();
 1151 
 1152         /*
 1153          * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1154          * don't take any action.
 1155          */
 1156 #if defined(MULTIPROCESSOR) /* { */
 1157         if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
 1158                 if (alp->lock_holder == cpu_id)
 1159                         SLOCK_WHERE("simple_lock_try: locking against myself\n",
 1160                             alp, id, l);
 1161                 goto out;
 1162         }
 1163 #else
 1164         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1165                 SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
 1166                 goto out;
 1167         }
 1168         alp->lock_data = __SIMPLELOCK_LOCKED;
 1169 #endif /* MULTIPROCESSOR */ /* } */
 1170 
 1171         /*
 1172          * At this point, we have acquired the lock.
 1173          */
 1174 
 1175         rv = 1;
 1176 
 1177         alp->lock_file = id;
 1178         alp->lock_line = l;
 1179         alp->lock_holder = cpu_id;
 1180 
 1181         SLOCK_LIST_LOCK();
 1182         /* XXX Cast away volatile. */
 1183         TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
 1184         SLOCK_LIST_UNLOCK();
 1185 
 1186         SLOCK_COUNT(1);
 1187 
 1188  out:
 1189         splx(s);
 1190         return (rv);
 1191 }
 1192 
 1193 void
 1194 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
 1195 {
 1196         int s;
 1197 
 1198         s = spllock();
 1199 
 1200         /*
 1201          * MULTIPROCESSOR case: This is `safe' because we think we hold
 1202          * the lock, and if we don't, we don't take any action.
 1203          */
 1204         if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
 1205                 SLOCK_WHERE("simple_unlock: lock not held\n",
 1206                     alp, id, l);
 1207                 goto out;
 1208         }
 1209 
 1210         SLOCK_LIST_LOCK();
 1211         TAILQ_REMOVE(&simplelock_list, alp, list);
 1212         SLOCK_LIST_UNLOCK();
 1213 
 1214         SLOCK_COUNT(-1);
 1215 
 1216         alp->list.tqe_next = NULL;      /* sanity */
 1217         alp->list.tqe_prev = NULL;      /* sanity */
 1218 
 1219         alp->unlock_file = id;
 1220         alp->unlock_line = l;
 1221 
 1222 #if defined(MULTIPROCESSOR) /* { */
 1223         alp->lock_holder = LK_NOCPU;
 1224         /* Now that we've modified all fields, release the lock. */
 1225         __cpu_simple_unlock(&alp->lock_data);
 1226 #else
 1227         alp->lock_data = __SIMPLELOCK_UNLOCKED;
 1228         KASSERT(alp->lock_holder == cpu_number());
 1229         alp->lock_holder = LK_NOCPU;
 1230 #endif /* } */
 1231 
 1232  out:
 1233         splx(s);
 1234 }
 1235 
 1236 void
 1237 simple_lock_dump(void)
 1238 {
 1239         struct simplelock *alp;
 1240         int s;
 1241 
 1242         s = spllock();
 1243         SLOCK_LIST_LOCK();
 1244         lock_printf("all simple locks:\n");
 1245         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1246                 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
 1247                     alp->lock_file, alp->lock_line);
 1248         }
 1249         SLOCK_LIST_UNLOCK();
 1250         splx(s);
 1251 }
 1252 
 1253 void
 1254 simple_lock_freecheck(void *start, void *end)
 1255 {
 1256         struct simplelock *alp;
 1257         int s;
 1258 
 1259         s = spllock();
 1260         SLOCK_LIST_LOCK();
 1261         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1262                 if ((void *)alp >= start && (void *)alp < end) {
 1263                         lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
 1264                             alp, alp->lock_holder, alp->lock_file,
 1265                             alp->lock_line);
 1266                         SLOCK_DEBUGGER();
 1267                 }
 1268         }
 1269         SLOCK_LIST_UNLOCK();
 1270         splx(s);
 1271 }
 1272 
 1273 /*
 1274  * We must be holding exactly one lock: the sched_lock.
 1275  */
 1276 
 1277 void
 1278 simple_lock_switchcheck(void)
 1279 {
 1280 
 1281         simple_lock_only_held(&sched_lock, "switching");
 1282 }
 1283 
 1284 void
 1285 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
 1286 {
 1287         struct simplelock *alp;
 1288         cpuid_t cpu_id = cpu_number();
 1289         int s;
 1290 
 1291         if (lp) {
 1292                 LOCK_ASSERT(simple_lock_held(lp));
 1293         }
 1294         s = spllock();
 1295         SLOCK_LIST_LOCK();
 1296         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1297                 if (alp == lp)
 1298                         continue;
 1299                 if (alp->lock_holder == cpu_id)
 1300                         break;
 1301         }
 1302         SLOCK_LIST_UNLOCK();
 1303         splx(s);
 1304 
 1305         if (alp != NULL) {
 1306                 lock_printf("\n%s with held simple_lock %p "
 1307                     "CPU %lu %s:%d\n",
 1308                     where, alp, alp->lock_holder, alp->lock_file,
 1309                     alp->lock_line);
 1310                 SLOCK_TRACE();
 1311                 SLOCK_DEBUGGER();
 1312         }
 1313 }
 1314 #endif /* LOCKDEBUG */ /* } */
 1315 
 1316 #if defined(MULTIPROCESSOR)
 1317 /*
 1318  * Functions for manipulating the kernel_lock.  We put them here
 1319  * so that they show up in profiles.
 1320  */
 1321 
 1322 struct lock kernel_lock; 
 1323 
 1324 void
 1325 _kernel_lock_init(void)
 1326 {
 1327 
 1328         spinlockinit(&kernel_lock, "klock", 0);
 1329 }
 1330 
 1331 /*
 1332  * Acquire/release the kernel lock.  Intended for use in the scheduler
 1333  * and the lower half of the kernel.
 1334  */
 1335 void
 1336 _kernel_lock(int flag)
 1337 {
 1338 
 1339         SCHED_ASSERT_UNLOCKED();
 1340         spinlockmgr(&kernel_lock, flag, 0);
 1341 }
 1342 
 1343 void
 1344 _kernel_unlock(void)
 1345 {
 1346 
 1347         spinlockmgr(&kernel_lock, LK_RELEASE, 0);
 1348 }
 1349 
 1350 /*
 1351  * Acquire/release the kernel_lock on behalf of a process.  Intended for
 1352  * use in the top half of the kernel.
 1353  */
 1354 void
 1355 _kernel_proc_lock(struct lwp *l)
 1356 {
 1357 
 1358         SCHED_ASSERT_UNLOCKED();
 1359         spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
 1360         l->l_flag |= L_BIGLOCK;
 1361 }
 1362 
 1363 void
 1364 _kernel_proc_unlock(struct lwp *l)
 1365 {
 1366 
 1367         l->l_flag &= ~L_BIGLOCK;
 1368         spinlockmgr(&kernel_lock, LK_RELEASE, 0);
 1369 }
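/*
 * Usage sketch (hypothetical top-half code path): the big lock is taken
 * on behalf of the current LWP around MP-unsafe work, while interrupt
 * and scheduler code uses _kernel_lock()/_kernel_unlock() directly.
 */
#if 0
static void
biglock_example(void)
{
        struct lwp *l = curlwp;

        _kernel_proc_lock(l);
        /* ... run code that assumes the big lock is held ... */
        _kernel_proc_unlock(l);
}
#endif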
 1370 #endif /* MULTIPROCESSOR */
