FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*      $NetBSD: kern_lock.c,v 1.86.2.1 2007/08/26 18:45:17 bouyer Exp $        */
    2 
    3 /*-
    4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    9  * NASA Ames Research Center.
   10  *
   11  * This code is derived from software contributed to The NetBSD Foundation
   12  * by Ross Harvey.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. All advertising materials mentioning features or use of this software
   23  *    must display the following acknowledgement:
   24  *      This product includes software developed by the NetBSD
   25  *      Foundation, Inc. and its contributors.
   26  * 4. Neither the name of The NetBSD Foundation nor the names of its
   27  *    contributors may be used to endorse or promote products derived
   28  *    from this software without specific prior written permission.
   29  *
   30  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   32  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   33  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   34  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   35  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   36  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   37  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   38  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   40  * POSSIBILITY OF SUCH DAMAGE.
   41  */
   42 
   43 /*
   44  * Copyright (c) 1995
   45  *      The Regents of the University of California.  All rights reserved.
   46  *
   47  * This code contains ideas from software contributed to Berkeley by
   48  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
   49  * System project at Carnegie-Mellon University.
   50  *
   51  * Redistribution and use in source and binary forms, with or without
   52  * modification, are permitted provided that the following conditions
   53  * are met:
   54  * 1. Redistributions of source code must retain the above copyright
   55  *    notice, this list of conditions and the following disclaimer.
   56  * 2. Redistributions in binary form must reproduce the above copyright
   57  *    notice, this list of conditions and the following disclaimer in the
   58  *    documentation and/or other materials provided with the distribution.
   59  * 3. Neither the name of the University nor the names of its contributors
   60  *    may be used to endorse or promote products derived from this software
   61  *    without specific prior written permission.
   62  *
   63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   73  * SUCH DAMAGE.
   74  *
   75  *      @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.86.2.1 2007/08/26 18:45:17 bouyer Exp $");
   80 
   81 #include "opt_multiprocessor.h"
   82 #include "opt_lockdebug.h"
   83 #include "opt_ddb.h"
   84 
   85 #include <sys/param.h>
   86 #include <sys/proc.h>
   87 #include <sys/lock.h>
   88 #include <sys/systm.h>
   89 #include <machine/cpu.h>
   90 
   91 #if defined(LOCKDEBUG)
   92 #include <sys/syslog.h>
   93 /*
    94  * note that stdarg.h and the ansi style va_start macro are used for both
   95  * ansi and traditional c compiles.
   96  * XXX: this requires that stdarg.h define: va_alist and va_dcl
   97  */
   98 #include <machine/stdarg.h>
   99 
  100 void    lock_printf(const char *fmt, ...)
  101     __attribute__((__format__(__printf__,1,2)));
  102 
  103 static int acquire(__volatile struct lock **, int *, int, int, int);
  104 
  105 int     lock_debug_syslog = 0;  /* defaults to printf, but can be patched */
  106 
  107 #ifdef DDB
  108 #include <ddb/ddbvar.h>
  109 #include <machine/db_machdep.h>
  110 #include <ddb/db_command.h>
  111 #include <ddb/db_interface.h>
  112 #endif
  113 #endif /* defined(LOCKDEBUG) */
  114 
  115 #if defined(MULTIPROCESSOR)
  116 struct simplelock kernel_lock;
  117 #endif
  118 
  119 /*
  120  * Locking primitives implementation.
  121  * Locks provide shared/exclusive synchronization.
  122  */
  123 
  124 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
  125 #if defined(MULTIPROCESSOR) /* { */
  126 #define COUNT_CPU(cpu_id, x)                                            \
  127         curcpu()->ci_spin_locks += (x)
  128 #else
  129 u_long  spin_locks;
  130 #define COUNT_CPU(cpu_id, x)    spin_locks += (x)
  131 #endif /* MULTIPROCESSOR */ /* } */
  132 
  133 #define COUNT(lkp, l, cpu_id, x)                                        \
  134 do {                                                                    \
  135         if ((lkp)->lk_flags & LK_SPIN)                                  \
  136                 COUNT_CPU((cpu_id), (x));                               \
  137         else                                                            \
  138                 (l)->l_locks += (x);                                    \
  139 } while (/*CONSTCOND*/0)
  140 #else
  141 #define COUNT(lkp, p, cpu_id, x)
  142 #define COUNT_CPU(cpu_id, x)
  143 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
  144 
  145 #ifndef SPINLOCK_SPIN_HOOK              /* from <machine/lock.h> */
  146 #define SPINLOCK_SPIN_HOOK              /* nothing */
  147 #endif
  148 
  149 #define INTERLOCK_ACQUIRE(lkp, flags, s)                                \
  150 do {                                                                    \
  151         if ((flags) & LK_SPIN)                                          \
  152                 s = spllock();                                          \
  153         simple_lock(&(lkp)->lk_interlock);                              \
  154 } while (/*CONSTCOND*/ 0)
  155 
  156 #define INTERLOCK_RELEASE(lkp, flags, s)                                \
  157 do {                                                                    \
  158         simple_unlock(&(lkp)->lk_interlock);                            \
  159         if ((flags) & LK_SPIN)                                          \
  160                 splx(s);                                                \
  161 } while (/*CONSTCOND*/ 0)
  162 
  163 #ifdef DDB /* { */
  164 #ifdef MULTIPROCESSOR
  165 int simple_lock_debugger = 1;   /* more serious on MP */
  166 #else
  167 int simple_lock_debugger = 0;
  168 #endif
  169 #define SLOCK_DEBUGGER()        if (simple_lock_debugger) Debugger()
  170 #define SLOCK_TRACE()                                                   \
  171         db_stack_trace_print((db_expr_t)__builtin_frame_address(0),     \
  172             TRUE, 65535, "", lock_printf);
  173 #else
  174 #define SLOCK_DEBUGGER()        /* nothing */
  175 #define SLOCK_TRACE()           /* nothing */
  176 #endif /* } */
  177 
  178 #if defined(LOCKDEBUG)
  179 #if defined(DDB)
  180 #define SPINLOCK_SPINCHECK_DEBUGGER     Debugger()
  181 #else
  182 #define SPINLOCK_SPINCHECK_DEBUGGER     /* nothing */
  183 #endif
  184 
  185 #define SPINLOCK_SPINCHECK_DECL                                         \
  186         /* 32-bits of count -- wrap constitutes a "spinout" */          \
  187         uint32_t __spinc = 0
  188 
  189 #define SPINLOCK_SPINCHECK                                              \
  190 do {                                                                    \
  191         if (++__spinc == 0) {                                           \
  192                 lock_printf("LK_SPIN spinout, excl %d, share %d\n",     \
  193                     lkp->lk_exclusivecount, lkp->lk_sharecount);        \
  194                 if (lkp->lk_exclusivecount)                             \
  195                         lock_printf("held by CPU %lu\n",                \
  196                             (u_long) lkp->lk_cpu);                      \
  197                 if (lkp->lk_lock_file)                                  \
  198                         lock_printf("last locked at %s:%d\n",           \
  199                             lkp->lk_lock_file, lkp->lk_lock_line);      \
  200                 if (lkp->lk_unlock_file)                                \
  201                         lock_printf("last unlocked at %s:%d\n",         \
  202                             lkp->lk_unlock_file, lkp->lk_unlock_line);  \
  203                 SLOCK_TRACE();                                          \
  204                 SPINLOCK_SPINCHECK_DEBUGGER;                            \
  205         }                                                               \
  206 } while (/*CONSTCOND*/ 0)
  207 #else
  208 #define SPINLOCK_SPINCHECK_DECL                 /* nothing */
  209 #define SPINLOCK_SPINCHECK                      /* nothing */
  210 #endif /* LOCKDEBUG && DDB */
  211 
  212 /*
  213  * Acquire a resource.
  214  */
  215 static int
  216 acquire(__volatile struct lock **lkpp, int *s, int extflags,
  217     int drain, int wanted)
  218 {
  219         int error;
  220         __volatile struct lock *lkp = *lkpp;
  221 
  222         KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
  223 
  224         if (extflags & LK_SPIN) {
  225                 int interlocked;
  226 
  227                 SPINLOCK_SPINCHECK_DECL;
  228 
  229                 if (!drain) {
  230                         lkp->lk_waitcount++;
  231                         lkp->lk_flags |= LK_WAIT_NONZERO;
  232                 }
  233                 for (interlocked = 1;;) {
  234                         SPINLOCK_SPINCHECK;
  235                         if ((lkp->lk_flags & wanted) != 0) {
  236                                 if (interlocked) {
  237                                         INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
  238                                         interlocked = 0;
  239                                 }
  240                                 SPINLOCK_SPIN_HOOK;
  241                         } else if (interlocked) {
  242                                 break;
  243                         } else {
  244                                 INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
  245                                 interlocked = 1;
  246                         }
  247                 }
  248                 if (!drain) {
  249                         lkp->lk_waitcount--;
  250                         if (lkp->lk_waitcount == 0)
  251                                 lkp->lk_flags &= ~LK_WAIT_NONZERO;
  252                 }
  253                 KASSERT((lkp->lk_flags & wanted) == 0);
  254                 error = 0;      /* sanity */
  255         } else {
  256                 for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
  257                         if (drain)
  258                                 lkp->lk_flags |= LK_WAITDRAIN;
  259                         else {
  260                                 lkp->lk_waitcount++;
  261                                 lkp->lk_flags |= LK_WAIT_NONZERO;
  262                         }
  263                         /* XXX Cast away volatile. */
  264                         error = ltsleep(drain ?
  265                             (void *)&lkp->lk_flags :
  266                             (void *)lkp, lkp->lk_prio,
  267                             lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
  268                         if (!drain) {
  269                                 lkp->lk_waitcount--;
  270                                 if (lkp->lk_waitcount == 0)
  271                                         lkp->lk_flags &= ~LK_WAIT_NONZERO;
  272                         }
  273                         if (error)
  274                                 break;
  275                         if (extflags & LK_SLEEPFAIL) {
  276                                 error = ENOLCK;
  277                                 break;
  278                         }
  279                         if (lkp->lk_newlock != NULL) {
  280                                 simple_lock(&lkp->lk_newlock->lk_interlock);
  281                                 simple_unlock(&lkp->lk_interlock);
  282                                 if (lkp->lk_waitcount == 0)
  283                                         wakeup((void *)&lkp->lk_newlock);
  284                                 *lkpp = lkp = lkp->lk_newlock;
  285                         }
  286                 }
  287         }
  288 
  289         return error;
  290 }
  291 
  292 #define SETHOLDER(lkp, pid, lid, cpu_id)                                \
  293 do {                                                                    \
  294         if ((lkp)->lk_flags & LK_SPIN)                                  \
  295                 (lkp)->lk_cpu = cpu_id;                                 \
  296         else {                                                          \
  297                 (lkp)->lk_lockholder = pid;                             \
  298                 (lkp)->lk_locklwp = lid;                                \
  299         }                                                               \
  300 } while (/*CONSTCOND*/0)
  301 
  302 #define WEHOLDIT(lkp, pid, lid, cpu_id)                                 \
  303         (((lkp)->lk_flags & LK_SPIN) != 0 ?                             \
  304          ((lkp)->lk_cpu == (cpu_id)) :                                  \
  305          ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
  306 
  307 #define WAKEUP_WAITER(lkp)                                              \
  308 do {                                                                    \
  309         if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==          \
  310             LK_WAIT_NONZERO) {                                          \
  311                 /* XXX Cast away volatile. */                           \
  312                 wakeup((void *)(lkp));                                  \
  313         }                                                               \
  314 } while (/*CONSTCOND*/0)
  315 
  316 #if defined(LOCKDEBUG) /* { */
  317 #if defined(MULTIPROCESSOR) /* { */
  318 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
  319 
  320 #define SPINLOCK_LIST_LOCK()                                            \
  321         __cpu_simple_lock(&spinlock_list_slock.lock_data)
  322 
  323 #define SPINLOCK_LIST_UNLOCK()                                          \
  324         __cpu_simple_unlock(&spinlock_list_slock.lock_data)
  325 #else
  326 #define SPINLOCK_LIST_LOCK()    /* nothing */
  327 
  328 #define SPINLOCK_LIST_UNLOCK()  /* nothing */
  329 #endif /* MULTIPROCESSOR */ /* } */
  330 
  331 TAILQ_HEAD(, lock) spinlock_list =
  332     TAILQ_HEAD_INITIALIZER(spinlock_list);
  333 
  334 #define HAVEIT(lkp)                                                     \
  335 do {                                                                    \
  336         if ((lkp)->lk_flags & LK_SPIN) {                                \
  337                 int s = spllock();                                      \
  338                 SPINLOCK_LIST_LOCK();                                   \
  339                 /* XXX Cast away volatile. */                           \
  340                 TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
  341                     lk_list);                                           \
  342                 SPINLOCK_LIST_UNLOCK();                                 \
  343                 splx(s);                                                \
  344         }                                                               \
  345 } while (/*CONSTCOND*/0)
  346 
  347 #define DONTHAVEIT(lkp)                                                 \
  348 do {                                                                    \
  349         if ((lkp)->lk_flags & LK_SPIN) {                                \
  350                 int s = spllock();                                      \
  351                 SPINLOCK_LIST_LOCK();                                   \
  352                 /* XXX Cast away volatile. */                           \
  353                 TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),      \
  354                     lk_list);                                           \
  355                 SPINLOCK_LIST_UNLOCK();                                 \
  356                 splx(s);                                                \
  357         }                                                               \
  358 } while (/*CONSTCOND*/0)
  359 #else
  360 #define HAVEIT(lkp)             /* nothing */
  361 
  362 #define DONTHAVEIT(lkp)         /* nothing */
  363 #endif /* LOCKDEBUG */ /* } */
  364 
  365 #if defined(LOCKDEBUG)
  366 /*
  367  * Lock debug printing routine; can be configured to print to console
  368  * or log to syslog.
  369  */
  370 void
  371 lock_printf(const char *fmt, ...)
  372 {
  373         char b[150];
  374         va_list ap;
  375 
  376         va_start(ap, fmt);
  377         if (lock_debug_syslog)
  378                 vlog(LOG_DEBUG, fmt, ap);
  379         else {
  380                 vsnprintf(b, sizeof(b), fmt, ap);
  381                 printf_nolog("%s", b);
  382         }
  383         va_end(ap);
  384 }
  385 #endif /* LOCKDEBUG */
  386 
  387 /*
  388  * Transfer any waiting processes from one lock to another.
  389  */
  390 void
  391 transferlockers(struct lock *from, struct lock *to)
  392 {
  393 
  394         KASSERT(from != to);
  395         KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
  396         if (from->lk_waitcount == 0)
  397                 return;
  398         from->lk_newlock = to;
  399         wakeup((void *)from);
  400         tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
  401         from->lk_newlock = NULL;
  402         from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
  403         KASSERT(from->lk_waitcount == 0);
  404 }
  405 
  406 
  407 /*
  408  * Initialize a lock; required before use.
  409  */
  410 void
  411 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
  412 {
  413 
  414         memset(lkp, 0, sizeof(struct lock));
  415         simple_lock_init(&lkp->lk_interlock);
  416         lkp->lk_flags = flags & LK_EXTFLG_MASK;
  417         if (flags & LK_SPIN)
  418                 lkp->lk_cpu = LK_NOCPU;
  419         else {
  420                 lkp->lk_lockholder = LK_NOPROC;
  421                 lkp->lk_newlock = NULL;
  422                 lkp->lk_prio = prio;
  423                 lkp->lk_timo = timo;
  424         }
  425         lkp->lk_wmesg = wmesg;  /* just a name for spin locks */
  426 #if defined(LOCKDEBUG)
  427         lkp->lk_lock_file = NULL;
  428         lkp->lk_unlock_file = NULL;
  429 #endif
  430 }
  431 
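/*
 * Illustrative sketch, not part of the original file: a minimal sleep-lock
 * life cycle using lockinit() and lockmgr().  "example_lock", the "exlock"
 * wait message and the PRIBIO sleep priority are arbitrary example choices.
 */
#if 0
static struct lock example_lock;

static int
example_lock_use(void)
{
	int error;

	lockinit(&example_lock, PRIBIO, "exlock", 0, 0);

	error = lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
	if (error)
		return error;
	/* ... exclusive access to the protected object ... */
	lockmgr(&example_lock, LK_RELEASE, NULL);
	return 0;
}
#endif
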
  432 /*
  433  * Determine the status of a lock.
  434  */
  435 int
  436 lockstatus(struct lock *lkp)
  437 {
  438         int s = 0; /* XXX: gcc */
  439         int lock_type = 0;
  440         struct lwp *l = curlwp; /* XXX */
  441         pid_t pid;
  442         lwpid_t lid;
  443         cpuid_t cpu_id;
  444 
  445         if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
  446                 cpu_id = cpu_number();
  447                 pid = LK_KERNPROC;
  448                 lid = 0;
  449         } else {
  450                 cpu_id = LK_NOCPU;
  451                 pid = l->l_proc->p_pid;
  452                 lid = l->l_lid;
  453         }
  454 
  455         INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
  456         if (lkp->lk_exclusivecount != 0) {
  457                 if (WEHOLDIT(lkp, pid, lid, cpu_id))
  458                         lock_type = LK_EXCLUSIVE;
  459                 else
  460                         lock_type = LK_EXCLOTHER;
  461         } else if (lkp->lk_sharecount != 0)
  462                 lock_type = LK_SHARED;
  463         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  464         return (lock_type);
  465 }
  466 
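/*
 * Illustrative sketch, not part of the original file: lockstatus() is
 * handy for asserting that the caller already holds a lock exclusively
 * before touching the data it protects ("example_lock" as above).
 */
#if 0
static void
example_modify_protected_object(void)
{

	KASSERT(lockstatus(&example_lock) == LK_EXCLUSIVE);
	/* ... safe to modify the object guarded by example_lock ... */
}
#endif
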
  467 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
  468 /*
  469  * Make sure no spin locks are held by a CPU that is about
  470  * to context switch.
  471  */
  472 void
  473 spinlock_switchcheck(void)
  474 {
  475         u_long cnt;
  476         int s;
  477 
  478         s = spllock();
  479 #if defined(MULTIPROCESSOR)
  480         cnt = curcpu()->ci_spin_locks;
  481 #else
  482         cnt = spin_locks;
  483 #endif
  484         splx(s);
  485 
  486         if (cnt != 0)
  487                 panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
  488                     (u_long) cpu_number(), cnt);
  489 }
  490 #endif /* LOCKDEBUG || DIAGNOSTIC */
  491 
  492 /*
  493  * Locks and IPLs (interrupt priority levels):
  494  *
  495  * Locks which may be taken from interrupt context must be handled
  496  * very carefully; you must spl to the highest IPL where the lock
  497  * is needed before acquiring the lock.
  498  *
  499  * It is also important to avoid deadlock, since certain (very high
  500  * priority) interrupts are often needed to keep the system as a whole
  501  * from deadlocking, and must not be blocked while you are spinning
  502  * waiting for a lower-priority lock.
  503  *
  504  * In addition, the lock-debugging hooks themselves need to use locks!
  505  *
   506  * A raw __cpu_simple_lock may be used from interrupts as long as it
  507  * is acquired and held at a single IPL.
  508  *
  509  * A simple_lock (which is a __cpu_simple_lock wrapped with some
  510  * debugging hooks) may be used at or below spllock(), which is
  511  * typically at or just below splhigh() (i.e. blocks everything
  512  * but certain machine-dependent extremely high priority interrupts).
  513  *
  514  * spinlockmgr spinlocks should be used at or below splsched().
  515  *
  516  * Some platforms may have interrupts of higher priority than splsched(),
  517  * including hard serial interrupts, inter-processor interrupts, and
  518  * kernel debugger traps.
  519  */
  520 
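/*
 * Illustrative sketch, not part of the original file: the IPL discipline
 * described above, for a simple_lock that is also taken from interrupt
 * context.  "example_slock" and the shared data are hypothetical.
 */
#if 0
static struct simplelock example_slock = SIMPLELOCK_INITIALIZER;

static void
example_intr_safe_update(void)
{
	int s;

	s = spllock();			/* block the interrupts that take the lock */
	simple_lock(&example_slock);
	/* ... update data shared with the interrupt handler ... */
	simple_unlock(&example_slock);
	splx(s);			/* restore the previous IPL */
}
#endif
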
  521 /*
  522  * XXX XXX kludge around another kludge..
  523  *
  524  * vfs_shutdown() may be called from interrupt context, either as a result
  525  * of a panic, or from the debugger.   It proceeds to call
   526  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
  527  *
  528  * We would like to make an attempt to sync the filesystems in this case, so
  529  * if this happens, we treat attempts to acquire locks specially.
  530  * All locks are acquired on behalf of proc0.
  531  *
   532  * If we've already panicked, we don't block waiting for locks, but
  533  * just barge right ahead since we're already going down in flames.
  534  */
  535 
  536 /*
  537  * Set, change, or release a lock.
  538  *
  539  * Shared requests increment the shared count. Exclusive requests set the
  540  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
  541  * accepted shared locks and shared-to-exclusive upgrades to go away.
  542  */
  543 int
  544 #if defined(LOCKDEBUG)
  545 _lockmgr(__volatile struct lock *lkp, u_int flags,
  546     struct simplelock *interlkp, const char *file, int line)
  547 #else
  548 lockmgr(__volatile struct lock *lkp, u_int flags,
  549     struct simplelock *interlkp)
  550 #endif
  551 {
  552         int error;
  553         pid_t pid;
  554         lwpid_t lid;
  555         int extflags;
  556         cpuid_t cpu_id;
  557         struct lwp *l = curlwp;
  558         int lock_shutdown_noblock = 0;
  559         int s = 0;
  560 
  561         error = 0;
  562 
  563         /* LK_RETRY is for vn_lock, not for lockmgr. */
  564         KASSERT((flags & LK_RETRY) == 0);
  565 
  566         INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
  567         if (flags & LK_INTERLOCK)
  568                 simple_unlock(interlkp);
  569         extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
  570 
  571 #ifdef DIAGNOSTIC /* { */
  572         /*
  573          * Don't allow spins on sleep locks and don't allow sleeps
  574          * on spin locks.
  575          */
  576         if ((flags ^ lkp->lk_flags) & LK_SPIN)
  577                 panic("lockmgr: sleep/spin mismatch");
  578 #endif /* } */
  579 
  580         if (extflags & LK_SPIN) {
  581                 pid = LK_KERNPROC;
  582                 lid = 0;
  583         } else {
  584                 if (l == NULL) {
  585                         if (!doing_shutdown) {
  586                                 panic("lockmgr: no context");
  587                         } else {
  588                                 l = &lwp0;
  589                                 if (panicstr && (!(flags & LK_NOWAIT))) {
  590                                         flags |= LK_NOWAIT;
  591                                         lock_shutdown_noblock = 1;
  592                                 }
  593                         }
  594                 }
  595                 lid = l->l_lid;
  596                 pid = l->l_proc->p_pid;
  597         }
  598         cpu_id = cpu_number();
  599 
  600         /*
  601          * Once a lock has drained, the LK_DRAINING flag is set and an
  602          * exclusive lock is returned. The only valid operation thereafter
  603          * is a single release of that exclusive lock. This final release
  604          * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
  605          * further requests of any sort will result in a panic. The bits
  606          * selected for these two flags are chosen so that they will be set
  607          * in memory that is freed (freed memory is filled with 0xdeadbeef).
  608          * The final release is permitted to give a new lease on life to
  609          * the lock by specifying LK_REENABLE.
  610          */
  611         if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
  612 #ifdef DIAGNOSTIC /* { */
  613                 if (lkp->lk_flags & LK_DRAINED)
  614                         panic("lockmgr: using decommissioned lock");
  615                 if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
  616                     WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
  617                         panic("lockmgr: non-release on draining lock: %d",
  618                             flags & LK_TYPE_MASK);
  619 #endif /* DIAGNOSTIC */ /* } */
  620                 lkp->lk_flags &= ~LK_DRAINING;
  621                 if ((flags & LK_REENABLE) == 0)
  622                         lkp->lk_flags |= LK_DRAINED;
  623         }
  624 
  625         switch (flags & LK_TYPE_MASK) {
  626 
  627         case LK_SHARED:
  628                 if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
  629                         /*
  630                          * If just polling, check to see if we will block.
  631                          */
  632                         if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  633                             (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
  634                                 error = EBUSY;
  635                                 break;
  636                         }
  637                         /*
  638                          * Wait for exclusive locks and upgrades to clear.
  639                          */
  640                         error = acquire(&lkp, &s, extflags, 0,
  641                             LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
  642                         if (error)
  643                                 break;
  644                         lkp->lk_sharecount++;
  645                         lkp->lk_flags |= LK_SHARE_NONZERO;
  646                         COUNT(lkp, l, cpu_id, 1);
  647                         break;
  648                 }
  649                 /*
  650                  * We hold an exclusive lock, so downgrade it to shared.
  651                  * An alternative would be to fail with EDEADLK.
  652                  */
  653                 lkp->lk_sharecount++;
  654                 lkp->lk_flags |= LK_SHARE_NONZERO;
  655                 COUNT(lkp, l, cpu_id, 1);
  656                 /* fall into downgrade */
  657 
  658         case LK_DOWNGRADE:
  659                 if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
  660                     lkp->lk_exclusivecount == 0)
  661                         panic("lockmgr: not holding exclusive lock");
  662                 lkp->lk_sharecount += lkp->lk_exclusivecount;
  663                 lkp->lk_flags |= LK_SHARE_NONZERO;
  664                 lkp->lk_exclusivecount = 0;
  665                 lkp->lk_recurselevel = 0;
  666                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  667                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  668 #if defined(LOCKDEBUG)
  669                 lkp->lk_unlock_file = file;
  670                 lkp->lk_unlock_line = line;
  671 #endif
  672                 DONTHAVEIT(lkp);
  673                 WAKEUP_WAITER(lkp);
  674                 break;
  675 
  676         case LK_EXCLUPGRADE:
  677                 /*
  678                  * If another process is ahead of us to get an upgrade,
  679                  * then we want to fail rather than have an intervening
  680                  * exclusive access.
  681                  */
  682                 if (lkp->lk_flags & LK_WANT_UPGRADE) {
  683                         lkp->lk_sharecount--;
  684                         if (lkp->lk_sharecount == 0)
  685                                 lkp->lk_flags &= ~LK_SHARE_NONZERO;
  686                         COUNT(lkp, l, cpu_id, -1);
  687                         error = EBUSY;
  688                         break;
  689                 }
  690                 /* fall into normal upgrade */
  691 
  692         case LK_UPGRADE:
  693                 /*
  694                  * Upgrade a shared lock to an exclusive one. If another
  695                  * shared lock has already requested an upgrade to an
  696                  * exclusive lock, our shared lock is released and an
  697                  * exclusive lock is requested (which will be granted
  698                  * after the upgrade). If we return an error, the file
  699                  * will always be unlocked.
  700                  */
  701                 if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
  702                         panic("lockmgr: upgrade exclusive lock");
  703                 lkp->lk_sharecount--;
  704                 if (lkp->lk_sharecount == 0)
  705                         lkp->lk_flags &= ~LK_SHARE_NONZERO;
  706                 COUNT(lkp, l, cpu_id, -1);
  707                 /*
  708                  * If we are just polling, check to see if we will block.
  709                  */
  710                 if ((extflags & LK_NOWAIT) &&
  711                     ((lkp->lk_flags & LK_WANT_UPGRADE) ||
  712                      lkp->lk_sharecount > 1)) {
  713                         error = EBUSY;
  714                         break;
  715                 }
  716                 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
  717                         /*
  718                          * We are first shared lock to request an upgrade, so
  719                          * request upgrade and wait for the shared count to
  720                          * drop to zero, then take exclusive lock.
  721                          */
  722                         lkp->lk_flags |= LK_WANT_UPGRADE;
  723                         error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO);
  724                         lkp->lk_flags &= ~LK_WANT_UPGRADE;
  725                         if (error) {
  726                                 WAKEUP_WAITER(lkp);
  727                                 break;
  728                         }
  729                         lkp->lk_flags |= LK_HAVE_EXCL;
  730                         SETHOLDER(lkp, pid, lid, cpu_id);
  731 #if defined(LOCKDEBUG)
  732                         lkp->lk_lock_file = file;
  733                         lkp->lk_lock_line = line;
  734 #endif
  735                         HAVEIT(lkp);
  736                         if (lkp->lk_exclusivecount != 0)
  737                                 panic("lockmgr: non-zero exclusive count");
  738                         lkp->lk_exclusivecount = 1;
  739                         if (extflags & LK_SETRECURSE)
  740                                 lkp->lk_recurselevel = 1;
  741                         COUNT(lkp, l, cpu_id, 1);
  742                         break;
  743                 }
  744                 /*
  745                  * Someone else has requested upgrade. Release our shared
  746                  * lock, awaken upgrade requestor if we are the last shared
  747                  * lock, then request an exclusive lock.
  748                  */
  749                 if (lkp->lk_sharecount == 0)
  750                         WAKEUP_WAITER(lkp);
  751                 /* fall into exclusive request */
  752 
  753         case LK_EXCLUSIVE:
  754                 if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
  755                         /*
  756                          * Recursive lock.
  757                          */
  758                         if ((extflags & LK_CANRECURSE) == 0 &&
  759                              lkp->lk_recurselevel == 0) {
  760                                 if (extflags & LK_RECURSEFAIL) {
  761                                         error = EDEADLK;
  762                                         break;
  763                                 } else
  764                                         panic("lockmgr: locking against myself");
  765                         }
  766                         lkp->lk_exclusivecount++;
  767                         if (extflags & LK_SETRECURSE &&
  768                             lkp->lk_recurselevel == 0)
  769                                 lkp->lk_recurselevel = lkp->lk_exclusivecount;
  770                         COUNT(lkp, l, cpu_id, 1);
  771                         break;
  772                 }
  773                 /*
  774                  * If we are just polling, check to see if we will sleep.
  775                  */
  776                 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  777                      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  778                      LK_SHARE_NONZERO))) {
  779                         error = EBUSY;
  780                         break;
  781                 }
  782                 /*
  783                  * Try to acquire the want_exclusive flag.
  784                  */
  785                 error = acquire(&lkp, &s, extflags, 0,
  786                     LK_HAVE_EXCL | LK_WANT_EXCL);
  787                 if (error)
  788                         break;
  789                 lkp->lk_flags |= LK_WANT_EXCL;
  790                 /*
  791                  * Wait for shared locks and upgrades to finish.
  792                  */
  793                 error = acquire(&lkp, &s, extflags, 0,
  794                     LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
  795                 lkp->lk_flags &= ~LK_WANT_EXCL;
  796                 if (error) {
  797                         WAKEUP_WAITER(lkp);
  798                         break;
  799                 }
  800                 lkp->lk_flags |= LK_HAVE_EXCL;
  801                 SETHOLDER(lkp, pid, lid, cpu_id);
  802 #if defined(LOCKDEBUG)
  803                 lkp->lk_lock_file = file;
  804                 lkp->lk_lock_line = line;
  805 #endif
  806                 HAVEIT(lkp);
  807                 if (lkp->lk_exclusivecount != 0)
  808                         panic("lockmgr: non-zero exclusive count");
  809                 lkp->lk_exclusivecount = 1;
  810                 if (extflags & LK_SETRECURSE)
  811                         lkp->lk_recurselevel = 1;
  812                 COUNT(lkp, l, cpu_id, 1);
  813                 break;
  814 
  815         case LK_RELEASE:
  816                 if (lkp->lk_exclusivecount != 0) {
  817                         if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
  818                                 if (lkp->lk_flags & LK_SPIN) {
  819                                         panic("lockmgr: processor %lu, not "
  820                                             "exclusive lock holder %lu "
  821                                             "unlocking", cpu_id, lkp->lk_cpu);
  822                                 } else {
  823                                         panic("lockmgr: pid %d, not "
  824                                             "exclusive lock holder %d "
  825                                             "unlocking", pid,
  826                                             lkp->lk_lockholder);
  827                                 }
  828                         }
  829                         if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
  830                                 lkp->lk_recurselevel = 0;
  831                         lkp->lk_exclusivecount--;
  832                         COUNT(lkp, l, cpu_id, -1);
  833                         if (lkp->lk_exclusivecount == 0) {
  834                                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  835                                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  836 #if defined(LOCKDEBUG)
  837                                 lkp->lk_unlock_file = file;
  838                                 lkp->lk_unlock_line = line;
  839 #endif
  840                                 DONTHAVEIT(lkp);
  841                         }
  842                 } else if (lkp->lk_sharecount != 0) {
  843                         lkp->lk_sharecount--;
  844                         if (lkp->lk_sharecount == 0)
  845                                 lkp->lk_flags &= ~LK_SHARE_NONZERO;
  846                         COUNT(lkp, l, cpu_id, -1);
  847                 }
  848 #ifdef DIAGNOSTIC
  849                 else
  850                         panic("lockmgr: release of unlocked lock!");
  851 #endif
  852                 WAKEUP_WAITER(lkp);
  853                 break;
  854 
  855         case LK_DRAIN:
  856                 /*
  857                  * Check that we do not already hold the lock, as it can
  858                  * never drain if we do. Unfortunately, we have no way to
  859                  * check for holding a shared lock, but at least we can
  860                  * check for an exclusive one.
  861                  */
  862                 if (WEHOLDIT(lkp, pid, lid, cpu_id))
  863                         panic("lockmgr: draining against myself");
  864                 /*
  865                  * If we are just polling, check to see if we will sleep.
  866                  */
  867                 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  868                      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  869                      LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
  870                         error = EBUSY;
  871                         break;
  872                 }
  873                 error = acquire(&lkp, &s, extflags, 1,
  874                     LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  875                     LK_SHARE_NONZERO | LK_WAIT_NONZERO);
  876                 if (error)
  877                         break;
  878                 lkp->lk_flags |= LK_HAVE_EXCL;
  879                 if ((extflags & LK_RESURRECT) == 0)
  880                         lkp->lk_flags |= LK_DRAINING;
  881                 SETHOLDER(lkp, pid, lid, cpu_id);
  882 #if defined(LOCKDEBUG)
  883                 lkp->lk_lock_file = file;
  884                 lkp->lk_lock_line = line;
  885 #endif
  886                 HAVEIT(lkp);
  887                 lkp->lk_exclusivecount = 1;
  888                 /* XXX unlikely that we'd want this */
  889                 if (extflags & LK_SETRECURSE)
  890                         lkp->lk_recurselevel = 1;
  891                 COUNT(lkp, l, cpu_id, 1);
  892                 break;
  893 
  894         default:
  895                 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  896                 panic("lockmgr: unknown locktype request %d",
  897                     flags & LK_TYPE_MASK);
  898                 /* NOTREACHED */
  899         }
  900         if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
  901             ((lkp->lk_flags &
  902               (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  903               LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
  904                 lkp->lk_flags &= ~LK_WAITDRAIN;
  905                 wakeup((void *)&lkp->lk_flags);
  906         }
  907         /*
  908          * Note that this panic will be a recursive panic, since
  909          * we only set lock_shutdown_noblock above if panicstr != NULL.
  910          */
  911         if (error && lock_shutdown_noblock)
  912                 panic("lockmgr: deadlock (see previous panic)");
  913 
  914         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  915         return (error);
  916 }
  917 
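/*
 * Illustrative sketch, not part of the original file: reading under a
 * shared lock and then upgrading to exclusive before modifying.  As the
 * LK_UPGRADE comments above note, a failed upgrade leaves the lock
 * unlocked, so the caller must not assume it still holds anything.
 */
#if 0
static int
example_read_then_modify(void)
{
	int error;

	error = lockmgr(&example_lock, LK_SHARED, NULL);
	if (error)
		return error;
	/* ... read-only access under the shared lock ... */
	error = lockmgr(&example_lock, LK_UPGRADE, NULL);
	if (error)
		return error;		/* shared lock already released */
	/* ... exclusive access ... */
	lockmgr(&example_lock, LK_RELEASE, NULL);
	return 0;
}
#endif
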
  918 /*
  919  * For a recursive spinlock held one or more times by the current CPU,
  920  * release all N locks, and return N.
  921  * Intended for use in mi_switch() shortly before context switching.
  922  */
  923 
  924 int
  925 #if defined(LOCKDEBUG)
  926 _spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
  927 #else
  928 spinlock_release_all(__volatile struct lock *lkp)
  929 #endif
  930 {
  931         int s, count;
  932         cpuid_t cpu_id;
  933 
  934         KASSERT(lkp->lk_flags & LK_SPIN);
  935 
  936         INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
  937 
  938         cpu_id = cpu_number();
  939         count = lkp->lk_exclusivecount;
  940 
  941         if (count != 0) {
  942 #ifdef DIAGNOSTIC
  943                 if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
  944                         panic("spinlock_release_all: processor %lu, not "
  945                             "exclusive lock holder %lu "
  946                             "unlocking", (long)cpu_id, lkp->lk_cpu);
  947                 }
  948 #endif
  949                 lkp->lk_recurselevel = 0;
  950                 lkp->lk_exclusivecount = 0;
  951                 COUNT_CPU(cpu_id, -count);
  952                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  953                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  954 #if defined(LOCKDEBUG)
  955                 lkp->lk_unlock_file = file;
  956                 lkp->lk_unlock_line = line;
  957 #endif
  958                 DONTHAVEIT(lkp);
  959         }
  960 #ifdef DIAGNOSTIC
  961         else if (lkp->lk_sharecount != 0)
  962                 panic("spinlock_release_all: release of shared lock!");
  963         else
  964                 panic("spinlock_release_all: release of unlocked lock!");
  965 #endif
  966         INTERLOCK_RELEASE(lkp, LK_SPIN, s);
  967 
  968         return (count);
  969 }
  970 
  971 /*
   972  * Acquire a recursive spinlock N times on behalf of the current CPU
   973  * (the counterpart of spinlock_release_all() above).
  974  * Intended for use in mi_switch() right after resuming execution.
  975  */
  976 
  977 void
  978 #if defined(LOCKDEBUG)
  979 _spinlock_acquire_count(__volatile struct lock *lkp, int count,
  980     const char *file, int line)
  981 #else
  982 spinlock_acquire_count(__volatile struct lock *lkp, int count)
  983 #endif
  984 {
  985         int s, error;
  986         cpuid_t cpu_id;
  987 
  988         KASSERT(lkp->lk_flags & LK_SPIN);
  989 
  990         INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
  991 
  992         cpu_id = cpu_number();
  993 
  994 #ifdef DIAGNOSTIC
  995         if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
  996                 panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
  997 #endif
  998         /*
  999          * Try to acquire the want_exclusive flag.
 1000          */
 1001         error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
 1002         lkp->lk_flags |= LK_WANT_EXCL;
 1003         /*
 1004          * Wait for shared locks and upgrades to finish.
 1005          */
 1006         error = acquire(&lkp, &s, LK_SPIN, 0,
 1007             LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE);
 1008         lkp->lk_flags &= ~LK_WANT_EXCL;
 1009         lkp->lk_flags |= LK_HAVE_EXCL;
 1010         SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
 1011 #if defined(LOCKDEBUG)
 1012         lkp->lk_lock_file = file;
 1013         lkp->lk_lock_line = line;
 1014 #endif
 1015         HAVEIT(lkp);
 1016         if (lkp->lk_exclusivecount != 0)
 1017                 panic("lockmgr: non-zero exclusive count");
 1018         lkp->lk_exclusivecount = count;
 1019         lkp->lk_recurselevel = 1;
 1020         COUNT_CPU(cpu_id, count);
 1021 
 1022         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
 1023 }
 1024 
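/*
 * Illustrative sketch, not part of the original file: how the two
 * functions above are meant to be paired around a context switch, for a
 * hypothetical recursive spin lock "example_spin" that was initialized
 * with LK_SPIN.
 */
#if 0
static struct lock example_spin;	/* lockinit()'d elsewhere with LK_SPIN */

static void
example_switch_wrapper(void)
{
	int count;

	count = spinlock_release_all(&example_spin);	/* drop all N holds */
	/* ... the context switch would happen here ... */
	spinlock_acquire_count(&example_spin, count);	/* reacquire them */
}
#endif
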
 1025 
 1026 
 1027 /*
  1028  * Print out information about the state of a lock. Used by VOP_PRINT
  1029  * routines to display status of contained locks.
 1030  */
 1031 void
 1032 lockmgr_printinfo(__volatile struct lock *lkp)
 1033 {
 1034 
 1035         if (lkp->lk_sharecount)
 1036                 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
 1037                     lkp->lk_sharecount);
 1038         else if (lkp->lk_flags & LK_HAVE_EXCL) {
 1039                 printf(" lock type %s: EXCL (count %d) by ",
 1040                     lkp->lk_wmesg, lkp->lk_exclusivecount);
 1041                 if (lkp->lk_flags & LK_SPIN)
 1042                         printf("processor %lu", lkp->lk_cpu);
 1043                 else
 1044                         printf("pid %d.%d", lkp->lk_lockholder,
 1045                             lkp->lk_locklwp);
 1046         } else
 1047                 printf(" not locked");
 1048         if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
 1049                 printf(" with %d pending", lkp->lk_waitcount);
 1050 }
 1051 
 1052 #if defined(LOCKDEBUG) /* { */
 1053 TAILQ_HEAD(, simplelock) simplelock_list =
 1054     TAILQ_HEAD_INITIALIZER(simplelock_list);
 1055 
 1056 #if defined(MULTIPROCESSOR) /* { */
 1057 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
 1058 
 1059 #define SLOCK_LIST_LOCK()                                               \
 1060         __cpu_simple_lock(&simplelock_list_slock.lock_data)
 1061 
 1062 #define SLOCK_LIST_UNLOCK()                                             \
 1063         __cpu_simple_unlock(&simplelock_list_slock.lock_data)
 1064 
 1065 #define SLOCK_COUNT(x)                                                  \
 1066         curcpu()->ci_simple_locks += (x)
 1067 #else
 1068 u_long simple_locks;
 1069 
 1070 #define SLOCK_LIST_LOCK()       /* nothing */
 1071 
 1072 #define SLOCK_LIST_UNLOCK()     /* nothing */
 1073 
 1074 #define SLOCK_COUNT(x)          simple_locks += (x)
 1075 #endif /* MULTIPROCESSOR */ /* } */
 1076 
 1077 #ifdef MULTIPROCESSOR
 1078 #define SLOCK_MP()              lock_printf("on CPU %ld\n",             \
 1079                                     (u_long) cpu_number())
 1080 #else
 1081 #define SLOCK_MP()              /* nothing */
 1082 #endif
 1083 
 1084 #define SLOCK_WHERE(str, alp, id, l)                                    \
 1085 do {                                                                    \
 1086         lock_printf("\n");                                              \
 1087         lock_printf(str);                                               \
 1088         lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
 1089         SLOCK_MP();                                                     \
 1090         if ((alp)->lock_file != NULL)                                   \
 1091                 lock_printf("last locked: %s:%d\n", (alp)->lock_file,   \
 1092                     (alp)->lock_line);                                  \
 1093         if ((alp)->unlock_file != NULL)                                 \
 1094                 lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
 1095                     (alp)->unlock_line);                                \
 1096         SLOCK_TRACE()                                                   \
 1097         SLOCK_DEBUGGER();                                               \
 1098 } while (/*CONSTCOND*/0)
 1099 
 1100 /*
 1101  * Simple lock functions so that the debugger can see from whence
 1102  * they are being called.
 1103  */
 1104 void
 1105 simple_lock_init(struct simplelock *alp)
 1106 {
 1107 
 1108 #if defined(MULTIPROCESSOR) /* { */
 1109         __cpu_simple_lock_init(&alp->lock_data);
 1110 #else
 1111         alp->lock_data = __SIMPLELOCK_UNLOCKED;
 1112 #endif /* } */
 1113         alp->lock_file = NULL;
 1114         alp->lock_line = 0;
 1115         alp->unlock_file = NULL;
 1116         alp->unlock_line = 0;
 1117         alp->lock_holder = LK_NOCPU;
 1118 }
 1119 
 1120 void
 1121 _simple_lock(__volatile struct simplelock *alp, const char *id, int l)
 1122 {
 1123         cpuid_t cpu_id = cpu_number();
 1124         int s;
 1125 
 1126         s = spllock();
 1127 
 1128         /*
 1129          * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1130          * don't take any action, and just fall into the normal spin case.
 1131          */
 1132         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1133 #if defined(MULTIPROCESSOR) /* { */
 1134                 if (alp->lock_holder == cpu_id) {
 1135                         SLOCK_WHERE("simple_lock: locking against myself\n",
 1136                             alp, id, l);
 1137                         goto out;
 1138                 }
 1139 #else
 1140                 SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
 1141                 goto out;
 1142 #endif /* MULTIPROCESSOR */ /* } */
 1143         }
 1144 
 1145 #if defined(MULTIPROCESSOR) /* { */
 1146         /* Acquire the lock before modifying any fields. */
 1147         splx(s);
 1148         __cpu_simple_lock(&alp->lock_data);
 1149         s = spllock();
 1150 #else
 1151         alp->lock_data = __SIMPLELOCK_LOCKED;
 1152 #endif /* } */
 1153 
 1154         if (alp->lock_holder != LK_NOCPU) {
 1155                 SLOCK_WHERE("simple_lock: uninitialized lock\n",
 1156                     alp, id, l);
 1157         }
 1158         alp->lock_file = id;
 1159         alp->lock_line = l;
 1160         alp->lock_holder = cpu_id;
 1161 
 1162         SLOCK_LIST_LOCK();
 1163         /* XXX Cast away volatile */
 1164         TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
 1165         SLOCK_LIST_UNLOCK();
 1166 
 1167         SLOCK_COUNT(1);
 1168 
 1169  out:
 1170         splx(s);
 1171 }
 1172 
 1173 int
 1174 _simple_lock_held(__volatile struct simplelock *alp)
 1175 {
 1176 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
 1177         cpuid_t cpu_id = cpu_number();
 1178 #endif
 1179         int s, locked = 0;
 1180 
 1181         s = spllock();
 1182 
 1183 #if defined(MULTIPROCESSOR)
 1184         if (__cpu_simple_lock_try(&alp->lock_data) == 0)
 1185                 locked = (alp->lock_holder == cpu_id);
 1186         else
 1187                 __cpu_simple_unlock(&alp->lock_data);
 1188 #else
 1189         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1190                 locked = 1;
 1191                 KASSERT(alp->lock_holder == cpu_id);
 1192         }
 1193 #endif
 1194 
 1195         splx(s);
 1196 
 1197         return (locked);
 1198 }
 1199 
 1200 int
 1201 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
 1202 {
 1203         cpuid_t cpu_id = cpu_number();
 1204         int s, rv = 0;
 1205 
 1206         s = spllock();
 1207 
 1208         /*
 1209          * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1210          * don't take any action.
 1211          */
 1212 #if defined(MULTIPROCESSOR) /* { */
 1213         if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
 1214                 if (alp->lock_holder == cpu_id)
 1215                         SLOCK_WHERE("simple_lock_try: locking against myself\n",
 1216                             alp, id, l);
 1217                 goto out;
 1218         }
 1219 #else
 1220         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1221                 SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
 1222                 goto out;
 1223         }
 1224         alp->lock_data = __SIMPLELOCK_LOCKED;
 1225 #endif /* MULTIPROCESSOR */ /* } */
 1226 
 1227         /*
 1228          * At this point, we have acquired the lock.
 1229          */
 1230 
 1231         rv = 1;
 1232 
 1233         alp->lock_file = id;
 1234         alp->lock_line = l;
 1235         alp->lock_holder = cpu_id;
 1236 
 1237         SLOCK_LIST_LOCK();
 1238         /* XXX Cast away volatile. */
 1239         TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
 1240         SLOCK_LIST_UNLOCK();
 1241 
 1242         SLOCK_COUNT(1);
 1243 
 1244  out:
 1245         splx(s);
 1246         return (rv);
 1247 }
 1248 
 1249 void
 1250 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
 1251 {
 1252         int s;
 1253 
 1254         s = spllock();
 1255 
 1256         /*
 1257          * MULTIPROCESSOR case: This is `safe' because we think we hold
 1258          * the lock, and if we don't, we don't take any action.
 1259          */
 1260         if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
 1261                 SLOCK_WHERE("simple_unlock: lock not held\n",
 1262                     alp, id, l);
 1263                 goto out;
 1264         }
 1265 
 1266         SLOCK_LIST_LOCK();
 1267         TAILQ_REMOVE(&simplelock_list, alp, list);
 1268         SLOCK_LIST_UNLOCK();
 1269 
 1270         SLOCK_COUNT(-1);
 1271 
 1272         alp->list.tqe_next = NULL;      /* sanity */
 1273         alp->list.tqe_prev = NULL;      /* sanity */
 1274 
 1275         alp->unlock_file = id;
 1276         alp->unlock_line = l;
 1277 
 1278 #if defined(MULTIPROCESSOR) /* { */
 1279         alp->lock_holder = LK_NOCPU;
 1280         /* Now that we've modified all fields, release the lock. */
 1281         __cpu_simple_unlock(&alp->lock_data);
 1282 #else
 1283         alp->lock_data = __SIMPLELOCK_UNLOCKED;
 1284         KASSERT(alp->lock_holder == cpu_number());
 1285         alp->lock_holder = LK_NOCPU;
 1286 #endif /* } */
 1287 
 1288  out:
 1289         splx(s);
 1290 }
 1291 
 1292 void
 1293 simple_lock_dump(void)
 1294 {
 1295         struct simplelock *alp;
 1296         int s;
 1297 
 1298         s = spllock();
 1299         SLOCK_LIST_LOCK();
 1300         lock_printf("all simple locks:\n");
 1301         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1302                 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
 1303                     alp->lock_file, alp->lock_line);
 1304         }
 1305         SLOCK_LIST_UNLOCK();
 1306         splx(s);
 1307 }
 1308 
 1309 void
 1310 simple_lock_freecheck(void *start, void *end)
 1311 {
 1312         struct simplelock *alp;
 1313         int s;
 1314 
 1315         s = spllock();
 1316         SLOCK_LIST_LOCK();
 1317         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1318                 if ((void *)alp >= start && (void *)alp < end) {
 1319                         lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
 1320                             alp, alp->lock_holder, alp->lock_file,
 1321                             alp->lock_line);
 1322                         SLOCK_DEBUGGER();
 1323                 }
 1324         }
 1325         SLOCK_LIST_UNLOCK();
 1326         splx(s);
 1327 }
 1328 
 1329 /*
 1330  * We must be holding exactly one lock: the sched_lock.
 1331  */
 1332 
 1333 void
 1334 simple_lock_switchcheck(void)
 1335 {
 1336 
 1337         simple_lock_only_held(&sched_lock, "switching");
 1338 }
 1339 
 1340 void
 1341 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
 1342 {
 1343         struct simplelock *alp;
 1344         cpuid_t cpu_id = cpu_number();
 1345         int s;
 1346 
 1347         if (lp) {
 1348                 LOCK_ASSERT(simple_lock_held(lp));
 1349         }
 1350         s = spllock();
 1351         SLOCK_LIST_LOCK();
 1352         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1353                 if (alp == lp)
 1354                         continue;
 1355 #if defined(MULTIPROCESSOR)
 1356                 if (alp == &kernel_lock)
 1357                         continue;
 1358 #endif /* defined(MULTIPROCESSOR) */
 1359                 if (alp->lock_holder == cpu_id)
 1360                         break;
 1361         }
 1362         SLOCK_LIST_UNLOCK();
 1363         splx(s);
 1364 
 1365         if (alp != NULL) {
 1366                 lock_printf("\n%s with held simple_lock %p "
 1367                     "CPU %lu %s:%d\n",
 1368                     where, alp, alp->lock_holder, alp->lock_file,
 1369                     alp->lock_line);
 1370                 SLOCK_TRACE();
 1371                 SLOCK_DEBUGGER();
 1372         }
 1373 }
 1374 #endif /* LOCKDEBUG */ /* } */
 1375 
 1376 #if defined(MULTIPROCESSOR)
 1377 /*
 1378  * Functions for manipulating the kernel_lock.  We put them here
 1379  * so that they show up in profiles.
 1380  */
 1381 
 1382 /*
 1383  * splbiglock: block IPLs which need to grab kernel_lock.
 1384  * XXX splvm or splaudio should be enough.
 1385  */
 1386 #if !defined(__HAVE_SPLBIGLOCK)
 1387 #define splbiglock()    splclock()
 1388 #endif
 1389 
 1390 void
 1391 _kernel_lock_init(void)
 1392 {
 1393 
 1394         simple_lock_init(&kernel_lock);
 1395 }
 1396 
 1397 /*
 1398  * Acquire/release the kernel lock.  Intended for use in the scheduler
 1399  * and the lower half of the kernel.
 1400  */
 1401 void
 1402 _kernel_lock(int flag)
 1403 {
 1404         struct cpu_info *ci = curcpu();
 1405 
 1406         SCHED_ASSERT_UNLOCKED();
 1407 
 1408         if (ci->ci_data.cpu_biglock_count > 0) {
 1409                 LOCK_ASSERT(simple_lock_held(&kernel_lock));
 1410                 ci->ci_data.cpu_biglock_count++;
 1411         } else {
 1412                 int s;
 1413 
 1414                 s = splbiglock();
 1415                 while (!simple_lock_try(&kernel_lock)) {
 1416                         splx(s);
 1417                         SPINLOCK_SPIN_HOOK;
 1418                         s = splbiglock();
 1419                 }
 1420                 ci->ci_data.cpu_biglock_count++;
 1421                 splx(s);
 1422         }
 1423 }
 1424 
 1425 void
 1426 _kernel_unlock(void)
 1427 {
 1428         struct cpu_info *ci = curcpu();
 1429         int s;
 1430 
 1431         KASSERT(ci->ci_data.cpu_biglock_count > 0);
 1432 
 1433         s = splbiglock();
 1434         if ((--ci->ci_data.cpu_biglock_count) == 0)
 1435                 simple_unlock(&kernel_lock);
 1436         splx(s);
 1437 }
 1438 
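/*
 * Illustrative sketch, not part of the original file: bracketing a piece
 * of lower-half work with the big lock.  These underscore-prefixed
 * functions are normally reached through wrapper macros rather than
 * called directly; the direct calls below just show the recursion
 * counting done above.
 */
#if 0
static void
example_lower_half_work(void)
{

	_kernel_lock(0);	/* count 0 -> 1, or n -> n+1 if already held */
	/* ... code that still relies on the giant kernel_lock ... */
	_kernel_unlock();	/* count back down; lock released at zero */
}
#endif
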
 1439 /*
 1440  * Acquire/release the kernel_lock on behalf of a process.  Intended for
 1441  * use in the top half of the kernel.
 1442  */
 1443 void
 1444 _kernel_proc_lock(struct lwp *l)
 1445 {
 1446 
 1447         SCHED_ASSERT_UNLOCKED();
 1448         _kernel_lock(0);
 1449 }
 1450 
 1451 void
 1452 _kernel_proc_unlock(struct lwp *l)
 1453 {
 1454 
 1455         _kernel_unlock();
 1456 }
 1457 
 1458 int
 1459 _kernel_lock_release_all()
 1460 {
 1461         struct cpu_info *ci = curcpu();
 1462         int hold_count;
 1463 
 1464         hold_count = ci->ci_data.cpu_biglock_count;
 1465 
 1466         if (hold_count) {
 1467                 int s;
 1468 
 1469                 s = splbiglock();
 1470                 ci->ci_data.cpu_biglock_count = 0;
 1471                 simple_unlock(&kernel_lock);
 1472                 splx(s);
 1473         }
 1474 
 1475         return hold_count;
 1476 }
 1477 
 1478 void
 1479 _kernel_lock_acquire_count(int hold_count)
 1480 {
 1481 
 1482         KASSERT(curcpu()->ci_data.cpu_biglock_count == 0);
 1483 
 1484         if (hold_count != 0) {
 1485                 struct cpu_info *ci = curcpu();
 1486                 int s;
 1487 
 1488                 s = splbiglock();
 1489                 while (!simple_lock_try(&kernel_lock)) {
 1490                         splx(s);
 1491                         SPINLOCK_SPIN_HOOK;
 1492                         s = splbiglock();
 1493                 }
 1494                 ci->ci_data.cpu_biglock_count = hold_count;
 1495                 splx(s);
 1496         }
 1497 }
 1498 #if defined(DEBUG)
 1499 void
 1500 _kernel_lock_assert_locked()
 1501 {
 1502 
 1503         LOCK_ASSERT(simple_lock_held(&kernel_lock));
 1504 }
 1505 #endif
 1506 #endif /* MULTIPROCESSOR */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.