
FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*      $NetBSD: kern_lock.c,v 1.102.2.2 2007/08/01 14:45:46 liamjfoy Exp $     */
    2 
    3 /*-
    4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    9  * NASA Ames Research Center.
   10  *
   11  * This code is derived from software contributed to The NetBSD Foundation
   12  * by Ross Harvey.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. All advertising materials mentioning features or use of this software
   23  *    must display the following acknowledgement:
   24  *      This product includes software developed by the NetBSD
   25  *      Foundation, Inc. and its contributors.
   26  * 4. Neither the name of The NetBSD Foundation nor the names of its
   27  *    contributors may be used to endorse or promote products derived
   28  *    from this software without specific prior written permission.
   29  *
   30  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   32  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   33  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   34  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   35  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   36  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   37  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   38  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   40  * POSSIBILITY OF SUCH DAMAGE.
   41  */
   42 
   43 /*
   44  * Copyright (c) 1995
   45  *      The Regents of the University of California.  All rights reserved.
   46  *
   47  * This code contains ideas from software contributed to Berkeley by
   48  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
   49  * System project at Carnegie-Mellon University.
   50  *
   51  * Redistribution and use in source and binary forms, with or without
   52  * modification, are permitted provided that the following conditions
   53  * are met:
   54  * 1. Redistributions of source code must retain the above copyright
   55  *    notice, this list of conditions and the following disclaimer.
   56  * 2. Redistributions in binary form must reproduce the above copyright
   57  *    notice, this list of conditions and the following disclaimer in the
   58  *    documentation and/or other materials provided with the distribution.
   59  * 3. Neither the name of the University nor the names of its contributors
   60  *    may be used to endorse or promote products derived from this software
   61  *    without specific prior written permission.
   62  *
   63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   73  * SUCH DAMAGE.
   74  *
   75  *      @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.102.2.2 2007/08/01 14:45:46 liamjfoy Exp $");
   80 
   81 #include "opt_multiprocessor.h"
   82 #include "opt_lockdebug.h"
   83 #include "opt_ddb.h"
   84 
   85 #include <sys/param.h>
   86 #include <sys/proc.h>
   87 #include <sys/lock.h>
   88 #include <sys/systm.h>
   89 #include <machine/cpu.h>
   90 
   91 #include <dev/lockstat.h>
   92 
   93 #if defined(LOCKDEBUG)
   94 #include <sys/syslog.h>
   95 /*
    96  * note that stdarg.h and the ansi style va_start macro are used for both
   97  * ansi and traditional c compiles.
   98  * XXX: this requires that stdarg.h define: va_alist and va_dcl
   99  */
  100 #include <machine/stdarg.h>
  101 
  102 void    lock_printf(const char *fmt, ...)
  103     __attribute__((__format__(__printf__,1,2)));
  104 
  105 static int acquire(volatile struct lock **, int *, int, int, int, uintptr_t ra);
  106 
  107 int     lock_debug_syslog = 0;  /* defaults to printf, but can be patched */
  108 
  109 #ifdef DDB
  110 #include <ddb/ddbvar.h>
  111 #include <machine/db_machdep.h>
  112 #include <ddb/db_command.h>
  113 #include <ddb/db_interface.h>
  114 #endif
  115 #endif /* defined(LOCKDEBUG) */
  116 
  117 #if defined(MULTIPROCESSOR)
  118 struct simplelock kernel_lock;
  119 #endif
  120 
  121 /*
  122  * Locking primitives implementation.
  123  * Locks provide shared/exclusive synchronization.
  124  */
  125 
  126 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
  127 #if defined(MULTIPROCESSOR) /* { */
  128 #define COUNT_CPU(cpu_id, x)                                            \
  129         curcpu()->ci_spin_locks += (x)
  130 #else
  131 u_long  spin_locks;
  132 #define COUNT_CPU(cpu_id, x)    spin_locks += (x)
  133 #endif /* MULTIPROCESSOR */ /* } */
  134 
  135 #define COUNT(lkp, l, cpu_id, x)                                        \
  136 do {                                                                    \
  137         if ((lkp)->lk_flags & LK_SPIN)                                  \
  138                 COUNT_CPU((cpu_id), (x));                               \
  139         else                                                            \
  140                 (l)->l_locks += (x);                                    \
  141 } while (/*CONSTCOND*/0)
  142 #else
  143 #define COUNT(lkp, p, cpu_id, x)
  144 #define COUNT_CPU(cpu_id, x)
  145 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
  146 
  147 #define INTERLOCK_ACQUIRE(lkp, flags, s)                                \
  148 do {                                                                    \
  149         if ((flags) & LK_SPIN)                                          \
  150                 s = spllock();                                          \
  151         simple_lock(&(lkp)->lk_interlock);                              \
  152 } while (/*CONSTCOND*/ 0)
  153 
  154 #define INTERLOCK_RELEASE(lkp, flags, s)                                \
  155 do {                                                                    \
  156         simple_unlock(&(lkp)->lk_interlock);                            \
  157         if ((flags) & LK_SPIN)                                          \
  158                 splx(s);                                                \
  159 } while (/*CONSTCOND*/ 0)
  160 
  161 #ifdef DDB /* { */
  162 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
  163 int simple_lock_debugger = 1;   /* more serious on MP */
  164 #else
  165 int simple_lock_debugger = 0;
  166 #endif
  167 #define SLOCK_DEBUGGER()        if (simple_lock_debugger && db_onpanic) Debugger()
  168 #define SLOCK_TRACE()                                                   \
  169         db_stack_trace_print((db_expr_t)__builtin_frame_address(0),     \
  170             TRUE, 65535, "", lock_printf);
  171 #else
  172 #define SLOCK_DEBUGGER()        /* nothing */
  173 #define SLOCK_TRACE()           /* nothing */
  174 #endif /* } */
  175 
  176 #if defined(LOCKDEBUG)
  177 #if defined(DDB)
  178 #define SPINLOCK_SPINCHECK_DEBUGGER     if (db_onpanic) Debugger()
  179 #else
  180 #define SPINLOCK_SPINCHECK_DEBUGGER     /* nothing */
  181 #endif
  182 
  183 #define SPINLOCK_SPINCHECK_DECL                                         \
  184         /* 32-bits of count -- wrap constitutes a "spinout" */          \
  185         uint32_t __spinc = 0
  186 
  187 #define SPINLOCK_SPINCHECK                                              \
  188 do {                                                                    \
  189         if (++__spinc == 0) {                                           \
  190                 lock_printf("LK_SPIN spinout, excl %d, share %d\n",     \
  191                     lkp->lk_exclusivecount, lkp->lk_sharecount);        \
  192                 if (lkp->lk_exclusivecount)                             \
  193                         lock_printf("held by CPU %lu\n",                \
  194                             (u_long) lkp->lk_cpu);                      \
  195                 if (lkp->lk_lock_file)                                  \
  196                         lock_printf("last locked at %s:%d\n",           \
  197                             lkp->lk_lock_file, lkp->lk_lock_line);      \
  198                 if (lkp->lk_unlock_file)                                \
  199                         lock_printf("last unlocked at %s:%d\n",         \
  200                             lkp->lk_unlock_file, lkp->lk_unlock_line);  \
  201                 SLOCK_TRACE();                                          \
  202                 SPINLOCK_SPINCHECK_DEBUGGER;                            \
  203         }                                                               \
  204 } while (/*CONSTCOND*/ 0)
  205 #else
  206 #define SPINLOCK_SPINCHECK_DECL                 /* nothing */
  207 #define SPINLOCK_SPINCHECK                      /* nothing */
   208 #endif /* LOCKDEBUG */
  209 
  210 #define RETURN_ADDRESS          ((uintptr_t)__builtin_return_address(0))
  211 
  212 /*
  213  * Acquire a resource.
  214  */
  215 static int
  216 acquire(volatile struct lock **lkpp, int *s, int extflags,
  217     int drain, int wanted, uintptr_t ra)
  218 {
  219         int error;
  220         volatile struct lock *lkp = *lkpp;
  221         LOCKSTAT_TIMER(slptime);
  222 
  223         KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
  224 
  225         if (extflags & LK_SPIN) {
  226                 int interlocked;
  227 
  228                 SPINLOCK_SPINCHECK_DECL;
  229 
  230                 if (!drain) {
  231                         lkp->lk_waitcount++;
  232                         lkp->lk_flags |= LK_WAIT_NONZERO;
  233                 }
  234                 for (interlocked = 1;;) {
  235                         SPINLOCK_SPINCHECK;
  236                         if ((lkp->lk_flags & wanted) != 0) {
  237                                 if (interlocked) {
  238                                         INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
  239                                         interlocked = 0;
  240                                 }
  241                                 SPINLOCK_SPIN_HOOK;
  242                         } else if (interlocked) {
  243                                 break;
  244                         } else {
  245                                 INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
  246                                 interlocked = 1;
  247                         }
  248                 }
  249                 if (!drain) {
  250                         lkp->lk_waitcount--;
  251                         if (lkp->lk_waitcount == 0)
  252                                 lkp->lk_flags &= ~LK_WAIT_NONZERO;
  253                 }
  254                 KASSERT((lkp->lk_flags & wanted) == 0);
  255                 error = 0;      /* sanity */
  256         } else {
  257                 for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
  258                         if (drain)
  259                                 lkp->lk_flags |= LK_WAITDRAIN;
  260                         else {
  261                                 lkp->lk_waitcount++;
  262                                 lkp->lk_flags |= LK_WAIT_NONZERO;
  263                         }
  264                         /* XXX Cast away volatile. */
  265                         LOCKSTAT_START_TIMER(slptime);
  266                         error = ltsleep(drain ?
  267                             (volatile const void *)&lkp->lk_flags :
  268                             (volatile const void *)lkp, lkp->lk_prio,
  269                             lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
  270                         LOCKSTAT_STOP_TIMER(slptime);
  271                         LOCKSTAT_EVENT_RA((void *)(uintptr_t)lkp,
  272                             LB_LOCKMGR | LB_SLEEP, 1, slptime, ra);
  273                         if (!drain) {
  274                                 lkp->lk_waitcount--;
  275                                 if (lkp->lk_waitcount == 0)
  276                                         lkp->lk_flags &= ~LK_WAIT_NONZERO;
  277                         }
  278                         if (error)
  279                                 break;
  280                         if (extflags & LK_SLEEPFAIL) {
  281                                 error = ENOLCK;
  282                                 break;
  283                         }
  284                         if (lkp->lk_newlock != NULL) {
  285                                 simple_lock(&lkp->lk_newlock->lk_interlock);
  286                                 simple_unlock(&lkp->lk_interlock);
  287                                 if (lkp->lk_waitcount == 0)
  288                                         wakeup(&lkp->lk_newlock);
  289                                 *lkpp = lkp = lkp->lk_newlock;
  290                         }
  291                 }
  292         }
  293 
  294         return error;
  295 }
  296 
  297 #define SETHOLDER(lkp, pid, lid, cpu_id)                                \
  298 do {                                                                    \
  299         if ((lkp)->lk_flags & LK_SPIN)                                  \
  300                 (lkp)->lk_cpu = cpu_id;                                 \
  301         else {                                                          \
  302                 (lkp)->lk_lockholder = pid;                             \
  303                 (lkp)->lk_locklwp = lid;                                \
  304         }                                                               \
  305 } while (/*CONSTCOND*/0)
  306 
  307 #define WEHOLDIT(lkp, pid, lid, cpu_id)                                 \
  308         (((lkp)->lk_flags & LK_SPIN) != 0 ?                             \
  309          ((lkp)->lk_cpu == (cpu_id)) :                                  \
  310          ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
  311 
  312 #define WAKEUP_WAITER(lkp)                                              \
  313 do {                                                                    \
  314         if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==          \
  315             LK_WAIT_NONZERO) {                                          \
  316                 wakeup((lkp));                                          \
  317         }                                                               \
  318 } while (/*CONSTCOND*/0)
  319 
  320 #if defined(LOCKDEBUG) /* { */
  321 #if defined(MULTIPROCESSOR) /* { */
  322 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
  323 
  324 #define SPINLOCK_LIST_LOCK()                                            \
  325         __cpu_simple_lock(&spinlock_list_slock.lock_data)
  326 
  327 #define SPINLOCK_LIST_UNLOCK()                                          \
  328         __cpu_simple_unlock(&spinlock_list_slock.lock_data)
  329 #else
  330 #define SPINLOCK_LIST_LOCK()    /* nothing */
  331 
  332 #define SPINLOCK_LIST_UNLOCK()  /* nothing */
  333 #endif /* MULTIPROCESSOR */ /* } */
  334 
  335 _TAILQ_HEAD(, struct lock, volatile) spinlock_list =
  336     TAILQ_HEAD_INITIALIZER(spinlock_list);
  337 
  338 #define HAVEIT(lkp)                                                     \
  339 do {                                                                    \
  340         if ((lkp)->lk_flags & LK_SPIN) {                                \
  341                 int sp = spllock();                                     \
  342                 SPINLOCK_LIST_LOCK();                                   \
  343                 TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list);      \
  344                 SPINLOCK_LIST_UNLOCK();                                 \
  345                 splx(sp);                                               \
  346         }                                                               \
  347 } while (/*CONSTCOND*/0)
  348 
  349 #define DONTHAVEIT(lkp)                                                 \
  350 do {                                                                    \
  351         if ((lkp)->lk_flags & LK_SPIN) {                                \
  352                 int sp = spllock();                                     \
  353                 SPINLOCK_LIST_LOCK();                                   \
  354                 TAILQ_REMOVE(&spinlock_list, (lkp), lk_list);           \
  355                 SPINLOCK_LIST_UNLOCK();                                 \
  356                 splx(sp);                                               \
  357         }                                                               \
  358 } while (/*CONSTCOND*/0)
  359 #else
  360 #define HAVEIT(lkp)             /* nothing */
  361 
  362 #define DONTHAVEIT(lkp)         /* nothing */
  363 #endif /* LOCKDEBUG */ /* } */
  364 
  365 #if defined(LOCKDEBUG)
  366 /*
  367  * Lock debug printing routine; can be configured to print to console
  368  * or log to syslog.
  369  */
  370 void
  371 lock_printf(const char *fmt, ...)
  372 {
  373         char b[150];
  374         va_list ap;
  375 
  376         va_start(ap, fmt);
  377         if (lock_debug_syslog)
  378                 vlog(LOG_DEBUG, fmt, ap);
  379         else {
  380                 vsnprintf(b, sizeof(b), fmt, ap);
  381                 printf_nolog("%s", b);
  382         }
  383         va_end(ap);
  384 }
  385 #endif /* LOCKDEBUG */
  386 
  387 /*
  388  * Transfer any waiting processes from one lock to another.
  389  */
  390 void
  391 transferlockers(struct lock *from, struct lock *to)
  392 {
  393 
  394         KASSERT(from != to);
  395         KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
  396         if (from->lk_waitcount == 0)
  397                 return;
  398         from->lk_newlock = to;
  399         wakeup((void *)from);
  400         tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
  401         from->lk_newlock = NULL;
  402         from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
  403         KASSERT(from->lk_waitcount == 0);
  404 }
  405 
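/*
 * Illustrative sketch, not part of the original file: code that replaces
 * one lock with another (the names "oldlk" and "newlk" are hypothetical)
 * moves any sleepers across before the old lock goes away, so that they
 * end up waiting on the lock that will actually protect the data.
 */
#if 0   /* example only */
static void
example_replace_lock(struct lock *oldlk, struct lock *newlk)
{

        lockinit(newlk, PWAIT, "newlk", 0, 0);
        transferlockers(oldlk, newlk);
        /* ... from here on, waiters and new callers use newlk ... */
}
#endif  /* example only */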
  406 
  407 /*
  408  * Initialize a lock; required before use.
  409  */
  410 void
  411 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
  412 {
  413 
  414         memset(lkp, 0, sizeof(struct lock));
  415         simple_lock_init(&lkp->lk_interlock);
  416         lkp->lk_flags = flags & LK_EXTFLG_MASK;
  417         if (flags & LK_SPIN)
  418                 lkp->lk_cpu = LK_NOCPU;
  419         else {
  420                 lkp->lk_lockholder = LK_NOPROC;
  421                 lkp->lk_newlock = NULL;
  422                 lkp->lk_prio = prio;
  423                 lkp->lk_timo = timo;
  424         }
  425         lkp->lk_wmesg = wmesg;  /* just a name for spin locks */
  426 #if defined(LOCKDEBUG)
  427         lkp->lk_lock_file = NULL;
  428         lkp->lk_unlock_file = NULL;
  429 #endif
  430 }
  431 
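/*
 * Illustrative sketch, not part of the original file: how a subsystem
 * might set up and use a lockmgr sleep lock.  "struct foo", foo_example()
 * and the PWAIT priority / "foolck" wmesg are assumptions made up for
 * the example.
 */
#if 0   /* example only */
struct foo {
        struct lock     f_lock;         /* protects f_data */
        int             f_data;
};

static void
foo_example(struct foo *fp)
{

        /* One-time setup: a sleep lock (no LK_SPIN), no timeout. */
        lockinit(&fp->f_lock, PWAIT, "foolck", 0, 0);

        /* Reader: take the lock shared, then drop it. */
        (void) lockmgr(&fp->f_lock, LK_SHARED, NULL);
        /* ... read fp->f_data ... */
        (void) lockmgr(&fp->f_lock, LK_RELEASE, NULL);

        /* Writer: take the lock exclusive, then drop it. */
        (void) lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL);
        fp->f_data++;
        (void) lockmgr(&fp->f_lock, LK_RELEASE, NULL);
}
#endif  /* example only */
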
  432 /*
  433  * Determine the status of a lock.
  434  */
  435 int
  436 lockstatus(struct lock *lkp)
  437 {
  438         int s = 0; /* XXX: gcc */
  439         int lock_type = 0;
  440         struct lwp *l = curlwp; /* XXX */
  441         pid_t pid;
  442         lwpid_t lid;
  443         cpuid_t cpu_num;
  444 
  445         if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
  446                 cpu_num = cpu_number();
  447                 pid = LK_KERNPROC;
  448                 lid = 0;
  449         } else {
  450                 cpu_num = LK_NOCPU;
  451                 pid = l->l_proc->p_pid;
  452                 lid = l->l_lid;
  453         }
  454 
  455         INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
  456         if (lkp->lk_exclusivecount != 0) {
  457                 if (WEHOLDIT(lkp, pid, lid, cpu_num))
  458                         lock_type = LK_EXCLUSIVE;
  459                 else
  460                         lock_type = LK_EXCLOTHER;
  461         } else if (lkp->lk_sharecount != 0)
  462                 lock_type = LK_SHARED;
  463         else if (lkp->lk_flags & (LK_WANT_EXCL | LK_WANT_UPGRADE))
  464                 lock_type = LK_EXCLOTHER;
  465         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  466         return (lock_type);
  467 }
  468 
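/*
 * Illustrative sketch, not part of the original file: a caller asserting
 * that it already holds the lock exclusively before touching protected
 * state.  Reuses the hypothetical "struct foo" from the sketch above.
 */
#if 0   /* example only */
static void
foo_touch(struct foo *fp)
{

        /* Hypothetical rule: callers must hold f_lock exclusively. */
        KASSERT(lockstatus(&fp->f_lock) == LK_EXCLUSIVE);
        fp->f_data++;
}
#endif  /* example only */
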
  469 #if defined(LOCKDEBUG)
  470 /*
  471  * Make sure no spin locks are held by a CPU that is about
  472  * to context switch.
  473  */
  474 void
  475 spinlock_switchcheck(void)
  476 {
  477         u_long cnt;
  478         int s;
  479 
  480         s = spllock();
  481 #if defined(MULTIPROCESSOR)
  482         cnt = curcpu()->ci_spin_locks;
  483 #else
  484         cnt = spin_locks;
  485 #endif
  486         splx(s);
  487 
  488         if (cnt != 0)
  489                 panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
  490                     (u_long) cpu_number(), cnt);
  491 }
  492 #endif /* LOCKDEBUG */
  493 
  494 /*
  495  * Locks and IPLs (interrupt priority levels):
  496  *
  497  * Locks which may be taken from interrupt context must be handled
  498  * very carefully; you must spl to the highest IPL where the lock
  499  * is needed before acquiring the lock.
  500  *
  501  * It is also important to avoid deadlock, since certain (very high
  502  * priority) interrupts are often needed to keep the system as a whole
  503  * from deadlocking, and must not be blocked while you are spinning
  504  * waiting for a lower-priority lock.
  505  *
  506  * In addition, the lock-debugging hooks themselves need to use locks!
  507  *
   508  * A raw __cpu_simple_lock may be used from interrupts as long as it
  509  * is acquired and held at a single IPL.
  510  *
  511  * A simple_lock (which is a __cpu_simple_lock wrapped with some
  512  * debugging hooks) may be used at or below spllock(), which is
  513  * typically at or just below splhigh() (i.e. blocks everything
  514  * but certain machine-dependent extremely high priority interrupts).
  515  *
  516  * spinlockmgr spinlocks should be used at or below splsched().
  517  *
  518  * Some platforms may have interrupts of higher priority than splsched(),
  519  * including hard serial interrupts, inter-processor interrupts, and
  520  * kernel debugger traps.
  521  */
  522 
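/*
 * Illustrative sketch, not part of the original file, of the IPL
 * discipline described above: a hypothetical driver whose interrupt
 * handler and top half share "sc->sc_slock".  The top half must first
 * raise the IPL to the level the handler runs at (splbio() is only an
 * example) so that it can never spin against its own interrupt handler.
 */
#if 0   /* example only */
struct example_softc {
        struct simplelock       sc_slock;       /* protects sc_pending */
        int                     sc_pending;
};

static void
example_top_half(struct example_softc *sc)
{
        int s;

        s = splbio();                   /* block the handler's interrupt */
        simple_lock(&sc->sc_slock);
        sc->sc_pending++;
        simple_unlock(&sc->sc_slock);
        splx(s);
}

static int
example_intr(void *arg)
{
        struct example_softc *sc = arg;

        /* Already at the handler's IPL; just take and release the lock. */
        simple_lock(&sc->sc_slock);
        sc->sc_pending = 0;
        simple_unlock(&sc->sc_slock);
        return (1);
}
#endif  /* example only */
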
  523 /*
  524  * XXX XXX kludge around another kludge..
  525  *
  526  * vfs_shutdown() may be called from interrupt context, either as a result
  527  * of a panic, or from the debugger.   It proceeds to call
   528  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
  529  *
  530  * We would like to make an attempt to sync the filesystems in this case, so
  531  * if this happens, we treat attempts to acquire locks specially.
  532  * All locks are acquired on behalf of proc0.
  533  *
   534  * If we've already panicked, we don't block waiting for locks, but
  535  * just barge right ahead since we're already going down in flames.
  536  */
  537 
  538 /*
  539  * Set, change, or release a lock.
  540  *
  541  * Shared requests increment the shared count. Exclusive requests set the
  542  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
  543  * accepted shared locks and shared-to-exclusive upgrades to go away.
  544  */
  545 int
  546 #if defined(LOCKDEBUG)
  547 _lockmgr(volatile struct lock *lkp, u_int flags,
  548     struct simplelock *interlkp, const char *file, int line)
  549 #else
  550 lockmgr(volatile struct lock *lkp, u_int flags,
  551     struct simplelock *interlkp)
  552 #endif
  553 {
  554         int error;
  555         pid_t pid;
  556         lwpid_t lid;
  557         int extflags;
  558         cpuid_t cpu_num;
  559         struct lwp *l = curlwp;
  560         int lock_shutdown_noblock = 0;
  561         int s = 0;
  562 
  563         error = 0;
  564 
  565         /* LK_RETRY is for vn_lock, not for lockmgr. */
  566         KASSERT((flags & LK_RETRY) == 0);
  567 
  568         INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
  569         if (flags & LK_INTERLOCK)
  570                 simple_unlock(interlkp);
  571         extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
  572 
  573 #ifdef DIAGNOSTIC /* { */
  574         /*
  575          * Don't allow spins on sleep locks and don't allow sleeps
  576          * on spin locks.
  577          */
  578         if ((flags ^ lkp->lk_flags) & LK_SPIN)
  579                 panic("lockmgr: sleep/spin mismatch");
  580 #endif /* } */
  581 
  582         if (extflags & LK_SPIN) {
  583                 pid = LK_KERNPROC;
  584                 lid = 0;
  585         } else {
  586                 if (l == NULL) {
  587                         if (!doing_shutdown) {
  588                                 panic("lockmgr: no context");
  589                         } else {
  590                                 l = &lwp0;
  591                                 if (panicstr && (!(flags & LK_NOWAIT))) {
  592                                         flags |= LK_NOWAIT;
  593                                         lock_shutdown_noblock = 1;
  594                                 }
  595                         }
  596                 }
  597                 lid = l->l_lid;
  598                 pid = l->l_proc->p_pid;
  599         }
  600         cpu_num = cpu_number();
  601 
  602         /*
  603          * Once a lock has drained, the LK_DRAINING flag is set and an
  604          * exclusive lock is returned. The only valid operation thereafter
  605          * is a single release of that exclusive lock. This final release
  606          * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
  607          * further requests of any sort will result in a panic. The bits
  608          * selected for these two flags are chosen so that they will be set
  609          * in memory that is freed (freed memory is filled with 0xdeadbeef).
  610          * The final release is permitted to give a new lease on life to
  611          * the lock by specifying LK_REENABLE.
  612          */
  613         if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
  614 #ifdef DIAGNOSTIC /* { */
  615                 if (lkp->lk_flags & LK_DRAINED)
  616                         panic("lockmgr: using decommissioned lock");
  617                 if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
  618                     WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
  619                         panic("lockmgr: non-release on draining lock: %d",
  620                             flags & LK_TYPE_MASK);
  621 #endif /* DIAGNOSTIC */ /* } */
  622                 lkp->lk_flags &= ~LK_DRAINING;
  623                 if ((flags & LK_REENABLE) == 0)
  624                         lkp->lk_flags |= LK_DRAINED;
  625         }
  626 
  627         switch (flags & LK_TYPE_MASK) {
  628 
  629         case LK_SHARED:
  630                 if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
  631                         /*
  632                          * If just polling, check to see if we will block.
  633                          */
  634                         if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  635                             (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
  636                                 error = EBUSY;
  637                                 break;
  638                         }
  639                         /*
  640                          * Wait for exclusive locks and upgrades to clear.
  641                          */
  642                         error = acquire(&lkp, &s, extflags, 0,
  643                             LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE,
  644                             RETURN_ADDRESS);
  645                         if (error)
  646                                 break;
  647                         lkp->lk_sharecount++;
  648                         lkp->lk_flags |= LK_SHARE_NONZERO;
  649                         COUNT(lkp, l, cpu_num, 1);
  650                         break;
  651                 }
  652                 /*
  653                  * We hold an exclusive lock, so downgrade it to shared.
  654                  * An alternative would be to fail with EDEADLK.
  655                  */
  656                 lkp->lk_sharecount++;
  657                 lkp->lk_flags |= LK_SHARE_NONZERO;
  658                 COUNT(lkp, l, cpu_num, 1);
  659                 /* fall into downgrade */
  660 
  661         case LK_DOWNGRADE:
  662                 if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
  663                     lkp->lk_exclusivecount == 0)
  664                         panic("lockmgr: not holding exclusive lock");
  665                 lkp->lk_sharecount += lkp->lk_exclusivecount;
  666                 lkp->lk_flags |= LK_SHARE_NONZERO;
  667                 lkp->lk_exclusivecount = 0;
  668                 lkp->lk_recurselevel = 0;
  669                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  670                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  671 #if defined(LOCKDEBUG)
  672                 lkp->lk_unlock_file = file;
  673                 lkp->lk_unlock_line = line;
  674 #endif
  675                 DONTHAVEIT(lkp);
  676                 WAKEUP_WAITER(lkp);
  677                 break;
  678 
  679         case LK_EXCLUPGRADE:
  680                 /*
  681                  * If another process is ahead of us to get an upgrade,
  682                  * then we want to fail rather than have an intervening
  683                  * exclusive access.
  684                  */
  685                 if (lkp->lk_flags & LK_WANT_UPGRADE) {
  686                         lkp->lk_sharecount--;
  687                         if (lkp->lk_sharecount == 0)
  688                                 lkp->lk_flags &= ~LK_SHARE_NONZERO;
  689                         COUNT(lkp, l, cpu_num, -1);
  690                         error = EBUSY;
  691                         break;
  692                 }
  693                 /* fall into normal upgrade */
  694 
  695         case LK_UPGRADE:
  696                 /*
  697                  * Upgrade a shared lock to an exclusive one. If another
  698                  * shared lock has already requested an upgrade to an
  699                  * exclusive lock, our shared lock is released and an
  700                  * exclusive lock is requested (which will be granted
  701                  * after the upgrade). If we return an error, the file
  702                  * will always be unlocked.
  703                  */
  704                 if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
  705                         panic("lockmgr: upgrade exclusive lock");
  706                 lkp->lk_sharecount--;
  707                 if (lkp->lk_sharecount == 0)
  708                         lkp->lk_flags &= ~LK_SHARE_NONZERO;
  709                 COUNT(lkp, l, cpu_num, -1);
  710                 /*
  711                  * If we are just polling, check to see if we will block.
  712                  */
  713                 if ((extflags & LK_NOWAIT) &&
  714                     ((lkp->lk_flags & LK_WANT_UPGRADE) ||
  715                      lkp->lk_sharecount > 1)) {
  716                         error = EBUSY;
  717                         break;
  718                 }
  719                 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
  720                         /*
   721                          * We are the first shared lock to request an upgrade, so
  722                          * request upgrade and wait for the shared count to
  723                          * drop to zero, then take exclusive lock.
  724                          */
  725                         lkp->lk_flags |= LK_WANT_UPGRADE;
  726                         error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO,
  727                             RETURN_ADDRESS);
  728                         lkp->lk_flags &= ~LK_WANT_UPGRADE;
  729                         if (error) {
  730                                 WAKEUP_WAITER(lkp);
  731                                 break;
  732                         }
  733                         lkp->lk_flags |= LK_HAVE_EXCL;
  734                         SETHOLDER(lkp, pid, lid, cpu_num);
  735 #if defined(LOCKDEBUG)
  736                         lkp->lk_lock_file = file;
  737                         lkp->lk_lock_line = line;
  738 #endif
  739                         HAVEIT(lkp);
  740                         if (lkp->lk_exclusivecount != 0)
  741                                 panic("lockmgr: non-zero exclusive count");
  742                         lkp->lk_exclusivecount = 1;
  743                         if (extflags & LK_SETRECURSE)
  744                                 lkp->lk_recurselevel = 1;
  745                         COUNT(lkp, l, cpu_num, 1);
  746                         break;
  747                 }
  748                 /*
  749                  * Someone else has requested upgrade. Release our shared
  750                  * lock, awaken upgrade requestor if we are the last shared
  751                  * lock, then request an exclusive lock.
  752                  */
  753                 if (lkp->lk_sharecount == 0)
  754                         WAKEUP_WAITER(lkp);
  755                 /* fall into exclusive request */
  756 
  757         case LK_EXCLUSIVE:
  758                 if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
  759                         /*
  760                          * Recursive lock.
  761                          */
  762                         if ((extflags & LK_CANRECURSE) == 0 &&
  763                              lkp->lk_recurselevel == 0) {
  764                                 if (extflags & LK_RECURSEFAIL) {
  765                                         error = EDEADLK;
  766                                         break;
  767                                 } else
  768                                         panic("lockmgr: locking against myself");
  769                         }
  770                         lkp->lk_exclusivecount++;
  771                         if (extflags & LK_SETRECURSE &&
  772                             lkp->lk_recurselevel == 0)
  773                                 lkp->lk_recurselevel = lkp->lk_exclusivecount;
  774                         COUNT(lkp, l, cpu_num, 1);
  775                         break;
  776                 }
  777                 /*
  778                  * If we are just polling, check to see if we will sleep.
  779                  */
  780                 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  781                      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  782                      LK_SHARE_NONZERO))) {
  783                         error = EBUSY;
  784                         break;
  785                 }
  786                 /*
  787                  * Try to acquire the want_exclusive flag.
  788                  */
  789                 error = acquire(&lkp, &s, extflags, 0,
  790                     LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
  791                 if (error)
  792                         break;
  793                 lkp->lk_flags |= LK_WANT_EXCL;
  794                 /*
  795                  * Wait for shared locks and upgrades to finish.
  796                  */
  797                 error = acquire(&lkp, &s, extflags, 0,
  798                     LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO,
  799                     RETURN_ADDRESS);
  800                 lkp->lk_flags &= ~LK_WANT_EXCL;
  801                 if (error) {
  802                         WAKEUP_WAITER(lkp);
  803                         break;
  804                 }
  805                 lkp->lk_flags |= LK_HAVE_EXCL;
  806                 SETHOLDER(lkp, pid, lid, cpu_num);
  807 #if defined(LOCKDEBUG)
  808                 lkp->lk_lock_file = file;
  809                 lkp->lk_lock_line = line;
  810 #endif
  811                 HAVEIT(lkp);
  812                 if (lkp->lk_exclusivecount != 0)
  813                         panic("lockmgr: non-zero exclusive count");
  814                 lkp->lk_exclusivecount = 1;
  815                 if (extflags & LK_SETRECURSE)
  816                         lkp->lk_recurselevel = 1;
  817                 COUNT(lkp, l, cpu_num, 1);
  818                 break;
  819 
  820         case LK_RELEASE:
  821                 if (lkp->lk_exclusivecount != 0) {
  822                         if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
  823                                 if (lkp->lk_flags & LK_SPIN) {
  824                                         panic("lockmgr: processor %lu, not "
  825                                             "exclusive lock holder %lu "
  826                                             "unlocking", cpu_num, lkp->lk_cpu);
  827                                 } else {
  828                                         panic("lockmgr: pid %d, not "
  829                                             "exclusive lock holder %d "
  830                                             "unlocking", pid,
  831                                             lkp->lk_lockholder);
  832                                 }
  833                         }
  834                         if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
  835                                 lkp->lk_recurselevel = 0;
  836                         lkp->lk_exclusivecount--;
  837                         COUNT(lkp, l, cpu_num, -1);
  838                         if (lkp->lk_exclusivecount == 0) {
  839                                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  840                                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  841 #if defined(LOCKDEBUG)
  842                                 lkp->lk_unlock_file = file;
  843                                 lkp->lk_unlock_line = line;
  844 #endif
  845                                 DONTHAVEIT(lkp);
  846                         }
  847                 } else if (lkp->lk_sharecount != 0) {
  848                         lkp->lk_sharecount--;
  849                         if (lkp->lk_sharecount == 0)
  850                                 lkp->lk_flags &= ~LK_SHARE_NONZERO;
  851                         COUNT(lkp, l, cpu_num, -1);
  852                 }
  853 #ifdef DIAGNOSTIC
  854                 else
  855                         panic("lockmgr: release of unlocked lock!");
  856 #endif
  857                 WAKEUP_WAITER(lkp);
  858                 break;
  859 
  860         case LK_DRAIN:
  861                 /*
  862                  * Check that we do not already hold the lock, as it can
  863                  * never drain if we do. Unfortunately, we have no way to
  864                  * check for holding a shared lock, but at least we can
  865                  * check for an exclusive one.
  866                  */
  867                 if (WEHOLDIT(lkp, pid, lid, cpu_num))
  868                         panic("lockmgr: draining against myself");
  869                 /*
  870                  * If we are just polling, check to see if we will sleep.
  871                  */
  872                 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
  873                      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  874                      LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
  875                         error = EBUSY;
  876                         break;
  877                 }
  878                 error = acquire(&lkp, &s, extflags, 1,
  879                     LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  880                     LK_SHARE_NONZERO | LK_WAIT_NONZERO,
  881                     RETURN_ADDRESS);
  882                 if (error)
  883                         break;
  884                 lkp->lk_flags |= LK_HAVE_EXCL;
  885                 if ((extflags & LK_RESURRECT) == 0)
  886                         lkp->lk_flags |= LK_DRAINING;
  887                 SETHOLDER(lkp, pid, lid, cpu_num);
  888 #if defined(LOCKDEBUG)
  889                 lkp->lk_lock_file = file;
  890                 lkp->lk_lock_line = line;
  891 #endif
  892                 HAVEIT(lkp);
  893                 lkp->lk_exclusivecount = 1;
  894                 /* XXX unlikely that we'd want this */
  895                 if (extflags & LK_SETRECURSE)
  896                         lkp->lk_recurselevel = 1;
  897                 COUNT(lkp, l, cpu_num, 1);
  898                 break;
  899 
  900         default:
  901                 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  902                 panic("lockmgr: unknown locktype request %d",
  903                     flags & LK_TYPE_MASK);
  904                 /* NOTREACHED */
  905         }
  906         if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
  907             ((lkp->lk_flags &
  908               (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
  909               LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
  910                 lkp->lk_flags &= ~LK_WAITDRAIN;
  911                 wakeup(&lkp->lk_flags);
  912         }
  913         /*
  914          * Note that this panic will be a recursive panic, since
  915          * we only set lock_shutdown_noblock above if panicstr != NULL.
  916          */
  917         if (error && lock_shutdown_noblock)
  918                 panic("lockmgr: deadlock (see previous panic)");
  919 
  920         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
  921         return (error);
  922 }
  923 
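/*
 * Illustrative sketch, not part of the original file, of the upgrade and
 * downgrade cases handled above, reusing the hypothetical "struct foo"
 * from the earlier sketch.  Note that a failed LK_UPGRADE leaves the
 * lock completely unlocked, as described in the LK_UPGRADE case.
 */
#if 0   /* example only */
static int
foo_modify(struct foo *fp)
{
        int error;

        (void) lockmgr(&fp->f_lock, LK_SHARED, NULL);
        /* ... decide that exclusive access is needed after all ... */
        error = lockmgr(&fp->f_lock, LK_UPGRADE, NULL);
        if (error) {
                /* The shared hold is gone too; the caller starts over. */
                return (error);
        }
        fp->f_data++;
        /* Hand the lock back down to shared, then release it. */
        (void) lockmgr(&fp->f_lock, LK_DOWNGRADE, NULL);
        (void) lockmgr(&fp->f_lock, LK_RELEASE, NULL);
        return (0);
}
#endif  /* example only */
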
  924 /*
  925  * For a recursive spinlock held one or more times by the current CPU,
  926  * release all N locks, and return N.
  927  * Intended for use in mi_switch() shortly before context switching.
  928  */
  929 
  930 int
  931 #if defined(LOCKDEBUG)
  932 _spinlock_release_all(volatile struct lock *lkp, const char *file, int line)
  933 #else
  934 spinlock_release_all(volatile struct lock *lkp)
  935 #endif
  936 {
  937         int s, count;
  938         cpuid_t cpu_num;
  939 
  940         KASSERT(lkp->lk_flags & LK_SPIN);
  941 
  942         INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
  943 
  944         cpu_num = cpu_number();
  945         count = lkp->lk_exclusivecount;
  946 
  947         if (count != 0) {
  948 #ifdef DIAGNOSTIC
  949                 if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
  950                         panic("spinlock_release_all: processor %lu, not "
  951                             "exclusive lock holder %lu "
  952                             "unlocking", (long)cpu_num, lkp->lk_cpu);
  953                 }
  954 #endif
  955                 lkp->lk_recurselevel = 0;
  956                 lkp->lk_exclusivecount = 0;
  957                 COUNT_CPU(cpu_num, -count);
  958                 lkp->lk_flags &= ~LK_HAVE_EXCL;
  959                 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
  960 #if defined(LOCKDEBUG)
  961                 lkp->lk_unlock_file = file;
  962                 lkp->lk_unlock_line = line;
  963 #endif
  964                 DONTHAVEIT(lkp);
  965         }
  966 #ifdef DIAGNOSTIC
  967         else if (lkp->lk_sharecount != 0)
  968                 panic("spinlock_release_all: release of shared lock!");
  969         else
  970                 panic("spinlock_release_all: release of unlocked lock!");
  971 #endif
  972         INTERLOCK_RELEASE(lkp, LK_SPIN, s);
  973 
  974         return (count);
  975 }
  976 
  977 /*
   978  * For a recursive spinlock previously held N times by the current CPU,
   979  * reacquire the lock and restore all N holds.
  980  * Intended for use in mi_switch() right after resuming execution.
  981  */
  982 
  983 void
  984 #if defined(LOCKDEBUG)
  985 _spinlock_acquire_count(volatile struct lock *lkp, int count,
  986     const char *file, int line)
  987 #else
  988 spinlock_acquire_count(volatile struct lock *lkp, int count)
  989 #endif
  990 {
  991         int s, error;
  992         cpuid_t cpu_num;
  993 
  994         KASSERT(lkp->lk_flags & LK_SPIN);
  995 
  996         INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
  997 
  998         cpu_num = cpu_number();
  999 
 1000 #ifdef DIAGNOSTIC
 1001         if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
 1002                 panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_num);
 1003 #endif
 1004         /*
 1005          * Try to acquire the want_exclusive flag.
 1006          */
 1007         error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL,
 1008             RETURN_ADDRESS);
 1009         lkp->lk_flags |= LK_WANT_EXCL;
 1010         /*
 1011          * Wait for shared locks and upgrades to finish.
 1012          */
 1013         error = acquire(&lkp, &s, LK_SPIN, 0,
 1014             LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE,
 1015             RETURN_ADDRESS);
 1016         lkp->lk_flags &= ~LK_WANT_EXCL;
 1017         lkp->lk_flags |= LK_HAVE_EXCL;
 1018         SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
 1019 #if defined(LOCKDEBUG)
 1020         lkp->lk_lock_file = file;
 1021         lkp->lk_lock_line = line;
 1022 #endif
 1023         HAVEIT(lkp);
 1024         if (lkp->lk_exclusivecount != 0)
 1025                 panic("lockmgr: non-zero exclusive count");
 1026         lkp->lk_exclusivecount = count;
 1027         lkp->lk_recurselevel = 1;
 1028         COUNT_CPU(cpu_num, count);
 1029 
 1030         INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
 1031 }
 1032 
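/*
 * Illustrative sketch, not part of the original file: the pairing the two
 * routines above are intended for, around a context switch, assuming a
 * hypothetical spin lockmgr lock (initialized with LK_SPIN) that this CPU
 * may hold recursively.
 */
#if 0   /* example only */
static void
example_switch_wrapper(volatile struct lock *lk)
{
        int held;

        held = spinlock_release_all(lk);        /* drop every hold */
        /* ... mi_switch(), run something else, resume ... */
        if (held != 0)
                spinlock_acquire_count(lk, held);       /* take them back */
}
#endif  /* example only */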
 1033 
 1034 
 1035 /*
  1036  * Print out information about the state of a lock. Used by VOP_PRINT
  1037  * routines to display the status of contained locks.
 1038  */
 1039 void
 1040 lockmgr_printinfo(volatile struct lock *lkp)
 1041 {
 1042 
 1043         if (lkp->lk_sharecount)
 1044                 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
 1045                     lkp->lk_sharecount);
 1046         else if (lkp->lk_flags & LK_HAVE_EXCL) {
 1047                 printf(" lock type %s: EXCL (count %d) by ",
 1048                     lkp->lk_wmesg, lkp->lk_exclusivecount);
 1049                 if (lkp->lk_flags & LK_SPIN)
 1050                         printf("processor %lu", lkp->lk_cpu);
 1051                 else
 1052                         printf("pid %d.%d", lkp->lk_lockholder,
 1053                             lkp->lk_locklwp);
 1054         } else
 1055                 printf(" not locked");
 1056         if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
 1057                 printf(" with %d pending", lkp->lk_waitcount);
 1058 }
 1059 
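/*
 * Illustrative sketch, not part of the original file: how a VOP_PRINT-style
 * routine might use lockmgr_printinfo().  "struct example_node" and its
 * n_lock member are assumptions made up for the example.
 */
#if 0   /* example only */
struct example_node {
        struct lock     n_lock;
};

static int
example_print(struct example_node *np)
{

        printf("example node %p", np);
        lockmgr_printinfo(&np->n_lock);
        printf("\n");
        return (0);
}
#endif  /* example only */
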
 1060 #if defined(LOCKDEBUG) /* { */
 1061 _TAILQ_HEAD(, struct simplelock, volatile) simplelock_list =
 1062     TAILQ_HEAD_INITIALIZER(simplelock_list);
 1063 
 1064 #if defined(MULTIPROCESSOR) /* { */
 1065 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
 1066 
 1067 #define SLOCK_LIST_LOCK()                                               \
 1068         __cpu_simple_lock(&simplelock_list_slock.lock_data)
 1069 
 1070 #define SLOCK_LIST_UNLOCK()                                             \
 1071         __cpu_simple_unlock(&simplelock_list_slock.lock_data)
 1072 
 1073 #define SLOCK_COUNT(x)                                                  \
 1074         curcpu()->ci_simple_locks += (x)
 1075 #else
 1076 u_long simple_locks;
 1077 
 1078 #define SLOCK_LIST_LOCK()       /* nothing */
 1079 
 1080 #define SLOCK_LIST_UNLOCK()     /* nothing */
 1081 
 1082 #define SLOCK_COUNT(x)          simple_locks += (x)
 1083 #endif /* MULTIPROCESSOR */ /* } */
 1084 
 1085 #ifdef MULTIPROCESSOR
 1086 #define SLOCK_MP()              lock_printf("on CPU %ld\n",             \
 1087                                     (u_long) cpu_number())
 1088 #else
 1089 #define SLOCK_MP()              /* nothing */
 1090 #endif
 1091 
 1092 #define SLOCK_WHERE(str, alp, id, l)                                    \
 1093 do {                                                                    \
 1094         lock_printf("\n");                                              \
 1095         lock_printf(str);                                               \
 1096         lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
 1097         SLOCK_MP();                                                     \
 1098         if ((alp)->lock_file != NULL)                                   \
 1099                 lock_printf("last locked: %s:%d\n", (alp)->lock_file,   \
 1100                     (alp)->lock_line);                                  \
 1101         if ((alp)->unlock_file != NULL)                                 \
 1102                 lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
 1103                     (alp)->unlock_line);                                \
 1104         SLOCK_TRACE()                                                   \
 1105         SLOCK_DEBUGGER();                                               \
 1106 } while (/*CONSTCOND*/0)
 1107 
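/*
 * Illustrative sketch, not part of the original file: under LOCKDEBUG the
 * public simple_lock()/simple_unlock() names are expected to be macros in
 * <sys/lock.h> along roughly these lines, so the "id"/"l" arguments of the
 * functions below record the caller's file and line rather than this file's.
 */
#if 0   /* example only */
#define simple_lock(alp)        _simple_lock((alp), __FILE__, __LINE__)
#define simple_lock_try(alp)    _simple_lock_try((alp), __FILE__, __LINE__)
#define simple_unlock(alp)      _simple_unlock((alp), __FILE__, __LINE__)
#endif  /* example only */
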
 1108 /*
 1109  * Simple lock functions so that the debugger can see from whence
 1110  * they are being called.
 1111  */
 1112 void
 1113 simple_lock_init(volatile struct simplelock *alp)
 1114 {
 1115 
 1116 #if defined(MULTIPROCESSOR) /* { */
 1117         __cpu_simple_lock_init(&alp->lock_data);
 1118 #else
 1119         alp->lock_data = __SIMPLELOCK_UNLOCKED;
 1120 #endif /* } */
 1121         alp->lock_file = NULL;
 1122         alp->lock_line = 0;
 1123         alp->unlock_file = NULL;
 1124         alp->unlock_line = 0;
 1125         alp->lock_holder = LK_NOCPU;
 1126 }
 1127 
 1128 void
 1129 _simple_lock(volatile struct simplelock *alp, const char *id, int l)
 1130 {
 1131         cpuid_t cpu_num = cpu_number();
 1132         int s;
 1133 
 1134         s = spllock();
 1135 
 1136         /*
 1137          * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1138          * don't take any action, and just fall into the normal spin case.
 1139          */
 1140         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1141 #if defined(MULTIPROCESSOR) /* { */
 1142                 if (alp->lock_holder == cpu_num) {
 1143                         SLOCK_WHERE("simple_lock: locking against myself\n",
 1144                             alp, id, l);
 1145                         goto out;
 1146                 }
 1147 #else
 1148                 SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
 1149                 goto out;
 1150 #endif /* MULTIPROCESSOR */ /* } */
 1151         }
 1152 
 1153 #if defined(MULTIPROCESSOR) /* { */
 1154         /* Acquire the lock before modifying any fields. */
 1155         splx(s);
 1156         __cpu_simple_lock(&alp->lock_data);
 1157         s = spllock();
 1158 #else
 1159         alp->lock_data = __SIMPLELOCK_LOCKED;
 1160 #endif /* } */
 1161 
 1162         if (alp->lock_holder != LK_NOCPU) {
 1163                 SLOCK_WHERE("simple_lock: uninitialized lock\n",
 1164                     alp, id, l);
 1165         }
 1166         alp->lock_file = id;
 1167         alp->lock_line = l;
 1168         alp->lock_holder = cpu_num;
 1169 
 1170         SLOCK_LIST_LOCK();
 1171         TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
 1172         SLOCK_LIST_UNLOCK();
 1173 
 1174         SLOCK_COUNT(1);
 1175 
 1176  out:
 1177         splx(s);
 1178 }
 1179 
 1180 int
 1181 _simple_lock_held(volatile struct simplelock *alp)
 1182 {
 1183 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
 1184         cpuid_t cpu_num = cpu_number();
 1185 #endif
 1186         int s, locked = 0;
 1187 
 1188         s = spllock();
 1189 
 1190 #if defined(MULTIPROCESSOR)
 1191         if (__cpu_simple_lock_try(&alp->lock_data) == 0)
 1192                 locked = (alp->lock_holder == cpu_num);
 1193         else
 1194                 __cpu_simple_unlock(&alp->lock_data);
 1195 #else
 1196         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1197                 locked = 1;
 1198                 KASSERT(alp->lock_holder == cpu_num);
 1199         }
 1200 #endif
 1201 
 1202         splx(s);
 1203 
 1204         return (locked);
 1205 }
 1206 
 1207 int
 1208 _simple_lock_try(volatile struct simplelock *alp, const char *id, int l)
 1209 {
 1210         cpuid_t cpu_num = cpu_number();
 1211         int s, rv = 0;
 1212 
 1213         s = spllock();
 1214 
 1215         /*
 1216          * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1217          * don't take any action.
 1218          */
 1219 #if defined(MULTIPROCESSOR) /* { */
 1220         if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
 1221                 if (alp->lock_holder == cpu_num)
 1222                         SLOCK_WHERE("simple_lock_try: locking against myself\n",
 1223                             alp, id, l);
 1224                 goto out;
 1225         }
 1226 #else
 1227         if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1228                 SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
 1229                 goto out;
 1230         }
 1231         alp->lock_data = __SIMPLELOCK_LOCKED;
 1232 #endif /* MULTIPROCESSOR */ /* } */
 1233 
 1234         /*
 1235          * At this point, we have acquired the lock.
 1236          */
 1237 
 1238         rv = 1;
 1239 
 1240         alp->lock_file = id;
 1241         alp->lock_line = l;
 1242         alp->lock_holder = cpu_num;
 1243 
 1244         SLOCK_LIST_LOCK();
 1245         TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
 1246         SLOCK_LIST_UNLOCK();
 1247 
 1248         SLOCK_COUNT(1);
 1249 
 1250  out:
 1251         splx(s);
 1252         return (rv);
 1253 }
 1254 
 1255 void
 1256 _simple_unlock(volatile struct simplelock *alp, const char *id, int l)
 1257 {
 1258         int s;
 1259 
 1260         s = spllock();
 1261 
 1262         /*
 1263          * MULTIPROCESSOR case: This is `safe' because we think we hold
 1264          * the lock, and if we don't, we don't take any action.
 1265          */
 1266         if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
 1267                 SLOCK_WHERE("simple_unlock: lock not held\n",
 1268                     alp, id, l);
 1269                 goto out;
 1270         }
 1271 
 1272         SLOCK_LIST_LOCK();
 1273         TAILQ_REMOVE(&simplelock_list, alp, list);
 1274         SLOCK_LIST_UNLOCK();
 1275 
 1276         SLOCK_COUNT(-1);
 1277 
 1278         alp->list.tqe_next = NULL;      /* sanity */
 1279         alp->list.tqe_prev = NULL;      /* sanity */
 1280 
 1281         alp->unlock_file = id;
 1282         alp->unlock_line = l;
 1283 
 1284 #if defined(MULTIPROCESSOR) /* { */
 1285         alp->lock_holder = LK_NOCPU;
 1286         /* Now that we've modified all fields, release the lock. */
 1287         __cpu_simple_unlock(&alp->lock_data);
 1288 #else
 1289         alp->lock_data = __SIMPLELOCK_UNLOCKED;
 1290         KASSERT(alp->lock_holder == cpu_number());
 1291         alp->lock_holder = LK_NOCPU;
 1292 #endif /* } */
 1293 
 1294  out:
 1295         splx(s);
 1296 }
 1297 
 1298 void
 1299 simple_lock_dump(void)
 1300 {
 1301         volatile struct simplelock *alp;
 1302         int s;
 1303 
 1304         s = spllock();
 1305         SLOCK_LIST_LOCK();
 1306         lock_printf("all simple locks:\n");
 1307         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1308                 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
 1309                     alp->lock_file, alp->lock_line);
 1310         }
 1311         SLOCK_LIST_UNLOCK();
 1312         splx(s);
 1313 }
 1314 
 1315 void
 1316 simple_lock_freecheck(void *start, void *end)
 1317 {
 1318         volatile struct simplelock *alp;
 1319         int s;
 1320 
 1321         s = spllock();
 1322         SLOCK_LIST_LOCK();
 1323         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1324                 if ((volatile void *)alp >= start &&
 1325                     (volatile void *)alp < end) {
 1326                         lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
 1327                             alp, alp->lock_holder, alp->lock_file,
 1328                             alp->lock_line);
 1329                         SLOCK_DEBUGGER();
 1330                 }
 1331         }
 1332         SLOCK_LIST_UNLOCK();
 1333         splx(s);
 1334 }
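
/*
 * Illustrative sketch (not part of this file): an allocator's free path
 * can pass the address range being released so that any simplelock
 * still registered on simplelock_list inside that range is reported
 * before the memory is reused.  The function below is hypothetical.
 */
#if 0
static void
example_free(void *obj, size_t size)
{

#ifdef LOCKDEBUG
        simple_lock_freecheck(obj, (char *)obj + size);
#endif
        /* ... return the memory to the allocator ... */
}
#endif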
 1335 
 1336 /*
 1337  * We must be holding exactly one lock: the sched_lock.
 1338  */
 1339 
 1340 void
 1341 simple_lock_switchcheck(void)
 1342 {
 1343 
 1344         simple_lock_only_held(&sched_lock, "switching");
 1345 }
 1346 
 1347 /*
 1348  * Drop into the debugger if lp isn't the only lock held.
 1349  * lp may be NULL.
 1350  */
 1351 void
 1352 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
 1353 {
 1354         volatile struct simplelock *alp;
 1355         cpuid_t cpu_num = cpu_number();
 1356         int s;
 1357 
 1358         if (lp) {
 1359                 LOCK_ASSERT(simple_lock_held(lp));
 1360         }
 1361         s = spllock();
 1362         SLOCK_LIST_LOCK();
 1363         TAILQ_FOREACH(alp, &simplelock_list, list) {
 1364                 if (alp == lp)
 1365                         continue;
 1366 #if defined(MULTIPROCESSOR)
 1367                 if (alp == &kernel_lock)
 1368                         continue;
 1369 #endif /* defined(MULTIPROCESSOR) */
 1370                 if (alp->lock_holder == cpu_num)
 1371                         break;
 1372         }
 1373         SLOCK_LIST_UNLOCK();
 1374         splx(s);
 1375 
 1376         if (alp != NULL) {
 1377                 lock_printf("\n%s with held simple_lock %p "
 1378                     "CPU %lu %s:%d\n",
 1379                     where, alp, alp->lock_holder, alp->lock_file,
 1380                     alp->lock_line);
 1381                 SLOCK_TRACE();
 1382                 SLOCK_DEBUGGER();
 1383         }
 1384 }
 1385 
 1386 /*
 1387  * Set to 1 by simple_lock_assert_*().
 1388  * Can be cleared from ddb to avoid a panic.
 1389  */
 1390 int slock_assert_will_panic;
 1391 
 1392 /*
 1393  * If the lock isn't held, print a traceback, optionally drop into the
 1394  *  debugger, then panic.
 1395  * The panic can be avoided by clearing slock_assert_will_panic from the
 1396  *  debugger.
 1397  */
 1398 void
 1399 _simple_lock_assert_locked(volatile struct simplelock *alp,
 1400     const char *lockname, const char *id, int l)
 1401 {
 1402         if (simple_lock_held(alp) == 0) {
 1403                 slock_assert_will_panic = 1;
 1404                 lock_printf("%s lock not held\n", lockname);
 1405                 SLOCK_WHERE("lock not held", alp, id, l);
 1406                 if (slock_assert_will_panic)
 1407                         panic("%s: not locked", lockname);
 1408         }
 1409 }
 1410 
 1411 void
 1412 _simple_lock_assert_unlocked(volatile struct simplelock *alp,
 1413     const char *lockname, const char *id, int l)
 1414 {
 1415         if (simple_lock_held(alp)) {
 1416                 slock_assert_will_panic = 1;
 1417                 lock_printf("%s lock held\n", lockname);
 1418                 SLOCK_WHERE("lock held", alp, id, l);
 1419                 if (slock_assert_will_panic)
 1420                         panic("%s: locked", lockname);
 1421         }
 1422 }
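
/*
 * Illustrative sketch (not part of this file): a routine whose contract
 * requires the caller to hold a given simplelock can check that with
 * simple_lock_assert_locked(), as _kernel_lock_assert_locked() below
 * does for kernel_lock.  The function and lock name are hypothetical.
 */
#if 0
static void
example_must_hold(volatile struct simplelock *slp)
{

        simple_lock_assert_locked(slp, "example lock");
        /* ... safe to touch the protected state here ... */
}
#endif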
 1423 
 1424 void
 1425 assert_sleepable(struct simplelock *interlock, const char *msg)
 1426 {
 1427 
 1428         if (curlwp == NULL) {
 1429                 panic("assert_sleepable: NULL curlwp");
 1430         }
 1431         spinlock_switchcheck();
 1432         simple_lock_only_held(interlock, msg);
 1433 }
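
/*
 * Illustrative sketch (not part of this file): code that is about to
 * block would call assert_sleepable() first, naming the interlock it is
 * still allowed to hold (or passing NULL).  The function below is
 * hypothetical.
 */
#if 0
static void
example_block(struct simplelock *interlock)
{

        assert_sleepable(interlock, "example_block");
        /* ... ltsleep() or an equivalent blocking operation ... */
}
#endif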
 1434 
 1435 #endif /* LOCKDEBUG */ /* } */
 1436 
 1437 #if defined(MULTIPROCESSOR)
 1438 /*
 1439  * Functions for manipulating the kernel_lock.  We put them here
 1440  * so that they show up in profiles.
 1441  */
 1442 
 1443 /*
 1444  * splbiglock: block IPLs which need to grab kernel_lock.
 1445  * XXX splvm or splaudio should be enough.
 1446  */
 1447 #if !defined(__HAVE_SPLBIGLOCK)
 1448 #define splbiglock()    splclock()
 1449 #endif
 1450 
 1451 void
 1452 _kernel_lock_init(void)
 1453 {
 1454 
 1455         simple_lock_init(&kernel_lock);
 1456 }
 1457 
 1458 /*
 1459  * Acquire/release the kernel lock.  Intended for use in the scheduler
 1460  * and the lower half of the kernel.
 1461  */
 1462 void
 1463 _kernel_lock(int flag)
 1464 {
 1465         struct cpu_info *ci = curcpu();
 1466 
 1467         SCHED_ASSERT_UNLOCKED();
 1468 
 1469         if (ci->ci_data.cpu_biglock_count > 0) {
 1470                 LOCK_ASSERT(simple_lock_held(&kernel_lock));
 1471                 ci->ci_data.cpu_biglock_count++;
 1472         } else {
 1473                 int s;
 1474 
 1475                 s = splbiglock();
 1476                 while (!simple_lock_try(&kernel_lock)) {
 1477                         splx(s);
 1478                         SPINLOCK_SPIN_HOOK;
 1479                         s = splbiglock();
 1480                 }
 1481                 ci->ci_data.cpu_biglock_count++;
 1482                 splx(s);
 1483         }
 1484 }
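
/*
 * Illustrative sketch (not part of this file): kernel_lock recurses per
 * CPU through cpu_biglock_count, so nested acquisitions on one CPU only
 * bump the count and each must be paired with an unlock.
 */
#if 0
static void
example_nested_biglock(void)
{

        _kernel_lock(0);        /* count 0 -> 1: spins for kernel_lock */
        _kernel_lock(0);        /* count 1 -> 2: recursion, no spinning */
        _kernel_unlock();       /* count 2 -> 1: lock still held */
        _kernel_unlock();       /* count 1 -> 0: kernel_lock released */
}
#endif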
 1485 
 1486 void
 1487 _kernel_unlock(void)
 1488 {
 1489         struct cpu_info *ci = curcpu();
 1490         int s;
 1491 
 1492         KASSERT(ci->ci_data.cpu_biglock_count > 0);
 1493 
 1494         s = splbiglock();
 1495         if ((--ci->ci_data.cpu_biglock_count) == 0)
 1496                 simple_unlock(&kernel_lock);
 1497         splx(s);
 1498 }
 1499 
 1500 /*
 1501  * Acquire/release the kernel_lock on behalf of a process.  Intended for
 1502  * use in the top half of the kernel.
 1503  */
 1504 void
 1505 _kernel_proc_lock(struct lwp *l)
 1506 {
 1507 
 1508         SCHED_ASSERT_UNLOCKED();
 1509         _kernel_lock(0);
 1510 }
 1511 
 1512 void
 1513 _kernel_proc_unlock(struct lwp *l)
 1514 {
 1515 
 1516         _kernel_unlock();
 1517 }
 1518 
 1519 int
 1520 _kernel_lock_release_all()
 1521 {
 1522         struct cpu_info *ci = curcpu();
 1523         int hold_count;
 1524 
 1525         hold_count = ci->ci_data.cpu_biglock_count;
 1526 
 1527         if (hold_count) {
 1528                 int s;
 1529 
 1530                 s = splbiglock();
 1531                 ci->ci_data.cpu_biglock_count = 0;
 1532                 simple_unlock(&kernel_lock);
 1533                 splx(s);
 1534         }
 1535 
 1536         return hold_count;
 1537 }
 1538 
 1539 void
 1540 _kernel_lock_acquire_count(int hold_count)
 1541 {
 1542 
 1543         KASSERT(curcpu()->ci_data.cpu_biglock_count == 0);
 1544 
 1545         if (hold_count != 0) {
 1546                 struct cpu_info *ci = curcpu();
 1547                 int s;
 1548 
 1549                 s = splbiglock();
 1550                 while (!simple_lock_try(&kernel_lock)) {
 1551                         splx(s);
 1552                         SPINLOCK_SPIN_HOOK;
 1553                         s = splbiglock();
 1554                 }
 1555                 ci->ci_data.cpu_biglock_count = hold_count;
 1556                 splx(s);
 1557         }
 1558 }
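
/*
 * Illustrative sketch (not part of this file): a thread that may sleep
 * while holding kernel_lock recursively can save and drop the whole
 * per-CPU hold count before blocking and restore it afterwards.  The
 * function below is hypothetical.
 */
#if 0
static void
example_sleep_with_biglock(void)
{
        int hold_count;

        hold_count = _kernel_lock_release_all();
        /* ... block without holding kernel_lock ... */
        _kernel_lock_acquire_count(hold_count);
}
#endif
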
 1559 #if defined(DEBUG)
 1560 void
 1561 _kernel_lock_assert_locked()
 1562 {
 1563 
 1564         KDASSERT(curcpu()->ci_data.cpu_biglock_count > 0);
 1565         simple_lock_assert_locked(&kernel_lock, "kernel_lock");
 1566 }
 1567 
 1568 void
 1569 _kernel_lock_assert_unlocked()
 1570 {
 1571 
 1572         KDASSERT(curcpu()->ci_data.cpu_biglock_count == 0);
 1573         simple_lock_assert_unlocked(&kernel_lock, "kernel_lock");
 1574 }
 1575 #endif
 1576 
 1577 int
 1578 lock_owner_onproc(uintptr_t owner)
 1579 {
 1580         CPU_INFO_ITERATOR cii;
 1581         struct cpu_info *ci;
 1582 
 1583         for (CPU_INFO_FOREACH(cii, ci))
 1584                 if (owner == (uintptr_t)ci || owner == (uintptr_t)ci->ci_curlwp)
 1585                         return (1); 
 1586 
 1587         return (0);
 1588 }
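
/*
 * Illustrative sketch (not part of this file): an adaptive lock can
 * keep spinning only while the current owner is running on some CPU,
 * as reported by lock_owner_onproc(), and otherwise give up and sleep.
 * The owner pointer below is hypothetical.
 */
#if 0
static void
example_adaptive_wait(volatile uintptr_t *ownerp)
{

        while (*ownerp != 0 && lock_owner_onproc(*ownerp))
                SPINLOCK_SPIN_HOOK;     /* owner is on a CPU: keep spinning */
        /* Owner is not running: the caller would queue itself and sleep. */
}
#endif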
 1589 
 1590 #else   /* MULTIPROCESSOR */
 1591 
 1592 int
 1593 lock_owner_onproc(uintptr_t owner)
 1594 {
 1595 
 1596         return 0;
 1597 }
 1598 
 1599 #endif /* MULTIPROCESSOR */
