FreeBSD/Linux Kernel Cross Reference
sys/lib/spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

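/*
 * Each debug spinlock/rwlock carries three extra fields that the checks
 * below rely on: ->magic catches use of uninitialized or corrupted
 * locks, ->owner records the task holding the lock, and ->owner_cpu
 * the CPU it was acquired on.
 */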
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

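/*
 * Dump everything we know about the lock and the current context.
 * Called directly from the lockup path below, so a suspected-lockup
 * report is printed even after lock debugging has been turned off.
 */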
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = NULL;

        if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                owner = lock->owner;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, lock->magic,
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
                lock->owner_cpu);
        dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        spin_dump(lock, msg);
}

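/*
 * Check-and-report helper: debug_locks_off() returns nonzero only when
 * it actually disables lock debugging, so the first failed check dumps
 * state and later failures stay silent.
 */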
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                        lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                        lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

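/*
 * Debug slow path: keep retrying with trylock for roughly one second
 * (loops_per_jiffy delay iterations per jiffy, times HZ jiffies)
 * before reporting a suspected lockup.
 */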
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;

        for (i = 0; i < loops; i++) {
                if (arch_spin_trylock(&lock->raw_lock))
                        return;
                __delay(1);
        }
        /* lockup suspected: */
        spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
        trigger_all_cpu_backtrace();
#endif

        /*
         * The trylock above was causing a livelock.  Give the lower level arch
         * specific lock code a chance to acquire the lock. We have already
         * printed a warning/backtrace at this point. The non-debug arch
         * specific code might actually succeed in acquiring the lock.  If it is
         * not successful, the end-result is the same - there is no forward
         * progress.
         */
        arch_spin_lock(&lock->raw_lock);
}

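/*
 * Entry points called by the generic spinlock code when
 * CONFIG_DEBUG_SPINLOCK is enabled: sanity-check, take the lock, then
 * record the new owner.
 */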
void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
        debug_spin_unlock(lock);
        arch_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                msg, raw_smp_processor_id(), current->comm,
                task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0           /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

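/*
 * Read locks may be held by any number of CPUs at once, so there is no
 * single owner to track; the read-side entry points only validate the
 * magic value.
 */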
void do_raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
        int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                        lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                        lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

#if 0           /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

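/*
 * With __write_lock_debug() disabled above, the write-lock path has no
 * lockup timeout: after the sanity checks it goes straight to the arch
 * implementation.
 */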
void do_raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
        int ret = arch_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
}
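
As a usage illustration (not part of the file above), here is a minimal
module sketch that unlocks a spinlock twice. Assuming a kernel built with
CONFIG_DEBUG_SPINLOCK, the second spin_unlock() should reach
debug_spin_unlock(), fail the "already unlocked" check, and produce the
spin_dump() report implemented above. The module name and messages are
illustrative only.

/*
 * Deliberate double unlock to exercise the DEBUG_SPINLOCK checks.
 * Build and load only on a throwaway test kernel: after the report,
 * the underlying arch lock state is intentionally corrupted.
 */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static int __init demo_init(void)
{
        pr_info("spinlock debug demo: double unlock follows\n");
        spin_lock(&demo_lock);
        spin_unlock(&demo_lock);
        /* Second unlock: SPIN_BUG_ON(!raw_spin_is_locked(lock), ...) fires. */
        spin_unlock(&demo_lock);
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");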


