


FreeBSD/Linux Kernel Cross Reference
sys/kernel/spinlock.c


/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
#define raw_read_can_lock(l)    read_can_lock(l)
#define raw_write_can_lock(l)   write_can_lock(l)
/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
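/*
 * The loop below implements that in two steps: first try to take the
 * lock with preemption disabled; if that fails, re-enable preemption,
 * raise break_lock to ask the current holder to drop the lock early,
 * and spin on the architecture relax hint until the lock looks free.
 */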
#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc __raw_##op##_lock(locktype##_t *lock)                   \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
                while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
                        arch_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
}                                                                       \
                                                                        \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
                while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
                        arch_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
        return flags;                                                   \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)               \
{                                                                       \
        _raw_##op##_lock_irqsave(lock);                                 \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)                \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /*                                                      */      \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        /**/                                                            \
        flags = _raw_##op##_lock_irqsave(lock);                         \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \

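/*
 * As a rough illustration (kept under "#if 0", so it is never
 * compiled), this is what BUILD_LOCK_OPS(spin, raw_spinlock)
 * expands to for the plain lock operation:
 */
#if 0
void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(do_raw_spin_trylock(lock)))
                        break;
                preempt_enable();

                if (!(lock)->break_lock)
                        (lock)->break_lock = 1;
                while (!raw_spin_can_lock(lock) && (lock)->break_lock)
                        arch_spin_relax(&lock->raw_lock);
        }
        (lock)->break_lock = 0;
}
#endif
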
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

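/*
 * Each BUILD_LOCK_OPS() invocation above emits four out-of-line
 * functions (_lock, _lock_irqsave, _lock_irq and _lock_bh), so the
 * three invocations generate twelve lock-spinning functions in all.
 */
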
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
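
/*
 * The pattern above repeats for every operation that follows: the
 * actual implementation is the __raw_* inline from
 * include/linux/spinlock_api_smp.h, and this file only emits an
 * exported out-of-line wrapper when the matching CONFIG_INLINE_*
 * option (normally selected through an architecture's ARCH_INLINE_*
 * settings) has not asked for the call to be inlined everywhere.
 */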

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif
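
/*
 * __raw_spin_trylock_bh() disables softirq processing before the
 * trylock attempt and re-enables it again if the lock could not be
 * taken, so BHs stay disabled only while the lock is actually held.
 */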

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif
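
/*
 * Note the #ifdef (rather than #ifndef) above: the plain unlock is
 * cheap enough to be inlined by default, so CONFIG_UNINLINE_SPIN_UNLOCK
 * exists to force the out-of-line version rather than the reverse.
 */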

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
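
/*
 * A minimal usage sketch (kept under "#if 0"; my_lock and my_counter
 * are hypothetical names): the spin_lock_irqsave() and
 * spin_unlock_irqrestore() wrappers from include/linux/spinlock.h
 * bottom out in the _raw_* functions defined above.
 */
#if 0
static DEFINE_SPINLOCK(my_lock);
static unsigned long my_counter;

static void my_update(void)
{
        unsigned long flags;

        /* ends up in _raw_spin_lock_irqsave() */
        spin_lock_irqsave(&my_lock, flags);
        my_counter++;
        /* ends up in _raw_spin_unlock_irqrestore() */
        spin_unlock_irqrestore(&my_lock, flags);
}
#endif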

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
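
/*
 * Sketch of typical rwlock usage (under "#if 0"; my_rwlock and
 * my_list are hypothetical): any number of readers may hold the lock
 * concurrently, while a writer requires exclusive access. Both paths
 * land in the _raw_read_* and _raw_write_* wrappers above.
 */
#if 0
static DEFINE_RWLOCK(my_rwlock);
static LIST_HEAD(my_list);

static bool my_list_is_empty(void)
{
        bool empty;

        read_lock(&my_rwlock);          /* -> _raw_read_lock() */
        empty = list_empty(&my_list);
        read_unlock(&my_rwlock);        /* -> _raw_read_unlock() */
        return empty;
}

static void my_list_insert(struct list_head *item)
{
        write_lock(&my_rwlock);         /* -> _raw_write_lock() */
        list_add(item, &my_list);
        write_unlock(&my_rwlock);       /* -> _raw_write_unlock() */
}
#endif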

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
                                do_raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif
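
/*
 * The _nested and _nest_lock variants above exist purely for lockdep:
 * the subclass argument marks two locks of the same class as
 * legitimately nesting (e.g. spin_lock_nested(&child->lock,
 * SINGLE_DEPTH_NESTING)), which suppresses the false deadlock report
 * that acquiring the same lock class twice would otherwise produce.
 */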

notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
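
/*
 * in_lock_functions() is what lets profilers honour the warning in
 * the header comment: an architecture's profile_pc() can test a
 * sampled program counter against the __lockfunc text section and
 * attribute time spent spinning here to the calling function instead.
 */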





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.