FreeBSD/Linux Kernel Cross Reference
sys/include/linux/spinlock.h


#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

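/*
 * Editor's sketch (not part of the original header; identifiers are
 * hypothetical): minimal usage of raw_spin_lock_init().  Under
 * CONFIG_DEBUG_SPINLOCK the macro above registers a lockdep class key
 * for the lock; otherwise it degenerates to a plain unlocked-initializer
 * store.  Dynamic initialization must happen before first use:
 *
 *      static raw_spinlock_t hw_lock;
 *
 *      static int __init hw_init(void)
 *      {
 *              raw_spin_lock_init(&hw_lock);
 *              return 0;
 *      }
 *
 * A statically allocated lock could instead use DEFINE_RAW_SPINLOCK(),
 * which expands to the same unlocked initializer at compile time.
 */
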
#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/* Acquiring the lock does not imply a full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

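/*
 * Editor's sketch (not part of the original header; identifiers are
 * hypothetical): smp_mb__after_lock() is for the rare caller that needs
 * a full barrier between taking a lock and reading state that another
 * CPU may write without holding that lock:
 *
 *      spin_lock(&q->lock);
 *      smp_mb__after_lock();
 *      if (q->wakeup_pending)
 *              do_wakeup(q);
 *      spin_unlock(&q->lock);
 */
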
/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set.  The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
# define raw_spin_lock_nested(lock, subclass)           _raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)                      \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

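/*
 * Editor's sketch (not part of the original header; my_lock is
 * hypothetical): the typecheck() above enforces that @flags is an
 * unsigned long lvalue, since the saved interrupt state is returned by
 * value rather than through a pointer:
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave(&my_lock, flags);
 *      ... critical section, IRQs off ...
 *      raw_spin_unlock_irqrestore(&my_lock, flags);
 *
 * Declaring "int flags" instead would be flagged at compile time by
 * typecheck().
 */
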
#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

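/*
 * Editor's sketch (not part of the original header; identifiers are
 * hypothetical): the trylock_irq* forms disable interrupts first and
 * re-enable/restore them if the lock could not be taken, so the caller
 * sees a consistent IRQ state on both paths.  A non-blocking fast path:
 *
 *      unsigned long flags;
 *
 *      if (raw_spin_trylock_irqsave(&dev->lock, flags)) {
 *              ... fast path, lock held, IRQs off ...
 *              raw_spin_unlock_irqrestore(&dev->lock, flags);
 *      } else {
 *              ... contended: defer or fall back ...
 *      }
 */
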
/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

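/*
 * Editor's sketch (not part of the original header; parent/child are
 * hypothetical): spin_lock_nested() exists for lockdep's benefit when
 * two locks of the same class must be held at once; the subclass tells
 * lockdep the ordering is intentional.  SINGLE_DEPTH_NESTING comes from
 * <linux/lockdep.h>:
 *
 *      spin_lock(&parent->lock);
 *      spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      ... both locks held ...
 *      spin_unlock(&child->lock);
 *      spin_unlock(&parent->lock);
 */
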
static inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

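/*
 * Editor's sketch (not part of the original header; struct counter and
 * counter_add are hypothetical): the canonical pattern using the
 * wrappers above.  spin_lock_irq() is for contexts known to have IRQs
 * enabled; spin_lock_irqsave() also saves and restores the previous IRQ
 * state, so it is safe in any context:
 *
 *      struct counter {
 *              spinlock_t lock;
 *              unsigned long value;
 *      };
 *
 *      static void counter_add(struct counter *c, unsigned long n)
 *      {
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&c->lock, flags);
 *              c->value += n;
 *              spin_unlock_irqrestore(&c->lock, flags);
 *      }
 */
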
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

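/*
 * Editor's sketch (not part of the original header; struct my_obj and
 * obj_list_lock are hypothetical): the classic refcount-release pattern
 * served by atomic_dec_and_lock().  The lock is taken only when the
 * count actually hits zero, avoiding lock traffic on every put:
 *
 *      static void obj_put(struct my_obj *obj)
 *      {
 *              if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *                      list_del(&obj->node);
 *                      spin_unlock(&obj_list_lock);
 *                      kfree(obj);
 *              }
 *      }
 */
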
#endif /* __LINUX_SPINLOCK_H */



This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.