The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/include/asm-alpha/spinlock.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 #ifndef _ALPHA_SPINLOCK_H
    2 #define _ALPHA_SPINLOCK_H
    3 
    4 #include <linux/config.h>
    5 #include <asm/system.h>
    6 #include <linux/kernel.h>
    7 #include <asm/current.h>
    8 
    9 
   10 /*
   11  * Simple spin lock operations.  There are two variants, one clears IRQ's
   12  * on the local processor, one does not.
   13  *
   14  * We make no fairness assumptions. They have a cost.
   15  */
   16 
/*
 * Spinlock state: a zero lock word means unlocked; bit 0 is set while
 * the lock is held (see the ldl_l/stl_c sequence in spin_lock below).
 * Debug builds carry extra bookkeeping about the last acquirer.
 */
typedef struct {
        volatile unsigned int lock /*__attribute__((aligned(32))) */;
#if CONFIG_DEBUG_SPINLOCK
        int on_cpu;             /* CPU holding the lock; -1 when unheld (see spin_lock_init) */
        int line_no;            /* __LINE__ passed in by the spin_lock() debug macro */
        void *previous;         /* NOTE(review): presumably the acquirer's return address -- set in arch code, not visible here */
        struct task_struct * task;      /* task associated with the last acquisition */
        const char *base_file;  /* __BASE_FILE__ passed in by the spin_lock() debug macro */
#endif
} spinlock_t;
   27 
#if CONFIG_DEBUG_SPINLOCK
/* Static initializer: lock word clear, on_cpu = -1, debug fields zeroed. */
#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, -1, 0, 0, 0, 0}
/* Runtime (re)initialization.  NOTE(review): line_no and base_file are
   not reset here -- presumably refreshed on the next debug_spin_lock();
   confirm this is intentional. */
#define spin_lock_init(x)                                               \
        ((x)->lock = 0, (x)->on_cpu = -1, (x)->previous = 0, (x)->task = 0)
#else
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }
#define spin_lock_init(x)       ((x)->lock = 0)
#endif
   36 
/* A nonzero lock word means the lock is held. */
#define spin_is_locked(x)       ((x)->lock != 0)
/* Busy-wait (without acquiring) until the lock is observed free. */
#define spin_unlock_wait(x)     ({ do { barrier(); } while ((x)->lock); })
   39 
#if CONFIG_DEBUG_SPINLOCK
/* Debug builds route lock/unlock through out-of-line helpers so the
   acquirer's file and line can be recorded in the spinlock_t. */
extern void spin_unlock(spinlock_t * lock);
extern void debug_spin_lock(spinlock_t * lock, const char *, int);
extern int debug_spin_trylock(spinlock_t * lock, const char *, int);

#define spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
#define spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)

/* Assert that the current CPU owns LOCK; complain (but continue) if the
   lock is free or held by another CPU.  LOCATION is a caller-supplied
   string identifying the call site. */
#define spin_lock_own(LOCK, LOCATION)                                   \
do {                                                                    \
        if (!((LOCK)->lock && (LOCK)->on_cpu == smp_processor_id()))    \
                printk("%s: called on %d from %p but lock %s on %d\n",  \
                       LOCATION, smp_processor_id(),                    \
                       __builtin_return_address(0),                     \
                       (LOCK)->lock ? "taken" : "freed", (LOCK)->on_cpu); \
} while (0)
   56 #else
/*
 * Release the lock.  The mb() orders all critical-section memory
 * accesses before the store that clears the lock word, so other CPUs
 * cannot observe the lock free while our writes are still in flight.
 */
static inline void spin_unlock(spinlock_t * lock)
{
        mb();
        lock->lock = 0;
}
   62 
/*
 * Acquire the lock with an Alpha load-locked/store-conditional loop:
 *   1: ldl_l  loads the lock word and sets the lock flag;
 *      blbs   bails to the spin loop if bit 0 (held) is set;
 *      or/stl_c  sets bit 0 and attempts the conditional store --
 *      beq    also bails if the store-conditional failed (tmp == 0);
 *      mb     orders the critical section after the acquisition.
 * The contended path at 2: spins on a plain ldl (no lock flag traffic)
 * until the word looks free, then retries from 1b.
 */
static inline void spin_lock(spinlock_t * lock)
{
        long tmp;

        /* Use sub-sections to put the actual loop at the end
           of this object file's text section so as to perfect
           branch prediction.  */
        __asm__ __volatile__(
        "1:     ldl_l   %0,%1\n"
        "       blbs    %0,2f\n"
        "       or      %0,1,%0\n"
        "       stl_c   %0,%1\n"
        "       beq     %0,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     ldl     %0,%1\n"
        "       blbs    %0,2b\n"
        "       br      1b\n"
        ".previous"
        : "=&r" (tmp), "=m" (lock->lock)
        : "m"(lock->lock) : "memory");
}
   85 
/* Non-blocking acquire via the generic atomic bit op; returns nonzero on
   success (bit 0 was previously clear).  Relies on the lock word being
   the first member of spinlock_t. */
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
/* Ownership assertion is a no-op outside CONFIG_DEBUG_SPINLOCK builds. */
#define spin_lock_own(LOCK, LOCATION)   ((void)0)
   88 #endif /* CONFIG_DEBUG_SPINLOCK */
   89 
   90 /***********************************************************/
   91 
/*
 * Reader/writer lock packed into one word: bit 0 is the write lock and
 * the upper 31 bits count readers.  The asm below manipulates the whole
 * word: readers adjust it by +/-2 (see read_lock/read_unlock) so bit 0
 * is never disturbed, and a writer requires the entire word to be zero.
 */
typedef struct {
        volatile int write_lock:1, read_counter:31;
} /*__attribute__((aligned(32)))*/ rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)
   99 
  100 #if CONFIG_DEBUG_RWLOCK
/* Debug builds implement the rwlock slow paths out of line. */
extern void write_lock(rwlock_t * lock);
extern void read_lock(rwlock_t * lock);
  103 #else
  104 static inline void write_lock(rwlock_t * lock)
  105 {
  106         long regx;
  107 
  108         __asm__ __volatile__(
  109         "1:     ldl_l   %1,%0\n"
  110         "       bne     %1,6f\n"
  111         "       or      $31,1,%1\n"
  112         "       stl_c   %1,%0\n"
  113         "       beq     %1,6f\n"
  114         "       mb\n"
  115         ".subsection 2\n"
  116         "6:     ldl     %1,%0\n"
  117         "       bne     %1,6b\n"
  118         "       br      1b\n"
  119         ".previous"
  120         : "=m" (*(volatile int *)lock), "=&r" (regx)
  121         : "" (*(volatile int *)lock) : "memory");
  122 }
  123 
/*
 * Acquire a read lock: blbs bails to the spin loop while bit 0 (the
 * write lock) is set; otherwise subl adjusts the whole word by -2,
 * bumping the reader count without touching bit 0.  beq retries via the
 * out-of-line loop if the stl_c store-conditional failed, and the mb at
 * 4: orders the critical section after the acquisition.  The contended
 * path at 6: spins on a plain ldl until the writer bit clears.
 */
static inline void read_lock(rwlock_t * lock)
{
        long regx;

        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
        "       blbs    %1,6f\n"
        "       subl    %1,2,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "4:     mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
        "       blbs    %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*(volatile int *)lock), "=&r" (regx)
        : "m" (*(volatile int *)lock) : "memory");
}
  143 #endif /* CONFIG_DEBUG_RWLOCK */
  144 
/*
 * Release the write lock: mb() orders the critical-section accesses
 * before the plain store that clears the entire lock word (safe because
 * a writer holds the word exclusively, so no reader bits can be set).
 */
static inline void write_unlock(rwlock_t * lock)
{
        mb();
        *(volatile int *)lock = 0;
}
  150 
/*
 * Drop a read lock: the leading mb orders the critical section before
 * the release, then an ll/sc loop adds 2 back to the lock word (undoing
 * the -2 from read_lock).  This must be atomic -- other readers and
 * writers race on the same word -- so a failed stl_c (beq) retries via
 * the out-of-line branch at 6:.
 */
static inline void read_unlock(rwlock_t * lock)
{
        long regx;
        __asm__ __volatile__(
        "       mb\n"
        "1:     ldl_l   %1,%0\n"
        "       addl    %1,2,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        ".subsection 2\n"
        "6:     br      1b\n"
        ".previous"
        : "=m" (*(volatile int *)lock), "=&r" (regx)
        : "m" (*(volatile int *)lock) : "memory");
}
  166 
  167 #endif /* _ALPHA_SPINLOCK_H */

Cache object: dd94296b7efb3c1512118fba89ccd678


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.