FreeBSD/Linux Kernel Cross Reference
sys/include/asm-alpha/bitops.h


#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <linux/config.h>
#include <linux/kernel.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

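/*
 * Indexing sketch (illustrative, not part of the original header):
 * the routines below address the bitmap as an array of 32-bit words,
 * so bit nr lives at bit (nr & 31) of word (nr >> 5); on little-endian
 * Alpha this agrees with the unsigned-long numbering described above.
 * For a hypothetical two-longword map:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(37, map);
 *
 * sets bit 5 of ((int *)map)[1], which is bit 37 of map[0].
 */
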
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);    /* 32-bit word holding bit nr */

        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"          /* load-locked the word */
        "       bis %0,%2,%0\n"         /* OR in the bit mask */
        "       stl_c %0,%1\n"          /* store-conditional; %0 = 0 on failure */
        "       beq %0,2f\n"            /* store failed: retry out of line */
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

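/*
 * A rough C11-builtin equivalent of set_bit() above, for readers less
 * used to Alpha ll/sc assembly (an illustrative sketch, not part of
 * the original header -- the kernel keeps the inline assembly so the
 * retry loop and its out-of-line code layout stay under its control):
 *
 *	static inline void set_bit_sketch(unsigned long nr, volatile void *addr)
 *	{
 *		int *m = ((int *) addr) + (nr >> 5);
 *		__atomic_fetch_or(m, 1 << (nr & 31), __ATOMIC_RELAXED);
 *	}
 *
 * Like the assembly above, this implies no memory barrier.
 */
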
/*
 * WARNING: non-atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m |= 1 << (nr & 31);
}

#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()

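/*
 * Typical usage sketch for the barrier macros above (illustrative):
 * clear_bit() itself implies no memory barrier, so callers using a bit
 * as a lock or completion flag bracket it explicitly, e.g. on some
 * hypothetical flag word:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &busy_flags);
 *	smp_mb__after_clear_bit();
 */
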
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"
        "       and %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (~(1UL << (nr & 31))), "m" (*m));
}

/*
 * WARNING: non-atomic version.
 */
static inline void
__change_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m ^= 1 << (nr & 31);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"
        "       xor %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       and %0,%3,%2\n"
        "       bne %2,2f\n"
        "       xor %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

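/*
 * Usage sketch (illustrative): test_and_set_bit() doubles as a simple
 * try-lock on a hypothetical flag word, since it atomically observes
 * and sets the bit and, on SMP, issues the "mb" above:
 *
 *	while (test_and_set_bit(0, &busy_flags))
 *		;	(bit was already set: spin until we claim it)
 */
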
/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old | mask;
        return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       and %0,%3,%2\n"
        "       beq %2,2f\n"
        "       xor %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old & ~mask;
        return (old & mask) != 0;
}

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old ^ mask;
        return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       and %0,%3,%2\n"
        "       xor %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

static inline int
test_bit(int nr, volatile void * addr)
{
        return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
        unsigned long sum = 0;

        x = ~x & -~x;           /* set first 0 bit, clear others */
        if (x & 0xF0) sum += 4;
        if (x & 0xCC) sum += 2;
        if (x & 0xAA) sum += 1;

        return sum;
}

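/*
 * Worked example for ffz_b() above (illustrative): for x == 0xD7
 * (binary 11010111), ~x & -~x isolates the lowest zero bit, giving
 * 0x08.  Then 0x08 & 0xF0 == 0 (+0), 0x08 & 0xCC != 0 (+2), and
 * 0x08 & 0xAA != 0 (+1), so ffz_b returns 3 -- and bit 3 is indeed
 * the first zero bit of 0xD7.
 */
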
static inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
        /* Whee.  EV67 can calculate it directly.  */
        unsigned long result;
        __asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
        return result;
#else
        unsigned long bits, qofs, bofs;

        __asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
        qofs = ffz_b(bits);
        __asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
        bofs = ffz_b(bits);

        return qofs*8 + bofs;
#endif
}

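/*
 * Walkthrough of the non-EV67 path above (illustrative): cmpbge sets
 * bit i of "bits" when byte i of "word" equals 0xff, so ffz_b(bits)
 * yields qofs, the lowest byte that still contains a zero; extbl then
 * extracts that byte and a second ffz_b locates the zero within it.
 * For word == 0x37ff: bits == 0x01 (only byte 0 is all ones), so
 * qofs == 1; extbl yields 0x37, whose first zero is bit 3; the result
 * is 1*8 + 3 == 11, and bit 11 of 0x37ff is indeed the first zero.
 */
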
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

static inline int ffs(int word)
{
        int result = ffz(~word);
        return word ? result+1 : 0;
}

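/*
 * Examples of the convention above (illustrative): ffs(0) == 0,
 * ffs(1) == 1, and ffs(0x10) == 5.  The result is one-based, unlike
 * ffz(), which is zero-based and hunts for clear bits instead.
 */
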
/* Compute the base-2 logarithm of the given integer.  */
static inline int floor_log2(unsigned long word)
{
        long bit;
#if defined(__alpha_cix__) && defined(__alpha_fix__)
        __asm__("ctlz %1,%0" : "=r"(bit) : "r"(word));
        return 63 - bit;
#else
        for (bit = -1; word ; bit++)
                word >>= 1;
        return bit;
#endif
}

static inline int ceil_log2(unsigned int word)
{
        long bit = floor_log2(word);
        return bit + (word > (1UL << bit));
}

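/*
 * Worked example (illustrative): on the non-EV67 path, floor_log2(9)
 * shifts 9 -> 4 -> 2 -> 1 -> 0 and returns bit == 3; then
 * ceil_log2(9) == 3 + (9 > (1UL << 3)) == 4.  For an exact power of
 * two, floor_log2(8) == ceil_log2(8) == 3.
 */
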
/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
        unsigned long result;
        __asm__("ctpop %1,%0" : "=r"(result) : "r"(w));
        return result;
}

#define hweight32(x) hweight64((x) & 0xfffffffful)
#define hweight16(x) hweight64((x) & 0xfffful)
#define hweight8(x)  hweight64((x) & 0xfful)
#else
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
#endif

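/*
 * Example (illustrative): hweight8(0xb7) == 6, since 0xb7 is binary
 * 10110111 with six bits set.  On EV67 this is a mask plus a single
 * ctpop; elsewhere it falls back to the generic routines.
 */
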
#endif /* __KERNEL__ */

/*
 * Find next zero bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
        unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
        unsigned long result = offset & ~63UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 63UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (64-offset);
                if (size < 64)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 64;
                result += 64;
        }
        while (size & ~63UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 64;
                size -= 64;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size; /* Nope. */
found_middle:
        return result + ffz(tmp);
}

/*
 * The optimizer actually generates good code for this case.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

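/*
 * Usage sketch (illustrative): scanning a hypothetical 256-bit
 * allocation map for a free slot and then claiming it:
 *
 *	unsigned long map[4];
 *	unsigned long slot = find_first_zero_bit(map, 256);
 *	if (slot < 256)
 *		set_bit(slot, map);
 *
 * The find/set pair is not atomic as written; if other CPUs may race
 * for slots, claim the bit with test_and_set_bit() instead.
 */
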
#ifdef __KERNEL__

#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */
