/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUXKPI_LINUX_BITOPS_H_
#define	_LINUXKPI_LINUX_BITOPS_H_

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/libkern.h>

#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

#define	BITS_PER_LONG_LONG	64

#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define	GENMASK(h, l)	(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	GENMASK_ULL(h, l)	(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define	BITS_PER_BYTE		8
#define	BITS_PER_TYPE(t)	(sizeof(t) * BITS_PER_BYTE)

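/*
 * Example expansions (illustrative only, assuming an LP64 platform):
 *
 *	BIT(3)			== 0x08
 *	GENMASK(7, 4)		== 0xf0	(bits 4..7, inclusive)
 *	BITS_TO_LONGS(65)	== 2	(65 bits need two 64-bit words)
 *	BIT_WORD(68)		== 1, BIT_MASK(68) == (1UL << 4)
 *
 * Note that GENMASK(h, l) requires h >= l.
 */
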
#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)

#define	HWEIGHT8(x)	bitcount8((uint8_t)(x))
#define	HWEIGHT16(x)	bitcount16(x)
#define	HWEIGHT32(x)	bitcount32(x)
#define	HWEIGHT64(x)	bitcount64(x)

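/*
 * The hweight*() ("Hamming weight") macros return the number of set
 * bits in their argument, e.g. hweight32(0xf0f0) == 8; they map
 * directly onto the libkern bitcount*() family.  In the Linux API the
 * upper-case HWEIGHT*() forms are the counterparts intended for
 * compile-time-constant arguments and yield the same counts.
 */
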
static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}

static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}

static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}

static inline unsigned long
__ffs64(uint64_t mask)
{
	return (ffsll(mask) - 1);
}

static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}

static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}

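/*
 * Notes on the ffs/fls family (illustrative): the libkern ffs()/fls()
 * primitives are 1-based, so the double-underscore wrappers above
 * return 0-based bit indices, e.g. __ffs(0x10) == 4 and
 * __fls(0x10) == 4, while fls64(0x10) == 5.  As in Linux, the result
 * of the double-underscore forms is not meaningful for a zero
 * argument (here it would be -1); callers must pass a non-zero mask.
 */
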
static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	return ((word >> shift) | (word << (32 - shift)));
}

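/*
 * ror32() rotates a 32-bit word right, e.g. ror32(0x00000001, 4) ==
 * 0x10000000.  As written, shift must be in the range 1..31: a shift
 * of 0 would evaluate word << 32, which is undefined behaviour (the
 * Linux implementation masks the shift amount to avoid this).
 */
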
#define	ffz(mask)	__ffs(~(mask))

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

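/*
 * get_count_order() returns the smallest order such that
 * (1 << order) >= count, e.g. get_count_order(16) == 4 and
 * get_count_order(17) == 5.
 */
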
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

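/*
 * Illustrative use of find_first_bit() (values assume BITS_PER_LONG
 * == 64):
 *
 *	unsigned long map[2] = { 0, 0x10 };
 *
 *	find_first_bit(map, 128);	returns 68
 *	find_first_bit(map, 64);	returns 64 (nothing found: == size)
 *
 * Like its Linux counterpart, it returns "size" when no set bit is
 * found among the first "size" bits.
 */
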
static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

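/*
 * find_first_zero_bit() is the complement: with the same "map" as
 * above, find_first_zero_bit(map, 128) returns 0, and on an all-ones
 * bitmap it returns "size".
 */
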
static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}

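/*
 * find_last_bit() scans downward from bit "size" - 1.  With the
 * two-word map above, find_last_bit(map, 128) also returns 68, since
 * bit 68 is both the first and the last set bit.  When the bitmap is
 * empty it returns "size".
 */
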
static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

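/*
 * find_next_bit() resumes a scan at "offset".  Continuing the example:
 *
 *	find_next_bit(map, 128, 68);	returns 68 (offset itself is set)
 *	find_next_bit(map, 128, 69);	returns 128 (nothing further)
 *
 * find_next_zero_bit() below is the same walk over cleared bits.
 */
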
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

/*
 * Unlike Linux, where the double-underscore variants may be
 * implemented without atomicity, __set_bit() and __clear_bit() below
 * use the same atomic(9) operations as set_bit() and clear_bit();
 * clear_bit_unlock() additionally has release semantics.
 */
#define	__set_bit(i, a)							\
	atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
	atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
	atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
	atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit_unlock(i, a)						\
	atomic_clear_rel_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	test_bit(i, a)							\
	!!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))

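/*
 * Example (illustrative): setting and testing a flag.
 *
 *	unsigned long flags[BITS_TO_LONGS(16)] = { 0 };
 *
 *	set_bit(3, flags);
 *	if (test_bit(3, flags))		evaluates true
 *	clear_bit(3, flags);
 */
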
static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	/*
	 * Retry until the compare-and-set succeeds; on failure
	 * atomic_fcmpset_long() reloads "val" with the current value.
	 */
	val = *var;
	while (!atomic_fcmpset_long(var, &val, val & ~bit))
		;
	return !!(val & bit);
}

static inline int
__test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var &= ~bit;

	return !!(val & bit);
}

static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val | bit))
		;
	return !!(val & bit);
}

static inline int
__test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var |= bit;

	return !!(val & bit);
}

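/*
 * All four test_and_*_bit() variants return the previous value of the
 * bit (1 if it was set, 0 if it was clear); the double-underscore
 * forms perform the read-modify-write non-atomically.  For example,
 * calling test_and_set_bit(5, flags) twice in a row on a fresh bitmap
 * returns 0, then 1.
 */
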
enum {
	REG_OP_ISFREE,
	REG_OP_ALLOC,
	REG_OP_RELEASE,
};

static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;
	int index;
	int offset;
	int nlongs_reg;
	int nbitsinlong;
	unsigned long mask;
	int i;
	int ret = 0;

	/* A "region" is a naturally aligned run of 1 << order bits. */
	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = MIN(nbits_reg, BITS_PER_LONG);

	/*
	 * Build a mask of nbitsinlong consecutive bits at the region's
	 * offset within its word, without ever shifting by the full
	 * word width.
	 */
	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		/* Return 1 if every bit of the region is clear. */
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return ret;
}

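/*
 * linux_reg_op() is the FreeBSD rendering of Linux's __reg_op() region
 * helper, which in Linux backs bitmap_allocate_region() and friends.
 * Illustrative use, claiming an aligned 4-bit region at bit 8:
 *
 *	if (linux_reg_op(bitmap, 8, 2, REG_OP_ISFREE))
 *		linux_reg_op(bitmap, 8, 2, REG_OP_ALLOC);
 *	...
 *	linux_reg_op(bitmap, 8, 2, REG_OP_RELEASE);
 */
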
#define	for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define	for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

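/*
 * Illustrative iteration over the two-word example bitmap from above;
 * the loop body runs exactly once, with bit == 68:
 *
 *	unsigned long bit;
 *
 *	for_each_set_bit(bit, map, 128)
 *		printf("bit %lu is set\n", bit);
 */
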
/*
 * sign_extend64()/sign_extend32() treat "index" as the 0-based
 * position of the sign bit and propagate it through the higher bits.
 */
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return ((int64_t)(value << shift) >> shift);
}

static inline uint32_t
sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return ((int32_t)(value << shift) >> shift);
}

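/*
 * Example: sign_extend32(0xa5, 7) == 0xffffffa5, while
 * sign_extend32(0x45, 7) == 0x45 (bit 7 is clear).
 */
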
#endif	/* _LINUXKPI_LINUX_BITOPS_H_ */