include/linux/bitops.h
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
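
/*
 * Example (illustrative; the values shown assume a 64-bit kernel, where
 * BITS_PER_LONG is 64):
 *
 *	BIT(3)			== 0x8UL
 *	BIT_WORD(70)		== 1		(bit 70 lives in the second long)
 *	BIT_MASK(70)		== 1UL << 6	(its position within that long)
 *	BITS_TO_LONGS(70)	== 2		(longs needed for a 70-bit bitmap)
 */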

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
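
/*
 * Example (illustrative caller code; `map' and `bit' are hypothetical
 * locals, and pr_info() is used purely for demonstration):
 *
 *	DECLARE_BITMAP(map, 64);
 *	unsigned int bit;
 *
 *	bitmap_zero(map, 64);
 *	set_bit(3, map);
 *	set_bit(40, map);
 *
 *	for_each_set_bit(bit, map, 64)
 *		pr_info("bit %u is set\n", bit);	// prints 3, then 40
 */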

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
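
/*
 * Example (illustrative; scanning for free slots in a small allocation
 * bitmap, where `used' and `slot' are hypothetical caller locals):
 *
 *	DECLARE_BITMAP(used, 8);
 *	unsigned int slot;
 *
 *	bitmap_fill(used, 8);
 *	clear_bit(2, used);
 *	clear_bit(5, used);
 *
 *	for_each_clear_bit(slot, used, 8)
 *		pr_info("slot %u is free\n", slot);	// prints 2, then 5
 */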

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
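
/*
 * Example (illustrative): get_count_order() rounds up to the next power
 * of two and returns its exponent, while get_bitmask_order() is simply
 * fls(), i.e. one more than the index of the highest set bit:
 *
 *	get_count_order(16)	== 4	(16 is already 2^4)
 *	get_count_order(17)	== 5	(rounded up to 32 == 2^5)
 *	get_bitmask_order(16)	== 5	(highest set bit is bit 4)
 */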

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
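
/*
 * Example (illustrative): hweight_long() returns the Hamming weight, i.e.
 * the number of set bits, picking the 32- or 64-bit helper that matches
 * the width of unsigned long:
 *
 *	hweight_long(0UL)	== 0
 *	hweight_long(0xf0UL)	== 4
 *	hweight_long(~0UL)	== BITS_PER_LONG
 */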

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}
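
/*
 * Example (illustrative): a rotate feeds the bits that fall off one end
 * back in at the other, so no information is lost:
 *
 *	rol8(0x81, 1)		== 0x03
 *	ror8(0x81, 1)		== 0xc0
 *	rol32(0x80000001, 4)	== 0x00000018
 *
 * For the 32- and 64-bit variants a shift of 0 would shift the other
 * operand by the full word width, which C leaves undefined, so callers
 * keep @shift in the range 1..width-1.
 */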

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
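
/*
 * Example (illustrative): treating bit 7 as the sign bit turns an 8-bit
 * two's-complement field into a full 32-bit signed value:
 *
 *	sign_extend32(0x0000007f, 7)	== 127
 *	sign_extend32(0x00000080, 7)	== -128
 *	sign_extend32(0x000000ff, 7)	== -1
 */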

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
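
/*
 * Example (illustrative): fls_long() behaves like fls()/fls64() but takes
 * an unsigned long, returning one plus the index of the most significant
 * set bit, or 0 if no bit is set:
 *
 *	fls_long(0UL)		== 0
 *	fls_long(1UL)		== 1
 *	fls_long(0x10UL)	== 5
 */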

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs().
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
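
/*
 * Example (illustrative): __ffs64() returns the 0-based index of the
 * least significant set bit, including bits in the upper half on 32-bit
 * architectures:
 *
 *	__ffs64(0x0000000000000001ULL)	== 0
 *	__ffs64(0x0000000000000018ULL)	== 3
 *	__ffs64(0x0000000100000000ULL)	== 32
 */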

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits)	\
({								\
	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
	typeof(*ptr) old, new;					\
								\
	do {							\
		old = ACCESS_ONCE(*ptr);			\
		new = (old & ~mask) | bits;			\
	} while (cmpxchg(ptr, old, new) != old);		\
								\
	new;							\
})
#endif
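
/*
 * Example (illustrative): set_mask_bits() atomically replaces the bits
 * selected by the mask with new values, retrying the cmpxchg() until no
 * other CPU has modified the word in between. Here `state' is a
 * hypothetical caller variable and STATE_MASK/STATE_RUNNING are
 * hypothetical flag definitions:
 *
 *	unsigned long state;
 *
 *	// clear STATE_MASK and set STATE_RUNNING in one atomic update
 *	set_mask_bits(&state, STATE_MASK, STATE_RUNNING);
 */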

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif
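
/*
 * Example (illustrative): with bits 3 and 40 set in a 64-bit bitmap,
 * find_last_bit() reports the highest one; on an all-zero bitmap it
 * returns the size argument instead:
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	bitmap_zero(map, 64);
 *	set_bit(3, map);
 *	set_bit(40, map);
 *	// find_last_bit(map, 64) returns 40
 *
 *	bitmap_zero(map, 64);
 *	// find_last_bit(map, 64) now returns 64
 */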

#endif /* __KERNEL__ */
#endif