Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] bitops: xtensa: use generic bitops

- remove {,test_and_}{set,clear,change}_bit()
- remove __{,test_and_}{set,clear,change}_bit() and test_bit()
- remove generic_fls64()
- remove find_{next,first}{,_zero}_bit()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()
- remove generic_hweight{32,16,8}()
- remove sched_find_first_bit()
- remove minix_{test_and_set,set,test_and_clear,test,find_first_zero}_bit()

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Akinobu Mita; committed by Linus Torvalds.
d4337aa5 f33e2fba

+16 -332
+8
arch/xtensa/Kconfig
··· 22 22 bool 23 23 default y 24 24 25 + config GENERIC_FIND_NEXT_BIT 26 + bool 27 + default y 28 + 29 + config GENERIC_HWEIGHT 30 + bool 31 + default y 32 + 25 33 config GENERIC_HARDIRQS 26 34 bool 27 35 default y
+8 -332
include/asm-xtensa/bitops.h
··· 23 23 # error SMP not supported on this architecture 24 24 #endif 25 25 26 - static __inline__ void set_bit(int nr, volatile void * addr) 27 - { 28 - unsigned long mask = 1 << (nr & 0x1f); 29 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 30 - unsigned long flags; 31 - 32 - local_irq_save(flags); 33 - *a |= mask; 34 - local_irq_restore(flags); 35 - } 36 - 37 - static __inline__ void __set_bit(int nr, volatile unsigned long * addr) 38 - { 39 - unsigned long mask = 1 << (nr & 0x1f); 40 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 41 - 42 - *a |= mask; 43 - } 44 - 45 - static __inline__ void clear_bit(int nr, volatile void * addr) 46 - { 47 - unsigned long mask = 1 << (nr & 0x1f); 48 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 49 - unsigned long flags; 50 - 51 - local_irq_save(flags); 52 - *a &= ~mask; 53 - local_irq_restore(flags); 54 - } 55 - 56 - static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) 57 - { 58 - unsigned long mask = 1 << (nr & 0x1f); 59 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 60 - 61 - *a &= ~mask; 62 - } 63 - 64 - /* 65 - * clear_bit() doesn't provide any barrier for the compiler. 
66 - */ 67 - 68 26 #define smp_mb__before_clear_bit() barrier() 69 27 #define smp_mb__after_clear_bit() barrier() 70 28 71 - static __inline__ void change_bit(int nr, volatile void * addr) 72 - { 73 - unsigned long mask = 1 << (nr & 0x1f); 74 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 75 - unsigned long flags; 76 - 77 - local_irq_save(flags); 78 - *a ^= mask; 79 - local_irq_restore(flags); 80 - } 81 - 82 - static __inline__ void __change_bit(int nr, volatile void * addr) 83 - { 84 - unsigned long mask = 1 << (nr & 0x1f); 85 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 86 - 87 - *a ^= mask; 88 - } 89 - 90 - static __inline__ int test_and_set_bit(int nr, volatile void * addr) 91 - { 92 - unsigned long retval; 93 - unsigned long mask = 1 << (nr & 0x1f); 94 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 95 - unsigned long flags; 96 - 97 - local_irq_save(flags); 98 - retval = (mask & *a) != 0; 99 - *a |= mask; 100 - local_irq_restore(flags); 101 - 102 - return retval; 103 - } 104 - 105 - static __inline__ int __test_and_set_bit(int nr, volatile void * addr) 106 - { 107 - unsigned long retval; 108 - unsigned long mask = 1 << (nr & 0x1f); 109 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 110 - 111 - retval = (mask & *a) != 0; 112 - *a |= mask; 113 - 114 - return retval; 115 - } 116 - 117 - static __inline__ int test_and_clear_bit(int nr, volatile void * addr) 118 - { 119 - unsigned long retval; 120 - unsigned long mask = 1 << (nr & 0x1f); 121 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 122 - unsigned long flags; 123 - 124 - local_irq_save(flags); 125 - retval = (mask & *a) != 0; 126 - *a &= ~mask; 127 - local_irq_restore(flags); 128 - 129 - return retval; 130 - } 131 - 132 - static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) 133 - { 134 - unsigned long mask = 1 << (nr & 0x1f); 135 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 136 - unsigned long old = *a; 137 - 138 - *a = 
old & ~mask; 139 - return (old & mask) != 0; 140 - } 141 - 142 - static __inline__ int test_and_change_bit(int nr, volatile void * addr) 143 - { 144 - unsigned long retval; 145 - unsigned long mask = 1 << (nr & 0x1f); 146 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 147 - unsigned long flags; 148 - 149 - local_irq_save(flags); 150 - 151 - retval = (mask & *a) != 0; 152 - *a ^= mask; 153 - local_irq_restore(flags); 154 - 155 - return retval; 156 - } 157 - 158 - /* 159 - * non-atomic version; can be reordered 160 - */ 161 - 162 - static __inline__ int __test_and_change_bit(int nr, volatile void *addr) 163 - { 164 - unsigned long mask = 1 << (nr & 0x1f); 165 - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); 166 - unsigned long old = *a; 167 - 168 - *a = old ^ mask; 169 - return (old & mask) != 0; 170 - } 171 - 172 - static __inline__ int test_bit(int nr, const volatile void *addr) 173 - { 174 - return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); 175 - } 29 + #include <asm-generic/bitops/atomic.h> 30 + #include <asm-generic/bitops/non-atomic.h> 176 31 177 32 #if XCHAL_HAVE_NSA 178 33 ··· 100 245 { 101 246 return __cntlz(x); 102 247 } 103 - #define fls64(x) generic_fls64(x) 104 - 105 - static __inline__ int 106 - find_next_bit(const unsigned long *addr, int size, int offset) 107 - { 108 - const unsigned long *p = addr + (offset >> 5); 109 - unsigned long result = offset & ~31UL; 110 - unsigned long tmp; 111 - 112 - if (offset >= size) 113 - return size; 114 - size -= result; 115 - offset &= 31UL; 116 - if (offset) { 117 - tmp = *p++; 118 - tmp &= ~0UL << offset; 119 - if (size < 32) 120 - goto found_first; 121 - if (tmp) 122 - goto found_middle; 123 - size -= 32; 124 - result += 32; 125 - } 126 - while (size >= 32) { 127 - if ((tmp = *p++) != 0) 128 - goto found_middle; 129 - result += 32; 130 - size -= 32; 131 - } 132 - if (!size) 133 - return result; 134 - tmp = *p; 135 - 136 - found_first: 137 - tmp &= ~0UL >> (32 - size); 138 
- if (tmp == 0UL) /* Are any bits set? */ 139 - return result + size; /* Nope. */ 140 - found_middle: 141 - return result + __ffs(tmp); 142 - } 143 - 144 - /** 145 - * find_first_bit - find the first set bit in a memory region 146 - * @addr: The address to start the search at 147 - * @size: The maximum size to search 148 - * 149 - * Returns the bit-number of the first set bit, not the number of the byte 150 - * containing a bit. 151 - */ 152 - 153 - #define find_first_bit(addr, size) \ 154 - find_next_bit((addr), (size), 0) 155 - 156 - static __inline__ int 157 - find_next_zero_bit(const unsigned long *addr, int size, int offset) 158 - { 159 - const unsigned long *p = addr + (offset >> 5); 160 - unsigned long result = offset & ~31UL; 161 - unsigned long tmp; 162 - 163 - if (offset >= size) 164 - return size; 165 - size -= result; 166 - offset &= 31UL; 167 - if (offset) { 168 - tmp = *p++; 169 - tmp |= ~0UL >> (32-offset); 170 - if (size < 32) 171 - goto found_first; 172 - if (~tmp) 173 - goto found_middle; 174 - size -= 32; 175 - result += 32; 176 - } 177 - while (size & ~31UL) { 178 - if (~(tmp = *p++)) 179 - goto found_middle; 180 - result += 32; 181 - size -= 32; 182 - } 183 - if (!size) 184 - return result; 185 - tmp = *p; 186 - 187 - found_first: 188 - tmp |= ~0UL << size; 189 - found_middle: 190 - return result + ffz(tmp); 191 - } 192 - 193 - #define find_first_zero_bit(addr, size) \ 194 - find_next_zero_bit((addr), (size), 0) 248 + #include <asm-generic/bitops/fls64.h> 249 + #include <asm-generic/bitops/find.h> 250 + #include <asm-generic/bitops/ext2-non-atomic.h> 195 251 196 252 #ifdef __XTENSA_EL__ 197 - # define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr)) 198 253 # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) 199 - # define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr)) 200 254 # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) 201 - # define ext2_test_bit(nr,addr) test_bit((nr), 
(addr)) 202 - # define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size)) 203 - # define ext2_find_next_zero_bit(addr, size, offset) \ 204 - find_next_zero_bit((addr), (size), (offset)) 205 255 #elif defined(__XTENSA_EB__) 206 - # define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr)) 207 256 # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) 208 - # define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 18, (addr)) 209 257 # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) 210 - # define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr)) 211 - # define ext2_find_first_zero_bit(addr, size) \ 212 - ext2_find_next_zero_bit((addr), (size), 0) 213 - 214 - static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) 215 - { 216 - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); 217 - unsigned long result = offset & ~31UL; 218 - unsigned long tmp; 219 - 220 - if (offset >= size) 221 - return size; 222 - size -= result; 223 - offset &= 31UL; 224 - if(offset) { 225 - /* We hold the little endian value in tmp, but then the 226 - * shift is illegal. So we could keep a big endian value 227 - * in tmp, like this: 228 - * 229 - * tmp = __swab32(*(p++)); 230 - * tmp |= ~0UL >> (32-offset); 231 - * 232 - * but this would decrease preformance, so we change the 233 - * shift: 234 - */ 235 - tmp = *(p++); 236 - tmp |= __swab32(~0UL >> (32-offset)); 237 - if(size < 32) 238 - goto found_first; 239 - if(~tmp) 240 - goto found_middle; 241 - size -= 32; 242 - result += 32; 243 - } 244 - while(size & ~31UL) { 245 - if(~(tmp = *(p++))) 246 - goto found_middle; 247 - result += 32; 248 - size -= 32; 249 - } 250 - if(!size) 251 - return result; 252 - tmp = *p; 253 - 254 - found_first: 255 - /* tmp is little endian, so we would have to swab the shift, 256 - * see above. 
But then we have to swab tmp below for ffz, so 257 - * we might as well do this here. 258 - */ 259 - return result + ffz(__swab32(tmp) | (~0UL << size)); 260 - found_middle: 261 - return result + ffz(__swab32(tmp)); 262 - } 263 - 264 258 #else 265 259 # error processor byte order undefined! 266 260 #endif 267 261 268 - 269 - #define hweight32(x) generic_hweight32(x) 270 - #define hweight16(x) generic_hweight16(x) 271 - #define hweight8(x) generic_hweight8(x) 272 - 273 - /* 274 - * Find the first bit set in a 140-bit bitmap. 275 - * The first 100 bits are unlikely to be set. 276 - */ 277 - 278 - static inline int sched_find_first_bit(const unsigned long *b) 279 - { 280 - if (unlikely(b[0])) 281 - return __ffs(b[0]); 282 - if (unlikely(b[1])) 283 - return __ffs(b[1]) + 32; 284 - if (unlikely(b[2])) 285 - return __ffs(b[2]) + 64; 286 - if (b[3]) 287 - return __ffs(b[3]) + 96; 288 - return __ffs(b[4]) + 128; 289 - } 290 - 291 - 292 - /* Bitmap functions for the minix filesystem. */ 293 - 294 - #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) 295 - #define minix_set_bit(nr,addr) __set_bit(nr,addr) 296 - #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) 297 - #define minix_test_bit(nr,addr) test_bit(nr,addr) 298 - #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) 262 + #include <asm-generic/bitops/hweight.h> 263 + #include <asm-generic/bitops/sched.h> 264 + #include <asm-generic/bitops/minix.h> 299 265 300 266 #endif /* __KERNEL__ */ 301 267