Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] bitops: h8300: use generic bitops

- remove generic_ffs()
- remove find_{next,first}{,_zero}_bit()
- remove sched_find_first_bit()
- remove generic_hweight{32,16,8}()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()
- remove ext2_{set,clear}_bit_atomic()
- remove minix_{test,set,test_and_clear,test,find_first_zero}_bit()
- remove generic_fls()
- remove generic_fls64()

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Akinobu Mita; committed by Linus Torvalds.
f6e0213f 1f6d7a93

+17 -213
+8
arch/h8300/Kconfig
··· 29 29 bool 30 30 default n 31 31 32 + config GENERIC_FIND_NEXT_BIT 33 + bool 34 + default y 35 + 36 + config GENERIC_HWEIGHT 37 + bool 38 + default y 39 + 32 40 config GENERIC_CALIBRATE_DELAY 33 41 bool 34 42 default y
+9 -213
include/asm-h8300/bitops.h
··· 8 8 9 9 #include <linux/config.h> 10 10 #include <linux/compiler.h> 11 - #include <asm/byteorder.h> /* swab32 */ 12 11 #include <asm/system.h> 13 12 14 13 #ifdef __KERNEL__ ··· 176 177 #undef H8300_GEN_TEST_BITOP_CONST_INT 177 178 #undef H8300_GEN_TEST_BITOP 178 179 179 - #define find_first_zero_bit(addr, size) \ 180 - find_next_zero_bit((addr), (size), 0) 181 - 182 - #define ffs(x) generic_ffs(x) 180 + #include <asm-generic/bitops/ffs.h> 183 181 184 182 static __inline__ unsigned long __ffs(unsigned long word) 185 183 { ··· 192 196 return result; 193 197 } 194 198 195 - static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset) 196 - { 197 - unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); 198 - unsigned long result = offset & ~31UL; 199 - unsigned long tmp; 200 - 201 - if (offset >= size) 202 - return size; 203 - size -= result; 204 - offset &= 31UL; 205 - if (offset) { 206 - tmp = *(p++); 207 - tmp |= ~0UL >> (32-offset); 208 - if (size < 32) 209 - goto found_first; 210 - if (~tmp) 211 - goto found_middle; 212 - size -= 32; 213 - result += 32; 214 - } 215 - while (size & ~31UL) { 216 - if (~(tmp = *(p++))) 217 - goto found_middle; 218 - result += 32; 219 - size -= 32; 220 - } 221 - if (!size) 222 - return result; 223 - tmp = *p; 224 - 225 - found_first: 226 - tmp |= ~0UL << size; 227 - found_middle: 228 - return result + ffz(tmp); 229 - } 230 - 231 - static __inline__ unsigned long find_next_bit(const unsigned long *addr, 232 - unsigned long size, unsigned long offset) 233 - { 234 - unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); 235 - unsigned int result = offset & ~31UL; 236 - unsigned int tmp; 237 - 238 - if (offset >= size) 239 - return size; 240 - size -= result; 241 - offset &= 31UL; 242 - if (offset) { 243 - tmp = *(p++); 244 - tmp &= ~0UL << offset; 245 - if (size < 32) 246 - goto found_first; 247 - if (tmp) 248 - goto found_middle; 249 - size -= 32; 
250 - result += 32; 251 - } 252 - while (size >= 32) { 253 - if ((tmp = *p++) != 0) 254 - goto found_middle; 255 - result += 32; 256 - size -= 32; 257 - } 258 - if (!size) 259 - return result; 260 - tmp = *p; 261 - 262 - found_first: 263 - tmp &= ~0UL >> (32 - size); 264 - if (tmp == 0UL) 265 - return result + size; 266 - found_middle: 267 - return result + __ffs(tmp); 268 - } 269 - 270 - #define find_first_bit(addr, size) find_next_bit(addr, size, 0) 271 - 272 - /* 273 - * Every architecture must define this function. It's the fastest 274 - * way of searching a 140-bit bitmap where the first 100 bits are 275 - * unlikely to be set. It's guaranteed that at least one of the 140 276 - * bits is cleared. 277 - */ 278 - static inline int sched_find_first_bit(unsigned long *b) 279 - { 280 - if (unlikely(b[0])) 281 - return __ffs(b[0]); 282 - if (unlikely(b[1])) 283 - return __ffs(b[1]) + 32; 284 - if (unlikely(b[2])) 285 - return __ffs(b[2]) + 64; 286 - if (b[3]) 287 - return __ffs(b[3]) + 96; 288 - return __ffs(b[4]) + 128; 289 - } 290 - 291 - /* 292 - * hweightN: returns the hamming weight (i.e. 
the number 293 - * of bits set) of a N-bit word 294 - */ 295 - 296 - #define hweight32(x) generic_hweight32(x) 297 - #define hweight16(x) generic_hweight16(x) 298 - #define hweight8(x) generic_hweight8(x) 299 - 300 - static __inline__ int ext2_set_bit(int nr, volatile void * addr) 301 - { 302 - int mask, retval; 303 - unsigned long flags; 304 - volatile unsigned char *ADDR = (unsigned char *) addr; 305 - 306 - ADDR += nr >> 3; 307 - mask = 1 << (nr & 0x07); 308 - local_irq_save(flags); 309 - retval = (mask & *ADDR) != 0; 310 - *ADDR |= mask; 311 - local_irq_restore(flags); 312 - return retval; 313 - } 314 - #define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) 315 - 316 - static __inline__ int ext2_clear_bit(int nr, volatile void * addr) 317 - { 318 - int mask, retval; 319 - unsigned long flags; 320 - volatile unsigned char *ADDR = (unsigned char *) addr; 321 - 322 - ADDR += nr >> 3; 323 - mask = 1 << (nr & 0x07); 324 - local_irq_save(flags); 325 - retval = (mask & *ADDR) != 0; 326 - *ADDR &= ~mask; 327 - local_irq_restore(flags); 328 - return retval; 329 - } 330 - #define ext2_clear_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) 331 - 332 - static __inline__ int ext2_test_bit(int nr, const volatile void * addr) 333 - { 334 - int mask; 335 - const volatile unsigned char *ADDR = (const unsigned char *) addr; 336 - 337 - ADDR += nr >> 3; 338 - mask = 1 << (nr & 0x07); 339 - return ((mask & *ADDR) != 0); 340 - } 341 - 342 - #define ext2_find_first_zero_bit(addr, size) \ 343 - ext2_find_next_zero_bit((addr), (size), 0) 344 - 345 - static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) 346 - { 347 - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); 348 - unsigned long result = offset & ~31UL; 349 - unsigned long tmp; 350 - 351 - if (offset >= size) 352 - return size; 353 - size -= result; 354 - offset &= 31UL; 355 - if(offset) { 356 - /* We hold the little endian value in tmp, but then the 
357 - * shift is illegal. So we could keep a big endian value 358 - * in tmp, like this: 359 - * 360 - * tmp = __swab32(*(p++)); 361 - * tmp |= ~0UL >> (32-offset); 362 - * 363 - * but this would decrease performance, so we change the 364 - * shift: 365 - */ 366 - tmp = *(p++); 367 - tmp |= __swab32(~0UL >> (32-offset)); 368 - if(size < 32) 369 - goto found_first; 370 - if(~tmp) 371 - goto found_middle; 372 - size -= 32; 373 - result += 32; 374 - } 375 - while(size & ~31UL) { 376 - if(~(tmp = *(p++))) 377 - goto found_middle; 378 - result += 32; 379 - size -= 32; 380 - } 381 - if(!size) 382 - return result; 383 - tmp = *p; 384 - 385 - found_first: 386 - /* tmp is little endian, so we would have to swab the shift, 387 - * see above. But then we have to swab tmp below for ffz, so 388 - * we might as well do this here. 389 - */ 390 - return result + ffz(__swab32(tmp) | (~0UL << size)); 391 - found_middle: 392 - return result + ffz(__swab32(tmp)); 393 - } 394 - 395 - /* Bitmap functions for the minix filesystem. */ 396 - #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) 397 - #define minix_set_bit(nr,addr) __set_bit(nr,addr) 398 - #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) 399 - #define minix_test_bit(nr,addr) test_bit(nr,addr) 400 - #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) 199 + #include <asm-generic/bitops/find.h> 200 + #include <asm-generic/bitops/sched.h> 201 + #include <asm-generic/bitops/hweight.h> 202 + #include <asm-generic/bitops/ext2-non-atomic.h> 203 + #include <asm-generic/bitops/ext2-atomic.h> 204 + #include <asm-generic/bitops/minix.h> 401 205 402 206 #endif /* __KERNEL__ */ 403 207 404 - #define fls(x) generic_fls(x) 405 - #define fls64(x) generic_fls64(x) 208 + #include <asm-generic/bitops/fls.h> 209 + #include <asm-generic/bitops/fls64.h> 406 210 407 211 #endif /* _H8300_BITOPS_H */