Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] bitops: sparc64: use generic bitops

- remove __{,test_and_}{set,clear,change}_bit() and test_bit()
- remove ffz()
- remove __ffs()
- remove generic_fls()
- remove generic_fls64()
- remove sched_find_first_bit()
- remove ffs()

- unless defined(ULTRA_HAS_POPULATION_COUNT)

- remove generic_hweight{64,32,16,8}()

- remove find_{next,first}{,_zero}_bit()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()
- remove minix_{test_and_set,set,test_and_clear,test,find_first_zero}_bit()

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Akinobu Mita and committed by
Linus Torvalds
2d78d4be d59288b7

+22 -339
+8
arch/sparc64/Kconfig
··· 162 162 bool 163 163 default y 164 164 165 + config GENERIC_FIND_NEXT_BIT 166 + bool 167 + default y 168 + 169 + config GENERIC_HWEIGHT 170 + bool 171 + default y if !ULTRA_HAS_POPULATION_COUNT 172 + 165 173 config GENERIC_CALIBRATE_DELAY 166 174 bool 167 175 default y
-5
arch/sparc64/kernel/sparc64_ksyms.c
··· 175 175 EXPORT_SYMBOL(clear_bit); 176 176 EXPORT_SYMBOL(change_bit); 177 177 178 - /* Bit searching */ 179 - EXPORT_SYMBOL(find_next_bit); 180 - EXPORT_SYMBOL(find_next_zero_bit); 181 - EXPORT_SYMBOL(find_next_zero_le_bit); 182 - 183 178 EXPORT_SYMBOL(ivector_table); 184 179 EXPORT_SYMBOL(enable_irq); 185 180 EXPORT_SYMBOL(disable_irq);
+1 -1
arch/sparc64/lib/Makefile
··· 14 14 NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ 15 15 NGpage.o NGbzero.o \ 16 16 copy_in_user.o user_fixup.o memmove.o \ 17 - mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o 17 + mcount.o ipcsum.o rwsem.o xor.o delay.o 18 18 19 19 obj-y += iomap.o
-127
arch/sparc64/lib/find_bit.c
··· 1 - #include <linux/bitops.h> 2 - 3 - /** 4 - * find_next_bit - find the next set bit in a memory region 5 - * @addr: The address to base the search on 6 - * @offset: The bitnumber to start searching at 7 - * @size: The maximum size to search 8 - */ 9 - unsigned long find_next_bit(const unsigned long *addr, unsigned long size, 10 - unsigned long offset) 11 - { 12 - const unsigned long *p = addr + (offset >> 6); 13 - unsigned long result = offset & ~63UL; 14 - unsigned long tmp; 15 - 16 - if (offset >= size) 17 - return size; 18 - size -= result; 19 - offset &= 63UL; 20 - if (offset) { 21 - tmp = *(p++); 22 - tmp &= (~0UL << offset); 23 - if (size < 64) 24 - goto found_first; 25 - if (tmp) 26 - goto found_middle; 27 - size -= 64; 28 - result += 64; 29 - } 30 - while (size & ~63UL) { 31 - if ((tmp = *(p++))) 32 - goto found_middle; 33 - result += 64; 34 - size -= 64; 35 - } 36 - if (!size) 37 - return result; 38 - tmp = *p; 39 - 40 - found_first: 41 - tmp &= (~0UL >> (64 - size)); 42 - if (tmp == 0UL) /* Are any bits set? */ 43 - return result + size; /* Nope. */ 44 - found_middle: 45 - return result + __ffs(tmp); 46 - } 47 - 48 - /* find_next_zero_bit() finds the first zero bit in a bit string of length 49 - * 'size' bits, starting the search at bit 'offset'. This is largely based 50 - * on Linus's ALPHA routines, which are pretty portable BTW. 
51 - */ 52 - 53 - unsigned long find_next_zero_bit(const unsigned long *addr, 54 - unsigned long size, unsigned long offset) 55 - { 56 - const unsigned long *p = addr + (offset >> 6); 57 - unsigned long result = offset & ~63UL; 58 - unsigned long tmp; 59 - 60 - if (offset >= size) 61 - return size; 62 - size -= result; 63 - offset &= 63UL; 64 - if (offset) { 65 - tmp = *(p++); 66 - tmp |= ~0UL >> (64-offset); 67 - if (size < 64) 68 - goto found_first; 69 - if (~tmp) 70 - goto found_middle; 71 - size -= 64; 72 - result += 64; 73 - } 74 - while (size & ~63UL) { 75 - if (~(tmp = *(p++))) 76 - goto found_middle; 77 - result += 64; 78 - size -= 64; 79 - } 80 - if (!size) 81 - return result; 82 - tmp = *p; 83 - 84 - found_first: 85 - tmp |= ~0UL << size; 86 - if (tmp == ~0UL) /* Are any bits zero? */ 87 - return result + size; /* Nope. */ 88 - found_middle: 89 - return result + ffz(tmp); 90 - } 91 - 92 - unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset) 93 - { 94 - unsigned long *p = addr + (offset >> 6); 95 - unsigned long result = offset & ~63UL; 96 - unsigned long tmp; 97 - 98 - if (offset >= size) 99 - return size; 100 - size -= result; 101 - offset &= 63UL; 102 - if(offset) { 103 - tmp = __swab64p(p++); 104 - tmp |= (~0UL >> (64-offset)); 105 - if(size < 64) 106 - goto found_first; 107 - if(~tmp) 108 - goto found_middle; 109 - size -= 64; 110 - result += 64; 111 - } 112 - while(size & ~63) { 113 - if(~(tmp = __swab64p(p++))) 114 - goto found_middle; 115 - result += 64; 116 - size -= 64; 117 - } 118 - if(!size) 119 - return result; 120 - tmp = __swab64p(p); 121 - found_first: 122 - tmp |= (~0UL << size); 123 - if (tmp == ~0UL) /* Are any bits zero? */ 124 - return result + size; /* Nope. */ 125 - found_middle: 126 - return result + ffz(tmp); 127 - }
+13 -206
include/asm-sparc64/bitops.h
··· 18 18 extern void clear_bit(unsigned long nr, volatile unsigned long *addr); 19 19 extern void change_bit(unsigned long nr, volatile unsigned long *addr); 20 20 21 - /* "non-atomic" versions... */ 22 - 23 - static inline void __set_bit(int nr, volatile unsigned long *addr) 24 - { 25 - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); 26 - 27 - *m |= (1UL << (nr & 63)); 28 - } 29 - 30 - static inline void __clear_bit(int nr, volatile unsigned long *addr) 31 - { 32 - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); 33 - 34 - *m &= ~(1UL << (nr & 63)); 35 - } 36 - 37 - static inline void __change_bit(int nr, volatile unsigned long *addr) 38 - { 39 - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); 40 - 41 - *m ^= (1UL << (nr & 63)); 42 - } 43 - 44 - static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) 45 - { 46 - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); 47 - unsigned long old = *m; 48 - unsigned long mask = (1UL << (nr & 63)); 49 - 50 - *m = (old | mask); 51 - return ((old & mask) != 0); 52 - } 53 - 54 - static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) 55 - { 56 - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); 57 - unsigned long old = *m; 58 - unsigned long mask = (1UL << (nr & 63)); 59 - 60 - *m = (old & ~mask); 61 - return ((old & mask) != 0); 62 - } 63 - 64 - static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) 65 - { 66 - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); 67 - unsigned long old = *m; 68 - unsigned long mask = (1UL << (nr & 63)); 69 - 70 - *m = (old ^ mask); 71 - return ((old & mask) != 0); 72 - } 21 + #include <asm-generic/bitops/non-atomic.h> 73 22 74 23 #ifdef CONFIG_SMP 75 24 #define smp_mb__before_clear_bit() membar_storeload_loadload() ··· 28 79 #define smp_mb__after_clear_bit() barrier() 29 80 #endif 30 81 31 - static inline int test_bit(int nr, __const__ volatile unsigned long *addr) 32 - { 33 - return (1UL & 
(addr[nr >> 6] >> (nr & 63))) != 0UL; 34 - } 35 - 36 - /* The easy/cheese version for now. */ 37 - static inline unsigned long ffz(unsigned long word) 38 - { 39 - unsigned long result; 40 - 41 - result = 0; 42 - while(word & 1) { 43 - result++; 44 - word >>= 1; 45 - } 46 - return result; 47 - } 48 - 49 - /** 50 - * __ffs - find first bit in word. 51 - * @word: The word to search 52 - * 53 - * Undefined if no bit exists, so code should check against 0 first. 54 - */ 55 - static inline unsigned long __ffs(unsigned long word) 56 - { 57 - unsigned long result = 0; 58 - 59 - while (!(word & 1UL)) { 60 - result++; 61 - word >>= 1; 62 - } 63 - return result; 64 - } 65 - 66 - /* 67 - * fls: find last bit set. 68 - */ 69 - 70 - #define fls(x) generic_fls(x) 71 - #define fls64(x) generic_fls64(x) 82 + #include <asm-generic/bitops/ffz.h> 83 + #include <asm-generic/bitops/__ffs.h> 84 + #include <asm-generic/bitops/fls.h> 85 + #include <asm-generic/bitops/fls64.h> 72 86 73 87 #ifdef __KERNEL__ 74 88 75 - /* 76 - * Every architecture must define this function. It's the fastest 77 - * way of searching a 140-bit bitmap where the first 100 bits are 78 - * unlikely to be set. It's guaranteed that at least one of the 140 79 - * bits is cleared. 80 - */ 81 - static inline int sched_find_first_bit(unsigned long *b) 82 - { 83 - if (unlikely(b[0])) 84 - return __ffs(b[0]); 85 - if (unlikely(((unsigned int)b[1]))) 86 - return __ffs(b[1]) + 64; 87 - if (b[1] >> 32) 88 - return __ffs(b[1] >> 32) + 96; 89 - return __ffs(b[2]) + 128; 90 - } 91 - 92 - /* 93 - * ffs: find first bit set. This is defined the same way as 94 - * the libc and compiler builtin ffs routines, therefore 95 - * differs in spirit from the above ffz (man ffs). 
96 - */ 97 - static inline int ffs(int x) 98 - { 99 - if (!x) 100 - return 0; 101 - return __ffs((unsigned long)x) + 1; 102 - } 89 + #include <asm-generic/bitops/sched.h> 90 + #include <asm-generic/bitops/ffs.h> 103 91 104 92 /* 105 93 * hweightN: returns the hamming weight (i.e. the number ··· 79 193 80 194 #else 81 195 82 - #define hweight64(x) generic_hweight64(x) 83 - #define hweight32(x) generic_hweight32(x) 84 - #define hweight16(x) generic_hweight16(x) 85 - #define hweight8(x) generic_hweight8(x) 196 + #include <asm-generic/bitops/hweight.h> 86 197 87 198 #endif 88 199 #endif /* __KERNEL__ */ 89 200 90 - /** 91 - * find_next_bit - find the next set bit in a memory region 92 - * @addr: The address to base the search on 93 - * @offset: The bitnumber to start searching at 94 - * @size: The maximum size to search 95 - */ 96 - extern unsigned long find_next_bit(const unsigned long *, unsigned long, 97 - unsigned long); 98 - 99 - /** 100 - * find_first_bit - find the first set bit in a memory region 101 - * @addr: The address to start the search at 102 - * @size: The maximum size to search 103 - * 104 - * Returns the bit-number of the first set bit, not the number of the byte 105 - * containing a bit. 106 - */ 107 - #define find_first_bit(addr, size) \ 108 - find_next_bit((addr), (size), 0) 109 - 110 - /* find_next_zero_bit() finds the first zero bit in a bit string of length 111 - * 'size' bits, starting the search at bit 'offset'. This is largely based 112 - * on Linus's ALPHA routines, which are pretty portable BTW. 
113 - */ 114 - 115 - extern unsigned long find_next_zero_bit(const unsigned long *, 116 - unsigned long, unsigned long); 117 - 118 - #define find_first_zero_bit(addr, size) \ 119 - find_next_zero_bit((addr), (size), 0) 120 - 121 - #define test_and_set_le_bit(nr,addr) \ 122 - test_and_set_bit((nr) ^ 0x38, (addr)) 123 - #define test_and_clear_le_bit(nr,addr) \ 124 - test_and_clear_bit((nr) ^ 0x38, (addr)) 125 - 126 - static inline int test_le_bit(int nr, __const__ unsigned long * addr) 127 - { 128 - int mask; 129 - __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; 130 - 131 - ADDR += nr >> 3; 132 - mask = 1 << (nr & 0x07); 133 - return ((mask & *ADDR) != 0); 134 - } 135 - 136 - #define find_first_zero_le_bit(addr, size) \ 137 - find_next_zero_le_bit((addr), (size), 0) 138 - 139 - extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long); 201 + #include <asm-generic/bitops/find.h> 140 202 141 203 #ifdef __KERNEL__ 142 204 143 - #define __set_le_bit(nr, addr) \ 144 - __set_bit((nr) ^ 0x38, (addr)) 145 - #define __clear_le_bit(nr, addr) \ 146 - __clear_bit((nr) ^ 0x38, (addr)) 147 - #define __test_and_clear_le_bit(nr, addr) \ 148 - __test_and_clear_bit((nr) ^ 0x38, (addr)) 149 - #define __test_and_set_le_bit(nr, addr) \ 150 - __test_and_set_bit((nr) ^ 0x38, (addr)) 205 + #include <asm-generic/bitops/ext2-non-atomic.h> 151 206 152 - #define ext2_set_bit(nr,addr) \ 153 - __test_and_set_le_bit((nr),(unsigned long *)(addr)) 154 207 #define ext2_set_bit_atomic(lock,nr,addr) \ 155 - test_and_set_le_bit((nr),(unsigned long *)(addr)) 156 - #define ext2_clear_bit(nr,addr) \ 157 - __test_and_clear_le_bit((nr),(unsigned long *)(addr)) 208 + test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) 158 209 #define ext2_clear_bit_atomic(lock,nr,addr) \ 159 - test_and_clear_le_bit((nr),(unsigned long *)(addr)) 160 - #define ext2_test_bit(nr,addr) \ 161 - test_le_bit((nr),(unsigned long *)(addr)) 162 - #define ext2_find_first_zero_bit(addr, 
size) \ 163 - find_first_zero_le_bit((unsigned long *)(addr), (size)) 164 - #define ext2_find_next_zero_bit(addr, size, off) \ 165 - find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) 210 + test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) 166 211 167 - /* Bitmap functions for the minix filesystem. */ 168 - #define minix_test_and_set_bit(nr,addr) \ 169 - __test_and_set_bit((nr),(unsigned long *)(addr)) 170 - #define minix_set_bit(nr,addr) \ 171 - __set_bit((nr),(unsigned long *)(addr)) 172 - #define minix_test_and_clear_bit(nr,addr) \ 173 - __test_and_clear_bit((nr),(unsigned long *)(addr)) 174 - #define minix_test_bit(nr,addr) \ 175 - test_bit((nr),(unsigned long *)(addr)) 176 - #define minix_find_first_zero_bit(addr,size) \ 177 - find_first_zero_bit((unsigned long *)(addr),(size)) 212 + #include <asm-generic/bitops/minix.h> 178 213 179 214 #endif /* __KERNEL__ */ 180 215