Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

alpha: cleanup in bitops.h

Remove 2 functions private to the alpha implementation,
in favor of similar functions in <linux/log2.h>.

Provide a more efficient version of the fls64 function
for pre-ev67 alphas.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Richard Henderson and committed by
Linus Torvalds
74fd1b68 f54496f5

+73 -24
+2 -1
arch/alpha/kernel/pci_iommu.c
··· 7 7 #include <linux/pci.h> 8 8 #include <linux/slab.h> 9 9 #include <linux/bootmem.h> 10 + #include <linux/log2.h> 10 11 11 12 #include <asm/io.h> 12 13 #include <asm/hwrpb.h> ··· 54 53 { 55 54 unsigned long mem = max_low_pfn << PAGE_SHIFT; 56 55 if (mem < max) 57 - max = 1UL << ceil_log2(mem); 56 + max = roundup_pow_of_two(mem); 58 57 return max; 59 58 } 60 59
+2 -1
arch/alpha/kernel/setup.c
··· 43 43 #include <linux/notifier.h> 44 44 #include <asm/setup.h> 45 45 #include <asm/io.h> 46 + #include <linux/log2.h> 46 47 47 48 extern struct atomic_notifier_head panic_notifier_list; 48 49 static int alpha_panic_event(struct notifier_block *, unsigned long, void *); ··· 1304 1303 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2; 1305 1304 1306 1305 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT) 1307 - maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT); 1306 + maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT); 1308 1307 1309 1308 /* Get the first block cached. */ 1310 1309 read_mem_block(__va(0), stride, size);
+2 -1
arch/alpha/lib/Makefile
··· 37 37 $(ev6-y)clear_page.o \ 38 38 $(ev6-y)copy_page.o \ 39 39 fpreg.o \ 40 - callback_srm.o srm_puts.o srm_printk.o 40 + callback_srm.o srm_puts.o srm_printk.o \ 41 + fls.o 41 42 42 43 lib-$(CONFIG_SMP) += dec_and_lock.o 43 44
+38
arch/alpha/lib/fls.c
··· 1 + /* 2 + * arch/alpha/lib/fls.c 3 + */ 4 + 5 + #include <linux/module.h> 6 + #include <asm/bitops.h> 7 + 8 + /* This is fls(x)-1, except zero is held to zero. This allows most 9 + efficent input into extbl, plus it allows easy handling of fls(0)=0. */ 10 + 11 + const unsigned char __flsm1_tab[256] = 12 + { 13 + 0, 14 + 0, 15 + 1, 1, 16 + 2, 2, 2, 2, 17 + 3, 3, 3, 3, 3, 3, 3, 3, 18 + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 19 + 20 + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 21 + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 22 + 23 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 24 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 25 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 26 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 27 + 28 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 29 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 30 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 31 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 32 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 33 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 34 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 35 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 36 + }; 37 + 38 + EXPORT_SYMBOL(__flsm1_tab);
+29 -21
include/asm-alpha/bitops.h
··· 313 313 * fls: find last bit set. 314 314 */ 315 315 #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) 316 - static inline int fls(int word) 316 + static inline int fls64(unsigned long word) 317 317 { 318 - return 64 - __kernel_ctlz(word & 0xffffffff); 318 + return 64 - __kernel_ctlz(word); 319 319 } 320 320 #else 321 - #include <asm-generic/bitops/fls.h> 322 - #endif 323 - #include <asm-generic/bitops/fls64.h> 321 + extern const unsigned char __flsm1_tab[256]; 324 322 325 - /* Compute powers of two for the given integer. */ 326 - static inline long floor_log2(unsigned long word) 323 + static inline int fls64(unsigned long x) 327 324 { 328 - #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) 329 - return 63 - __kernel_ctlz(word); 330 - #else 331 - long bit; 332 - for (bit = -1; word ; bit++) 333 - word >>= 1; 334 - return bit; 335 - #endif 325 + unsigned long t, a, r; 326 + 327 + t = __kernel_cmpbge (x, 0x0101010101010101); 328 + a = __flsm1_tab[t]; 329 + t = __kernel_extbl (x, a); 330 + r = a*8 + __flsm1_tab[t] + (x != 0); 331 + 332 + return r; 336 333 } 334 + #endif 337 335 338 - static inline long ceil_log2(unsigned long word) 336 + static inline int fls(int x) 339 337 { 340 - long bit = floor_log2(word); 341 - return bit + (word > (1UL << bit)); 338 + return fls64((unsigned int) x); 342 339 } 343 340 344 341 /* ··· 350 353 return __kernel_ctpop(w); 351 354 } 352 355 353 - #define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful) 354 - #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) 355 - #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) 356 + static inline unsigned int hweight32(unsigned int w) 357 + { 358 + return hweight64(w); 359 + } 360 + 361 + static inline unsigned int hweight16(unsigned int w) 362 + { 363 + return hweight64(w & 0xffff); 364 + } 365 + 366 + static inline unsigned int hweight8(unsigned int w) 367 + { 368 + return hweight64(w & 0xff); 369 + } 356 370 #else 357 371 #include 
<asm-generic/bitops/hweight.h> 358 372 #endif