Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

parisc: Convert to BIT_MASK() and BIT_WORD()

Drop our own open-coded implementation for setting bits and use the
kernel-provided BIT_MASK() and BIT_WORD() macros instead.

Signed-off-by: Helge Deller <deller@gmx.de>

+19 -34
+13 -28
arch/parisc/include/asm/bitops.h
··· 12 12 #include <asm/barrier.h> 13 13 #include <linux/atomic.h> 14 14 15 - /* 16 - * HP-PARISC specific bit operations 17 - * for a detailed description of the functions please refer 18 - * to include/asm-i386/bitops.h or kerneldoc 19 - */ 20 - 21 - #if __BITS_PER_LONG == 64 22 - #define SHIFT_PER_LONG 6 23 - #else 24 - #define SHIFT_PER_LONG 5 25 - #endif 26 - 27 - #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) 28 - 29 - 30 15 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion 31 16 * on use of volatile and __*_bit() (set/clear/change): 32 17 * *_bit() want use of volatile. ··· 20 35 21 36 static __inline__ void set_bit(int nr, volatile unsigned long * addr) 22 37 { 23 - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 38 + unsigned long mask = BIT_MASK(nr); 24 39 unsigned long flags; 25 40 26 - addr += (nr >> SHIFT_PER_LONG); 41 + addr += BIT_WORD(nr); 27 42 _atomic_spin_lock_irqsave(addr, flags); 28 43 *addr |= mask; 29 44 _atomic_spin_unlock_irqrestore(addr, flags); ··· 31 46 32 47 static __inline__ void clear_bit(int nr, volatile unsigned long * addr) 33 48 { 34 - unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); 49 + unsigned long mask = BIT_MASK(nr); 35 50 unsigned long flags; 36 51 37 - addr += (nr >> SHIFT_PER_LONG); 52 + addr += BIT_WORD(nr); 38 53 _atomic_spin_lock_irqsave(addr, flags); 39 - *addr &= mask; 54 + *addr &= ~mask; 40 55 _atomic_spin_unlock_irqrestore(addr, flags); 41 56 } 42 57 43 58 static __inline__ void change_bit(int nr, volatile unsigned long * addr) 44 59 { 45 - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 60 + unsigned long mask = BIT_MASK(nr); 46 61 unsigned long flags; 47 62 48 - addr += (nr >> SHIFT_PER_LONG); 63 + addr += BIT_WORD(nr); 49 64 _atomic_spin_lock_irqsave(addr, flags); 50 65 *addr ^= mask; 51 66 _atomic_spin_unlock_irqrestore(addr, flags); ··· 53 68 54 69 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) 55 70 { 56 - unsigned long mask = 
1UL << CHOP_SHIFTCOUNT(nr); 71 + unsigned long mask = BIT_MASK(nr); 57 72 unsigned long old; 58 73 unsigned long flags; 59 74 int set; 60 75 61 - addr += (nr >> SHIFT_PER_LONG); 76 + addr += BIT_WORD(nr); 62 77 _atomic_spin_lock_irqsave(addr, flags); 63 78 old = *addr; 64 79 set = (old & mask) ? 1 : 0; ··· 71 86 72 87 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) 73 88 { 74 - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 89 + unsigned long mask = BIT_MASK(nr); 75 90 unsigned long old; 76 91 unsigned long flags; 77 92 int set; 78 93 79 - addr += (nr >> SHIFT_PER_LONG); 94 + addr += BIT_WORD(nr); 80 95 _atomic_spin_lock_irqsave(addr, flags); 81 96 old = *addr; 82 97 set = (old & mask) ? 1 : 0; ··· 89 104 90 105 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) 91 106 { 92 - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 107 + unsigned long mask = BIT_MASK(nr); 93 108 unsigned long oldbit; 94 109 unsigned long flags; 95 110 96 - addr += (nr >> SHIFT_PER_LONG); 111 + addr += BIT_WORD(nr); 97 112 _atomic_spin_lock_irqsave(addr, flags); 98 113 oldbit = *addr; 99 114 *addr = oldbit ^ mask;
+6 -6
arch/parisc/mm/init.c
··· 750 750 free_space_ids--; 751 751 752 752 index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index); 753 - space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1))); 753 + space_id[BIT_WORD(index)] |= BIT_MASK(index); 754 754 space_id_index = index; 755 755 756 756 spin_unlock(&sid_lock); ··· 761 761 void free_sid(unsigned long spaceid) 762 762 { 763 763 unsigned long index = spaceid >> SPACEID_SHIFT; 764 - unsigned long *dirty_space_offset; 764 + unsigned long *dirty_space_offset, mask; 765 765 766 - dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG); 767 - index &= (BITS_PER_LONG - 1); 766 + dirty_space_offset = &dirty_space_id[BIT_WORD(index)]; 767 + mask = BIT_MASK(index); 768 768 769 769 spin_lock(&sid_lock); 770 770 771 - BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */ 771 + BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */ 772 772 773 - *dirty_space_offset |= (1L << index); 773 + *dirty_space_offset |= mask; 774 774 dirty_space_ids++; 775 775 776 776 spin_unlock(&sid_lock);