Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.15-rc6 119 lines 2.9 kB view raw
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

/*
 * Word-at-a-time interfaces for PowerPC.
 *
 * These helpers let callers load one unsigned long worth of string
 * bytes at a time and detect/locate the first zero byte in it without
 * a per-byte loop.  Two implementations are provided, selected by the
 * target byte order:
 *  - big-endian: the string's first byte is the most significant, so
 *    the first zero byte is found by counting leading zero bits;
 *  - little-endian: the string's first byte is the least significant,
 *    so the first zero byte is found from the low end of the mask.
 */

#include <linux/kernel.h>
#include <asm/asm-compat.h>

#ifdef __BIG_ENDIAN__

struct word_at_a_time {
	/* high_bits = REPEAT_BYTE(0xfe) + 1, low_bits = REPEAT_BYTE(0x7f) */
	const unsigned long high_bits, low_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }

/* Bit set in the bytes that have a zero */
static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
{
	unsigned long mask = (val & c->low_bits) + c->low_bits;
	return ~(mask | rhs);
}

/* The mask produced by prep_zero_mask() is already in its final form */
#define create_zero_mask(mask) (mask)

/*
 * Byte index (0 = first string byte) of the first zero byte: count
 * leading zero bits with cntlzw/cntlzd (PPC_CNTLZL selects the
 * word/doubleword form) and convert bits to bytes with >> 3.
 */
static inline long find_zero(unsigned long mask)
{
	long leading_zero_bits;

	asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
	return leading_zero_bits >> 3;
}

/*
 * Nonzero iff 'val' contains at least one zero byte.  Also stashes an
 * intermediate value in *data for the later prep_zero_mask() call.
 */
static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
	unsigned long rhs = val | c->low_bits;
	*data = rhs;
	return (val + c->high_bits) & ~rhs;
}

#else

struct word_at_a_time {
	/* one_bits = 0x01 in every byte, high_bits = 0x80 in every byte */
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

#ifdef CONFIG_64BIT

/* Alan Modra's little-endian strlen tail for 64-bit */
#define create_zero_mask(mask) (mask)

/*
 * Byte index of the first (lowest) zero byte.  The asm computes
 * (mask - 1) & ~mask — the run of one-bits strictly below the lowest
 * set bit of 'mask' — then counts them with popcntd; >> 3 converts
 * that bit count to a byte index.
 */
static inline unsigned long find_zero(unsigned long mask)
{
	unsigned long leading_zero_bits;
	long trailing_zero_bit_mask;

	asm ("addi %1,%2,-1\n\t"
	     "andc %1,%1,%2\n\t"
	     "popcntd %0,%1"
	     : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
	     : "r" (mask));
	return leading_zero_bits >> 3;
}

#else /* 32-bit case */

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}

/*
 * Turn the 0x80-per-zero-byte form into a filled bytemask:
 * (bits - 1) & ~bits keeps everything below the lowest 0x80 marker,
 * and >> 7 aligns it so count_masked_bytes() can map it to a count.
 */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}

#endif /* CONFIG_64BIT */

/*
 * Return nonzero if it has a zero.
 *
 * Classic (a - 0x01..01) & ~a & 0x80..80 test: sets 0x80 in the first
 * zero byte of 'a'.  Bytes above the first zero byte may also get
 * spurious 0x80 markers from borrow propagation, which is harmless
 * since callers only locate the lowest marker.
 */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

/* No refinement needed on little-endian: has_zero()'s mask is final */
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

/*
 * The mask we created is directly usable as a bytemask.
 *
 * NOTE(review): that holds for the 32-bit path, where create_zero_mask()
 * produces a filled bytemask.  On CONFIG_64BIT, create_zero_mask() is
 * the identity, so 'mask' here is still in the 0x80-per-zero-byte form
 * rather than a byte fill — TODO confirm callers of zero_bytemask()
 * expect that on 64-bit little-endian.
 */
#define zero_bytemask(mask) (mask)

#endif /* __BIG_ENDIAN__ */

#endif /* _ASM_WORD_AT_A_TIME_H */