Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: POWERPC checksum annotations and cleanups.

* sanitize prototypes, annotate
* kill useless shifts

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro and committed by David S. Miller.
Commit IDs: 879178cf, 72685fcd

Diffstat: 1 file changed, 21 insertions(+), 38 deletions(-)
include/asm-powerpc/checksum.h | +21 -38
···
 14  14       * which always checksum on 4 octet boundaries. ihl is the number
 15  15       * of 32-bit words and is always >= 5.
 16  16       */
 17      -   extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
     17  +   extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 18  18
 19  19      /*
 20  20       * computes the checksum of the TCP/UDP pseudo-header
 21  21       * returns a 16-bit checksum, already complemented
 22  22       */
 23      -   extern unsigned short csum_tcpudp_magic(unsigned long saddr,
 24      -                                           unsigned long daddr,
     23  +   extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 25  24                                       unsigned short len,
 26  25                                       unsigned short proto,
 27      -                                   unsigned int sum);
     26  +                                   __wsum sum);
 28  27
 29  28      /*
 30  29       * computes the checksum of a memory block at buff, length len,
···
 37  38       *
 38  39       * it's best to have buff aligned on a 32-bit boundary
 39  40       */
 40      -   extern unsigned int csum_partial(const unsigned char * buff, int len,
 41      -                                    unsigned int sum);
     41  +   extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 42  42
 43  43      /*
 44  44       * Computes the checksum of a memory block at src, length len,
···
 49  51       * Like csum_partial, this must be called with even lengths,
 50  52       * except for the last fragment.
 51  53       */
 52      -   extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
 53      -                                                 int len, unsigned int sum,
     54  +   extern __wsum csum_partial_copy_generic(const void *src, void *dst,
     55  +                                           int len, __wsum sum,
 54  56                                                 int *src_err, int *dst_err);
 55  57      /*
 56  58       * the same as csum_partial, but copies from src to dst while it
 57  59       * checksums.
 58  60       */
 59      -   unsigned int csum_partial_copy_nocheck(const char *src,
 60      -                                          char *dst,
 61      -                                          int len,
 62      -                                          unsigned int sum);
 63      -
 64  61      #define csum_partial_copy_from_user(src, dst, len, sum, errp) \
 65      -           csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
     62  +           csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
 66  63
 67  64      #define csum_partial_copy_nocheck(src, dst, len, sum) \
 68  65              csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
···
 67  74       * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 68  75       * 1's complement 16-bit checksum.
 69  76       */
 70      -   static inline unsigned int csum_fold(unsigned int sum)
     77  +   static inline __sum16 csum_fold(__wsum sum)
 71  78      {
 72  79              unsigned int tmp;
 73  80
···
 76  83              /* if there is a carry from adding the two 16-bit halves,
 77  84                 it will carry from the lower half into the upper half,
 78  85                 giving us the correct sum in the upper half. */
 79      -           sum = ~(sum + tmp) >> 16;
 80      -           return sum;
     86  +           return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 81  87      }
 82  88
 83  89      /*
 84  90       * this routine is used for miscellaneous IP-like checksums, mainly
 85  91       * in icmp.c
 86  92       */
 87      -   static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
     93  +   static inline __sum16 ip_compute_csum(const void *buff, int len)
 88  94      {
 89  95              return csum_fold(csum_partial(buff, len, 0));
 90  96      }
 91  97
 92      -   #ifdef __powerpc64__
 93      -   static inline u32 csum_tcpudp_nofold(u32 saddr,
 94      -                                        u32 daddr,
     98  +   static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 95  99                                           unsigned short len,
 96 100                                           unsigned short proto,
 97      -                                       unsigned int sum)
    101  +                                       __wsum sum)
 98 102      {
 99      -           unsigned long s = sum;
    103  +   #ifdef __powerpc64__
    104  +           unsigned long s = (__force u32)sum;
100 105
101      -           s += saddr;
102      -           s += daddr;
103      -           s += (proto << 16) + len;
    106  +           s += (__force u32)saddr;
    107  +           s += (__force u32)daddr;
    108  +           s += proto + len;
104 109              s += (s >> 32);
105      -           return (u32) s;
106      -   }
    110  +           return (__force __wsum) s;
107 111     #else
108      -   static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
109      -                                                  unsigned long daddr,
110      -                                                  unsigned short len,
111      -                                                  unsigned short proto,
112      -                                                  unsigned int sum)
113      -   {
114 112             __asm__("\n\
115 113                     addc %0,%0,%1 \n\
116 114                     adde %0,%0,%2 \n\
···
109 125                     addze %0,%0 \n\
110 126                     "
111 127                     : "=r" (sum)
112      -                  : "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
113      -           return sum;
114      -   }
115      -
    128  +                  : "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
    129  +           return sum;
116 130     #endif
    131  +   }
117 132     #endif /* __KERNEL__ */
118 133     #endif