Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: PARISC checksum annotations and cleanups.

* sanitized prototypes, annotated
* kill shift-by-16 in checksum calculation

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro, committed by David S. Miller.
7814e4b6 8e3d8433

+36 -36
+10 -7
arch/parisc/lib/checksum.c
··· 101 101 /* 102 102 * computes a partial checksum, e.g. for TCP/UDP fragments 103 103 */ 104 - unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum) 104 + /* 105 + * why bother folding? 106 + */ 107 + __wsum csum_partial(const void *buff, int len, __wsum sum) 105 108 { 106 109 unsigned int result = do_csum(buff, len); 107 110 addc(result, sum); 108 - return from32to16(result); 111 + return (__force __wsum)from32to16(result); 109 112 } 110 113 111 114 EXPORT_SYMBOL(csum_partial); ··· 116 113 /* 117 114 * copy while checksumming, otherwise like csum_partial 118 115 */ 119 - unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, 120 - int len, unsigned int sum) 116 + __wsum csum_partial_copy_nocheck(const void *src, void *dst, 117 + int len, __wsum sum) 121 118 { 122 119 /* 123 120 * It's 2:30 am and I don't feel like doing it real ... ··· 134 131 * Copy from userspace and compute checksum. If we catch an exception 135 132 * then zero the rest of the buffer. 136 133 */ 137 - unsigned int csum_partial_copy_from_user(const unsigned char __user *src, 138 - unsigned char *dst, int len, 139 - unsigned int sum, int *err_ptr) 134 + __wsum csum_partial_copy_from_user(const void __user *src, 135 + void *dst, int len, 136 + __wsum sum, int *err_ptr) 140 137 { 141 138 int missing; 142 139
+26 -29
include/asm-parisc/checksum.h
··· 15 15 * 16 16 * it's best to have buff aligned on a 32-bit boundary 17 17 */ 18 - extern unsigned int csum_partial(const unsigned char *, int, unsigned int); 18 + extern __wsum csum_partial(const void *, int, __wsum); 19 19 20 20 /* 21 21 * The same as csum_partial, but copies from src while it checksums. ··· 23 23 * Here even more important to align src and dst on a 32-bit (or even 24 24 * better 64-bit) boundary 25 25 */ 26 - extern unsigned int csum_partial_copy_nocheck(const unsigned char *, unsigned char *, 27 - int, unsigned int); 26 + extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum); 28 27 29 28 /* 30 29 * this is a new version of the above that records errors it finds in *errp, 31 30 * but continues and zeros the rest of the buffer. 32 31 */ 33 - extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, 34 - unsigned char *dst, int len, unsigned int sum, int *errp); 32 + extern __wsum csum_partial_copy_from_user(const void __user *src, 33 + void *dst, int len, __wsum sum, int *errp); 35 34 36 35 /* 37 36 * Optimized for IP headers, which always checksum on 4 octet boundaries. 
··· 38 39 * Written by Randolph Chung <tausq@debian.org>, and then mucked with by 39 40 * LaMont Jones <lamont@debian.org> 40 41 */ 41 - static inline unsigned short ip_fast_csum(unsigned char * iph, 42 - unsigned int ihl) { 42 + static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 43 + { 43 44 unsigned int sum; 44 - 45 45 46 46 __asm__ __volatile__ ( 47 47 " ldws,ma 4(%1), %0\n" ··· 67 69 : "1" (iph), "2" (ihl) 68 70 : "r19", "r20", "r21" ); 69 71 70 - return(sum); 72 + return (__force __sum16)sum; 71 73 } 72 74 73 75 /* 74 76 * Fold a partial checksum 75 77 */ 76 - static inline unsigned int csum_fold(unsigned int sum) 78 + static inline __sum16 csum_fold(__wsum csum) 77 79 { 80 + u32 sum = (__force u32)csum; 78 81 /* add the swapped two 16-bit halves of sum, 79 82 a possible carry from adding the two 16-bit halves, 80 83 will carry from the lower half into the upper half, 81 84 giving us the correct sum in the upper half. */ 82 85 sum += (sum << 16) + (sum >> 16); 83 - return (~sum) >> 16; 86 + return (__force __sum16)(~sum >> 16); 84 87 } 85 88 86 - static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, 87 - unsigned long daddr, 89 + static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 88 90 unsigned short len, 89 91 unsigned short proto, 90 - unsigned int sum) 92 + __wsum sum) 91 93 { 92 94 __asm__( 93 95 " add %1, %0, %0\n" ··· 95 97 " addc %3, %0, %0\n" 96 98 " addc %%r0, %0, %0\n" 97 99 : "=r" (sum) 98 - : "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum)); 99 - return sum; 100 + : "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum)); 101 + return sum; 100 102 } 101 103 102 104 /* 103 105 * computes the checksum of the TCP/UDP pseudo-header 104 106 * returns a 16-bit checksum, already complemented 105 107 */ 106 - static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, 107 - unsigned long daddr, 108 + static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, 108 109 unsigned short 
len, 109 110 unsigned short proto, 110 - unsigned int sum) 111 + __wsum sum) 111 112 { 112 113 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 113 114 } ··· 115 118 * this routine is used for miscellaneous IP-like checksums, mainly 116 119 * in icmp.c 117 120 */ 118 - static inline unsigned short ip_compute_csum(unsigned char * buf, int len) { 121 + static inline __sum16 ip_compute_csum(const void *buf, int len) 122 + { 119 123 return csum_fold (csum_partial(buf, len, 0)); 120 124 } 121 125 122 126 123 127 #define _HAVE_ARCH_IPV6_CSUM 124 - static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, 125 - struct in6_addr *daddr, 126 - __u16 len, 127 - unsigned short proto, 128 - unsigned int sum) 128 + static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, 129 + const struct in6_addr *daddr, 130 + __u32 len, unsigned short proto, 131 + __wsum sum) 129 132 { 130 133 __asm__ __volatile__ ( 131 134 ··· 190 193 * Copy and checksum to user 191 194 */ 192 195 #define HAVE_CSUM_COPY_USER 193 - static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src, 194 - unsigned char __user *dst, 195 - int len, int sum, 196 + static __inline__ __wsum csum_and_copy_to_user(const void *src, 197 + void __user *dst, 198 + int len, __wsum sum, 196 199 int *err_ptr) 197 200 { 198 201 /* code stolen from include/asm-mips64 */ ··· 200 203 201 204 if (copy_to_user(dst, src, len)) { 202 205 *err_ptr = -EFAULT; 203 - return -1; 206 + return (__force __wsum)-1; 204 207 } 205 208 206 209 return sum;