Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: M68Knommu checksum annotations and cleanups.

* sanitize prototypes, annotated
* collapsed csum_partial_copy()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro; committed by David S. Miller.
59ed05a7 2061acaa

+37 -39
+1 -1
arch/m68knommu/kernel/m68k_ksyms.c
··· 38 38 EXPORT_SYMBOL(kernel_thread); 39 39 40 40 /* Networking helper routines. */ 41 - EXPORT_SYMBOL(csum_partial_copy); 41 + EXPORT_SYMBOL(csum_partial_copy_nocheck); 42 42 43 43 /* The following are special because they're not called 44 44 explicitly (the C compiler generates them). Fortunately,
+14 -14
arch/m68knommu/lib/checksum.c
··· 96 96 * This is a version of ip_compute_csum() optimized for IP headers, 97 97 * which always checksum on 4 octet boundaries. 98 98 */ 99 - unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) 99 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 100 100 { 101 - return ~do_csum(iph,ihl*4); 101 + return (__force __sum16)~do_csum(iph,ihl*4); 102 102 } 103 103 104 104 /* ··· 113 113 * 114 114 * it's best to have buff aligned on a 32-bit boundary 115 115 */ 116 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) 116 + __wsum csum_partial(const void *buff, int len, __wsum sum) 117 117 { 118 118 unsigned int result = do_csum(buff, len); 119 119 120 120 /* add in old sum, and carry.. */ 121 - result += sum; 122 - if (sum > result) 121 + result += (__force u32)sum; 122 + if ((__force u32)sum > result) 123 123 result += 1; 124 - return result; 124 + return (__force __wsum)result; 125 125 } 126 126 127 127 EXPORT_SYMBOL(csum_partial); ··· 130 130 * this routine is used for miscellaneous IP-like checksums, mainly 131 131 * in icmp.c 132 132 */ 133 - unsigned short ip_compute_csum(const unsigned char * buff, int len) 133 + __sum16 ip_compute_csum(const void *buff, int len) 134 134 { 135 - return ~do_csum(buff,len); 135 + return (__force __sum16)~do_csum(buff,len); 136 136 } 137 137 138 138 /* 139 139 * copy from fs while checksumming, otherwise like csum_partial 140 140 */ 141 141 142 - unsigned int 143 - csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, 144 - int len, int sum, int *csum_err) 142 + __wsum 143 + csum_partial_copy_from_user(const void __user *src, void *dst, 144 + int len, __wsum sum, int *csum_err) 145 145 { 146 146 if (csum_err) *csum_err = 0; 147 - memcpy(dst, src, len); 147 + memcpy(dst, (__force const void *)src, len); 148 148 return csum_partial(dst, len, sum); 149 149 } 150 150 ··· 152 152 * copy from ds while checksumming, otherwise like csum_partial 153 153 */ 154 154 155 - unsigned int 156 - csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, int sum) 155 + __wsum 156 + csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) 157 157 { 158 158 memcpy(dst, src, len); 159 159 return csum_partial(dst, len, sum);
+22 -24
include/asm-m68knommu/checksum.h
··· 15 15 * 16 16 * it's best to have buff aligned on a 32-bit boundary 17 17 */ 18 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); 18 + __wsum csum_partial(const void *buff, int len, __wsum sum); 19 19 20 20 /* 21 21 * the same as csum_partial, but copies from src while it ··· 25 25 * better 64-bit) boundary 26 26 */ 27 27 28 - unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, 29 - int len, int sum); 28 + __wsum csum_partial_copy_nocheck(const void *src, void *dst, 29 + int len, __wsum sum); 30 30 31 31 32 32 /* ··· 36 36 * better 64-bit) boundary 37 37 */ 38 38 39 - extern unsigned int csum_partial_copy_from_user(const unsigned char *src, 40 - unsigned char *dst, int len, int sum, int *csum_err); 39 + extern __wsum csum_partial_copy_from_user(const void __user *src, 40 + void *dst, int len, __wsum sum, int *csum_err); 41 41 42 - #define csum_partial_copy_nocheck(src, dst, len, sum) \ 43 - csum_partial_copy((src), (dst), (len), (sum)) 44 - 45 - unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl); 42 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl); 46 43 47 44 /* 48 45 * Fold a partial checksum 49 46 */ 50 47 51 - static inline unsigned int csum_fold(unsigned int sum) 48 + static inline __sum16 csum_fold(__wsum sum) 52 49 { 50 + unsigned int tmp = (__force u32)sum; 53 51 #ifdef CONFIG_COLDFIRE 54 - sum = (sum & 0xffff) + (sum >> 16); 55 - sum = (sum & 0xffff) + (sum >> 16); 52 + tmp = (tmp & 0xffff) + (tmp >> 16); 53 + tmp = (tmp & 0xffff) + (tmp >> 16); 54 + return (__force __sum16)~tmp; 56 55 #else 57 - unsigned int tmp = sum; 58 56 __asm__("swap %1\n\t" 59 57 "addw %1, %0\n\t" 60 58 "clrw %1\n\t" 61 59 "addxw %1, %0" : "=&d" (sum), "=&d" (tmp) : "0" (sum), "1" (sum)); 62 + return (__force __sum16)~sum; 64 63 #endif 65 - return ~sum; 66 64 } 67 65 68 66 ··· 69 71 * returns a 16-bit checksum, already complemented 70 72 */ 71 73 72 - static inline unsigned int 73 - csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, 74 - unsigned short proto, unsigned int sum) 74 + static inline __wsum 75 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, 76 + unsigned short proto, __wsum sum) 75 77 { 76 78 __asm__ ("addl %1,%0\n\t" 77 79 "addxl %4,%0\n\t" ··· 84 86 return sum; 85 87 } 86 88 87 - static inline unsigned short int 88 - csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, 89 - unsigned short proto, unsigned int sum) 89 + static inline __sum16 90 + csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, 91 + unsigned short proto, __wsum sum) 90 92 { 91 93 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 92 94 } ··· 96 98 * in icmp.c 97 99 */ 98 100 99 - extern unsigned short ip_compute_csum(const unsigned char * buff, int len); 101 + extern __sum16 ip_compute_csum(const void *buff, int len); 100 102 101 103 #define _HAVE_ARCH_IPV6_CSUM 102 - static __inline__ unsigned short int 103 - csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, 104 - __u32 len, unsigned short proto, unsigned int sum) 104 + static __inline__ __sum16 105 + csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, 106 + __u32 len, unsigned short proto, __wsum sum) 105 107 { 106 109 register unsigned long tmp; 107 109 __asm__("addl %2@,%0\n\t"