Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: H8300 checksum annotations and cleanups.

* sanitize prototypes and annotate
* collapse csum_partial_copy

NB: csum_partial() is almost certainly still buggy.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro, committed by David S. Miller.
db521083 8042c44b

+32 -30
+1 -1
arch/h8300/kernel/h8300_ksyms.c
··· 39 39 EXPORT_SYMBOL(disable_irq); 40 40 41 41 /* Networking helper routines. */ 42 - EXPORT_SYMBOL(csum_partial_copy); 42 + EXPORT_SYMBOL(csum_partial_copy_nocheck); 43 43 44 44 /* The following are special because they're not called 45 45 explicitly (the C compiler generates them). Fortunately,
+17 -12
arch/h8300/lib/checksum.c
··· 96 96 * This is a version of ip_compute_csum() optimized for IP headers, 97 97 * which always checksum on 4 octet boundaries. 98 98 */ 99 - unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) 99 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 100 100 { 101 - return ~do_csum(iph,ihl*4); 101 + return (__force __sum16)~do_csum(iph,ihl*4); 102 102 } 103 103 104 104 /* ··· 113 113 * 114 114 * it's best to have buff aligned on a 32-bit boundary 115 115 */ 116 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) 116 + /* 117 + * Egads... That thing apparently assumes that *all* checksums it ever sees will 118 + * be folded. Very likely a bug. 119 + */ 120 + __wsum csum_partial(const void *buff, int len, __wsum sum) 117 121 { 118 122 unsigned int result = do_csum(buff, len); 119 123 120 124 /* add in old sum, and carry.. */ 121 - result += sum; 125 + result += (__force u32)sum; 122 126 /* 16+c bits -> 16 bits */ 123 127 result = (result & 0xffff) + (result >> 16); 124 - return result; 128 + return (__force __wsum)result; 125 129 } 126 130 127 131 EXPORT_SYMBOL(csum_partial); ··· 134 130 * this routine is used for miscellaneous IP-like checksums, mainly 135 131 * in icmp.c 136 132 */ 137 - unsigned short ip_compute_csum(const unsigned char * buff, int len) 133 + __sum16 ip_compute_csum(const void *buff, int len) 138 134 { 139 - return ~do_csum(buff,len); 135 + return (__force __sum16)~do_csum(buff,len); 140 136 } 141 137 142 138 /* 143 139 * copy from fs while checksumming, otherwise like csum_partial 144 140 */ 145 141 146 - unsigned int 147 - csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *csum_err) 142 + __wsum 143 + csum_partial_copy_from_user(const void __user *src, void *dst, int len, 144 + __wsum sum, int *csum_err) 148 145 { 149 146 if (csum_err) *csum_err = 0; 150 - memcpy(dst, src, len); 147 + memcpy(dst, (__force const void *)src, len); 151 148 return csum_partial(dst, len, 
sum); 152 149 } 153 150 ··· 156 151 * copy from ds while checksumming, otherwise like csum_partial 157 152 */ 158 153 159 - unsigned int 160 - csum_partial_copy(const char *src, char *dst, int len, int sum) 154 + __wsum 155 + csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) 161 156 { 162 157 memcpy(dst, src, len); 163 158 return csum_partial(dst, len, sum);
+14 -17
include/asm-h8300/checksum.h
··· 13 13 * 14 14 * it's best to have buff aligned on a 32-bit boundary 15 15 */ 16 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); 16 + __wsum csum_partial(const void *buff, int len, __wsum sum); 17 17 18 18 /* 19 19 * the same as csum_partial, but copies from src while it ··· 23 23 * better 64-bit) boundary 24 24 */ 25 25 26 - unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum); 26 + __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); 27 27 28 28 29 29 /* ··· 33 33 * better 64-bit) boundary 34 34 */ 35 35 36 - extern unsigned int csum_partial_copy_from_user(const char *src, char *dst, 37 - int len, int sum, int *csum_err); 36 + extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, 37 + int len, __wsum sum, int *csum_err); 38 38 39 - #define csum_partial_copy_nocheck(src, dst, len, sum) \ 40 - csum_partial_copy((src), (dst), (len), (sum)) 41 - 42 - unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl); 39 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl); 43 40 44 41 45 42 /* 46 43 * Fold a partial checksum 47 44 */ 48 45 49 - static inline unsigned int csum_fold(unsigned int sum) 46 + static inline __sum16 csum_fold(__wsum sum) 50 47 { 51 48 __asm__("mov.l %0,er0\n\t" 52 49 "add.w e0,r0\n\t" ··· 55 58 : "=r"(sum) 56 59 : "0"(sum) 57 60 : "er0"); 58 - return ~sum; 61 + return (__force __sum16)~sum; 59 62 } 60 63 61 64 ··· 64 67 * returns a 16-bit checksum, already complemented 65 68 */ 66 69 67 - static inline unsigned int 68 - csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, 69 - unsigned short proto, unsigned int sum) 70 + static inline __wsum 71 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, 72 + unsigned short proto, __wsum sum) 70 73 { 71 74 __asm__ ("sub.l er0,er0\n\t" 72 75 "add.l %2,%0\n\t" ··· 85 88 return sum; 86 89 } 87 90 88 - static inline unsigned short int 89 - 
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, 90 - unsigned short proto, unsigned int sum) 91 + static inline __sum16 92 + csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, 93 + unsigned short proto, __wsum sum) 91 94 { 92 95 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 93 96 } ··· 97 100 * in icmp.c 98 101 */ 99 102 100 - extern unsigned short ip_compute_csum(const unsigned char * buff, int len); 103 + extern __sum16 ip_compute_csum(const void *buff, int len); 101 104 102 105 #endif /* _H8300_CHECKSUM_H */