Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: V850 checksum annotations and cleanups.

* sanitize prototypes, annotate
* collapse csum_partial_copy
* usual ntohs->shift

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro; committed by David S. Miller.
Commit 9d3d4195, parent abf419b8.

+31 -35
+1 -1
arch/v850/kernel/v850_ksyms.c
··· 24 24 EXPORT_SYMBOL (__bug); 25 25 26 26 /* Networking helper routines. */ 27 - EXPORT_SYMBOL (csum_partial_copy); 27 + EXPORT_SYMBOL (csum_partial_copy_nocheck); 28 28 EXPORT_SYMBOL (csum_partial_copy_from_user); 29 29 EXPORT_SYMBOL (ip_compute_csum); 30 30 EXPORT_SYMBOL (ip_fast_csum);
+13 -13
arch/v850/lib/checksum.c
··· 88 88 * This is a version of ip_compute_csum() optimized for IP headers, 89 89 * which always checksum on 4 octet boundaries. 90 90 */ 91 - unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) 91 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 92 92 { 93 - return ~do_csum(iph,ihl*4); 93 + return (__force __sum16)~do_csum(iph,ihl*4); 94 94 } 95 95 96 96 /* 97 97 * this routine is used for miscellaneous IP-like checksums, mainly 98 98 * in icmp.c 99 99 */ 100 - unsigned short ip_compute_csum(const unsigned char * buff, int len) 100 + __sum16 ip_compute_csum(const void *buff, int len) 101 101 { 102 - return ~do_csum(buff,len); 102 + return (__force __sum16)~do_csum(buff,len); 103 103 } 104 104 105 105 /* 106 106 * computes a partial checksum, e.g. for TCP/UDP fragments 107 107 */ 108 - unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum) 108 + __wsum csum_partial(const void *buff, int len, __wsum sum) 109 109 { 110 110 unsigned int result = do_csum(buff, len); 111 111 112 112 /* add in old sum, and carry.. */ 113 - result += sum; 114 - if(sum > result) 113 + result += (__force u32)sum; 114 + if ((__force u32)sum > result) 115 115 result += 1; 116 - return result; 116 + return (__force __wsum)result; 117 117 } 118 118 119 119 EXPORT_SYMBOL(csum_partial); ··· 121 121 /* 122 122 * copy while checksumming, otherwise like csum_partial 123 123 */ 124 - unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, 125 - int len, unsigned int sum) 124 + __wsum csum_partial_copy_nocheck(const void *src, void *dst, 125 + int len, __wsum sum) 126 126 { 127 127 /* 128 128 * It's 2:30 am and I don't feel like doing it real ... ··· 138 138 * Copy from userspace and compute checksum. If we catch an exception 139 139 * then zero the rest of the buffer. 
140 140 */ 141 - unsigned int csum_partial_copy_from_user (const unsigned char *src, 142 - unsigned char *dst, 143 - int len, unsigned int sum, 141 + __wsum csum_partial_copy_from_user (const void *src, 142 + void *dst, 143 + int len, __wsum sum, 144 144 int *err_ptr) 145 145 { 146 146 int missing;
+17 -21
include/asm-v850/checksum.h
··· 26 26 * 27 27 * it's best to have buff aligned on a 32-bit boundary 28 28 */ 29 - extern unsigned int csum_partial (const unsigned char * buff, int len, 30 - unsigned int sum); 29 + extern __wsum csum_partial(const void *buff, int len, __wsum sum); 31 30 32 31 /* 33 32 * the same as csum_partial, but copies from src while it ··· 35 36 * here even more important to align src and dst on a 32-bit (or even 36 37 * better 64-bit) boundary 37 38 */ 38 - extern unsigned csum_partial_copy (const unsigned char *src, 39 - unsigned char *dst, int len, unsigned sum); 39 + extern __wsum csum_partial_copy_nocheck(const void *src, 40 + void *dst, int len, __wsum sum); 40 41 41 42 42 43 /* ··· 45 46 * here even more important to align src and dst on a 32-bit (or even 46 47 * better 64-bit) boundary 47 48 */ 48 - extern unsigned csum_partial_copy_from_user (const unsigned char *src, 49 - unsigned char *dst, 50 - int len, unsigned sum, 49 + extern __wsum csum_partial_copy_from_user (const void *src, 50 + void *dst, 51 + int len, __wsum sum, 51 52 int *csum_err); 52 53 53 - #define csum_partial_copy_nocheck(src, dst, len, sum) \ 54 - csum_partial_copy ((src), (dst), (len), (sum)) 55 - 56 - unsigned short ip_fast_csum (unsigned char *iph, unsigned int ihl); 54 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl); 57 55 58 56 /* 59 57 * Fold a partial checksum 60 58 */ 61 - static inline unsigned int csum_fold (unsigned long sum) 59 + static inline __sum16 csum_fold (__wsum sum) 62 60 { 63 61 unsigned int result; 64 62 /* ··· 64 68 add %1, %0 H L H+L+C H+L 65 69 */ 66 70 asm ("hsw %1, %0; add %1, %0" : "=&r" (result) : "r" (sum)); 67 - return (~result) >> 16; 71 + return (__force __sum16)(~result >> 16); 68 72 } 69 73 70 74 ··· 72 76 * computes the checksum of the TCP/UDP pseudo-header 73 77 * returns a 16-bit checksum, already complemented 74 78 */ 75 - static inline unsigned int 76 - csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, 79 + static inline __wsum 80 + 
csum_tcpudp_nofold (__be32 saddr, __be32 daddr, 77 81 unsigned short len, 78 - unsigned short proto, unsigned int sum) 82 + unsigned short proto, __wsum sum) 79 83 { 80 84 int __carry; 81 85 __asm__ ("add %2, %0;" ··· 89 93 "add %1, %0" 90 94 : "=&r" (sum), "=&r" (__carry) 91 95 : "r" (daddr), "r" (saddr), 92 - "r" (ntohs (len) + (proto << 8)), 96 + "r" ((len + proto) << 8), 93 97 "0" (sum)); 94 98 return sum; 95 99 } 96 100 97 - static inline unsigned short int 98 - csum_tcpudp_magic (unsigned long saddr, unsigned long daddr, 101 + static inline __sum16 102 + csum_tcpudp_magic (__be32 saddr, __be32 daddr, 99 103 unsigned short len, 100 - unsigned short proto, unsigned int sum) 104 + unsigned short proto, __wsum sum) 101 105 { 102 106 return csum_fold (csum_tcpudp_nofold (saddr, daddr, len, proto, sum)); 103 107 } ··· 106 110 * this routine is used for miscellaneous IP-like checksums, mainly 107 111 * in icmp.c 108 112 */ 109 - extern unsigned short ip_compute_csum (const unsigned char * buff, int len); 113 + extern __sum16 ip_compute_csum(const void *buff, int len); 110 114 111 115 112 116 #endif /* __V850_CHECKSUM_H__ */