Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: M68K checksum annotations and cleanups.

* sanitize prototypes, annotate

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Al Viro and committed by
David S. Miller
2061acaa 85d20dee

+28 -31
+6 -7
arch/m68k/lib/checksum.c
··· 39 39 * computes a partial checksum, e.g. for TCP/UDP fragments 40 40 */ 41 41 42 - unsigned int 43 - csum_partial (const unsigned char *buff, int len, unsigned int sum) 42 + __wsum csum_partial(const void *buff, int len, __wsum sum) 44 43 { 45 44 unsigned long tmp1, tmp2; 46 45 /* ··· 132 133 * copy from user space while checksumming, with exception handling. 133 134 */ 134 135 135 - unsigned int 136 - csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, 137 - int len, int sum, int *csum_err) 136 + __wsum 137 + csum_partial_copy_from_user(const void __user *src, void *dst, 138 + int len, __wsum sum, int *csum_err) 138 139 { 139 140 /* 140 141 * GCC doesn't like more than 10 operands for the asm ··· 324 325 * copy from kernel space while checksumming, otherwise like csum_partial 325 326 */ 326 327 327 - unsigned int 328 - csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, int sum) 328 + __wsum 329 + csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) 329 330 { 330 331 unsigned long tmp1, tmp2; 331 332 __asm__("movel %2,%4\n\t"
+22 -24
include/asm-m68k/checksum.h
··· 15 15 * 16 16 * it's best to have buff aligned on a 32-bit boundary 17 17 */ 18 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); 18 + __wsum csum_partial(const void *buff, int len, __wsum sum); 19 19 20 20 /* 21 21 * the same as csum_partial, but copies from src while it ··· 25 25 * better 64-bit) boundary 26 26 */ 27 27 28 - extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, 29 - unsigned char *dst, 30 - int len, int sum, 28 + extern __wsum csum_partial_copy_from_user(const void __user *src, 29 + void *dst, 30 + int len, __wsum sum, 31 31 int *csum_err); 32 32 33 - extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, 34 - unsigned char *dst, int len, 35 - int sum); 33 + extern __wsum csum_partial_copy_nocheck(const void *src, 34 + void *dst, int len, 35 + __wsum sum); 36 36 37 37 /* 38 38 * This is a version of ip_compute_csum() optimized for IP headers, 39 39 * which always checksum on 4 octet boundaries. 
40 40 * 41 41 */ 42 - static inline unsigned short 43 - ip_fast_csum(unsigned char *iph, unsigned int ihl) 42 + static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 44 43 { 45 44 unsigned int sum = 0; 46 45 unsigned long tmp; ··· 57 58 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp) 58 59 : "0" (sum), "1" (iph), "2" (ihl) 59 60 : "memory"); 60 - return ~sum; 61 + return (__force __sum16)~sum; 61 62 } 62 63 63 64 /* 64 65 * Fold a partial checksum 65 66 */ 66 67 67 - static inline unsigned int csum_fold(unsigned int sum) 68 + static inline __sum16 csum_fold(__wsum sum) 68 69 { 69 - unsigned int tmp = sum; 70 + unsigned int tmp = (__force u32)sum; 70 71 __asm__("swap %1\n\t" 71 72 "addw %1, %0\n\t" 72 73 "clrw %1\n\t" 73 74 "addxw %1, %0" 74 75 : "=&d" (sum), "=&d" (tmp) 75 76 : "0" (sum), "1" (tmp)); 76 - return ~sum; 77 + return (__force __sum16)~sum; 77 78 } 78 79 79 80 80 - static inline unsigned int 81 - csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, 82 - unsigned short proto, unsigned int sum) 81 + static inline __wsum 82 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, 83 + unsigned short proto, __wsum sum) 83 84 { 84 85 __asm__ ("addl %2,%0\n\t" 85 86 "addxl %3,%0\n\t" ··· 97 98 * computes the checksum of the TCP/UDP pseudo-header 98 99 * returns a 16-bit checksum, already complemented 99 100 */ 100 - static inline unsigned short int 101 - csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, 102 - unsigned short proto, unsigned int sum) 101 + static inline __sum16 102 + csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, 103 + unsigned short proto, __wsum sum) 103 104 { 104 105 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 105 106 } ··· 109 110 * in icmp.c 110 111 */ 111 112 112 - static inline unsigned short 113 - ip_compute_csum(unsigned char * buff, int len) 113 + static inline __sum16 ip_compute_csum(const void *buff, int len) 114 114 { 115 115 return csum_fold (csum_partial(buff, len, 0)); 116 116 } 117 117 118 118 #define _HAVE_ARCH_IPV6_CSUM 119 - static __inline__ unsigned short int 120 - csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, 121 - __u32 len, unsigned short proto, unsigned int sum) 119 + static __inline__ __sum16 120 + csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, 121 + __u32 len, unsigned short proto, __wsum sum) 122 122 { 123 123 register unsigned long tmp; 124 124 __asm__("addl %2@,%0\n\t"