Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: Cris checksum annotations and cleanups.

* sanitize prototypes and annotate
* kill cast-as-lvalue abuses in csum_partial()
* usual ntohs-equals-shift for checksum purposes

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro and committed by David S. Miller.
3532010b 9be259aa

+57 -59
+32 -30
arch/cris/arch-v10/lib/old_checksum.c
··· 47 47 48 48 #include <asm/delay.h> 49 49 50 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) 50 + __wsum csum_partial(const void *p, int len, __wsum __sum) 51 51 { 52 - /* 53 - * Experiments with ethernet and slip connections show that buff 54 - * is aligned on either a 2-byte or 4-byte boundary. 55 - */ 56 - const unsigned char *endMarker = buff + len; 57 - const unsigned char *marker = endMarker - (len % 16); 52 + u32 sum = (__force u32)__sum; 53 + const u16 *buff = p; 54 + /* 55 + * Experiments with ethernet and slip connections show that buff 56 + * is aligned on either a 2-byte or 4-byte boundary. 57 + */ 58 + const void *endMarker = p + len; 59 + const void *marker = endMarker - (len % 16); 58 60 #if 0 59 - if((int)buff & 0x3) 60 - printk("unaligned buff %p\n", buff); 61 - __delay(900); /* extra delay of 90 us to test performance hit */ 61 + if((int)buff & 0x3) 62 + printk("unaligned buff %p\n", buff); 63 + __delay(900); /* extra delay of 90 us to test performance hit */ 62 64 #endif 63 - BITON; 64 - while (buff < marker) { 65 - sum += *((unsigned short *)buff)++; 66 - sum += *((unsigned short *)buff)++; 67 - sum += *((unsigned short *)buff)++; 68 - sum += *((unsigned short *)buff)++; 69 - sum += *((unsigned short *)buff)++; 70 - sum += *((unsigned short *)buff)++; 71 - sum += *((unsigned short *)buff)++; 72 - sum += *((unsigned short *)buff)++; 73 - } 74 - marker = endMarker - (len % 2); 75 - while(buff < marker) { 76 - sum += *((unsigned short *)buff)++; 77 - } 78 - if(endMarker - buff > 0) { 79 - sum += *buff; /* add extra byte seperately */ 80 - } 81 - BITOFF; 82 - return(sum); 65 + BITON; 66 + while (buff < marker) { 67 + sum += *buff++; 68 + sum += *buff++; 69 + sum += *buff++; 70 + sum += *buff++; 71 + sum += *buff++; 72 + sum += *buff++; 73 + sum += *buff++; 74 + sum += *buff++; 75 + } 76 + marker = endMarker - (len % 2); 77 + while (buff < marker) 78 + sum += *buff++; 79 + 80 + if (endMarker > buff) 81 + sum += 
*(const u8 *)buff; /* add extra byte seperately */ 82 + 83 + BITOFF; 84 + return (__force __wsum)sum; 83 85 } 84 86 85 87 EXPORT_SYMBOL(csum_partial);
+5 -5
include/asm-cris/arch-v10/checksum.h
··· 8 8 * to split all of those into 16-bit components, then add. 9 9 */ 10 10 11 - static inline unsigned int 12 - csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, 13 - unsigned short proto, unsigned int sum) 11 + static inline __wsum 12 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, 13 + unsigned short proto, __wsum sum) 14 14 { 15 - int res; 15 + __wsum res; 16 16 __asm__ ("add.d %2, %0\n\t" 17 17 "ax\n\t" 18 18 "add.d %3, %0\n\t" ··· 21 21 "ax\n\t" 22 22 "addq 0, %0\n" 23 23 : "=r" (res) 24 - : "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8))); 24 + : "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8)); 25 25 26 26 return res; 27 27 }
+5 -5
include/asm-cris/arch-v32/checksum.h
··· 9 9 * checksum. Which means it would be necessary to split all those into 10 10 * 16-bit components and then add. 11 11 */ 12 - static inline unsigned int 13 - csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, 14 - unsigned short len, unsigned short proto, unsigned int sum) 12 + static inline __wsum 13 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 14 + unsigned short len, unsigned short proto, __wsum sum) 15 15 { 16 - int res; 16 + __wsum res; 17 17 18 18 __asm__ __volatile__ ("add.d %2, %0\n\t" 19 19 "addc %3, %0\n\t" ··· 21 21 "addc 0, %0\n\t" 22 22 : "=r" (res) 23 23 : "0" (sum), "r" (daddr), "r" (saddr), \ 24 - "r" ((ntohs(len) << 16) + (proto << 8))); 24 + "r" ((len + proto) << 8)); 25 25 26 26 return res; 27 27 }
+15 -19
include/asm-cris/checksum.h
··· 17 17 * 18 18 * it's best to have buff aligned on a 32-bit boundary 19 19 */ 20 - unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); 20 + __wsum csum_partial(const void *buff, int len, __wsum sum); 21 21 22 22 /* 23 23 * the same as csum_partial, but copies from src while it ··· 27 27 * better 64-bit) boundary 28 28 */ 29 29 30 - unsigned int csum_partial_copy_nocheck(const char *src, char *dst, 31 - int len, unsigned int sum); 30 + __wsum csum_partial_copy_nocheck(const void *src, void *dst, 31 + int len, __wsum sum); 32 32 33 33 /* 34 34 * Fold a partial checksum into a word 35 35 */ 36 36 37 - static inline unsigned int csum_fold(unsigned int sum) 37 + static inline __sum16 csum_fold(__wsum csum) 38 38 { 39 - /* the while loop is unnecessary really, it's always enough with two 40 - iterations */ 41 - 42 - while(sum >> 16) 43 - sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */ 44 - 45 - return ~sum; 39 + u32 sum = (__force u32)csum; 40 + sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */ 41 + sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */ 42 + return (__force __sum16)~sum; 46 43 } 47 44 48 - extern unsigned int csum_partial_copy_from_user(const char *src, char *dst, 49 - int len, unsigned int sum, 45 + extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, 46 + int len, __wsum sum, 50 47 int *errptr); 51 48 52 49 /* ··· 52 55 * 53 56 */ 54 57 55 - static inline unsigned short ip_fast_csum(unsigned char * iph, 56 - unsigned int ihl) 58 + static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 57 59 { 58 60 return csum_fold(csum_partial(iph, ihl * 4, 0)); 59 61 } ··· 62 66 * returns a 16-bit checksum, already complemented 63 67 */ 64 68 65 - static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, 66 - unsigned long daddr, 69 + static inline __sum16 int csum_tcpudp_magic(__be32 saddr, __be32 daddr, 67 70 unsigned short len, 68 71 
unsigned short proto, 69 - unsigned int sum) 72 + __wsum sum) 70 73 { 71 74 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 72 75 } ··· 75 80 * in icmp.c 76 81 */ 77 82 78 - static inline unsigned short ip_compute_csum(unsigned char * buff, int len) { 83 + static inline __sum16 ip_compute_csum(const void *buff, int len) 84 + { 79 85 return csum_fold (csum_partial(buff, len, 0)); 80 86 } 81 87