Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

include/net net/ - csum_partial - remove unnecessary casts

The first argument to csum_partial is const void *;
casts to char/u8 * are not necessary.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Joe Perches; committed by David S. Miller.
07f0757a a7a0d6a8

+19 -19
+1 -1
include/net/checksum.h
··· 98 98 { 99 99 __be32 diff[] = { ~from, to }; 100 100 101 - *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum))); 101 + *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum))); 102 102 } 103 103 104 104 static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
+3 -3
include/net/ip_vs.h
··· 913 913 { 914 914 __be32 diff[2] = { ~old, new }; 915 915 916 - return csum_partial((char *) diff, sizeof(diff), oldsum); 916 + return csum_partial(diff, sizeof(diff), oldsum); 917 917 } 918 918 919 919 #ifdef CONFIG_IP_VS_IPV6 ··· 923 923 __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0], 924 924 new[3], new[2], new[1], new[0] }; 925 925 926 - return csum_partial((char *) diff, sizeof(diff), oldsum); 926 + return csum_partial(diff, sizeof(diff), oldsum); 927 927 } 928 928 #endif 929 929 ··· 931 931 { 932 932 __be16 diff[2] = { ~old, new }; 933 933 934 - return csum_partial((char *) diff, sizeof(diff), oldsum); 934 + return csum_partial(diff, sizeof(diff), oldsum); 935 935 } 936 936 937 937 #endif /* __KERNEL__ */
+1 -1
net/core/netpoll.c
··· 343 343 udph->check = csum_tcpudp_magic(htonl(np->local_ip), 344 344 htonl(np->remote_ip), 345 345 udp_len, IPPROTO_UDP, 346 - csum_partial((unsigned char *)udph, udp_len, 0)); 346 + csum_partial(udph, udp_len, 0)); 347 347 if (udph->check == 0) 348 348 udph->check = CSUM_MANGLED_0; 349 349
+2 -2
net/ipv4/inet_lro.c
··· 120 120 iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl); 121 121 122 122 tcph->check = 0; 123 - tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0); 123 + tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0); 124 124 lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum); 125 125 tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 126 126 lro_desc->ip_tot_len - ··· 135 135 __wsum tcp_ps_hdr_csum; 136 136 137 137 tcp_csum = ~csum_unfold(tcph->check); 138 - tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum); 138 + tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), tcp_csum); 139 139 140 140 tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, 141 141 len + TCP_HDR_LEN(tcph),
+2 -2
net/ipv4/tcp_ipv4.c
··· 492 492 skb->csum_offset = offsetof(struct tcphdr, check); 493 493 } else { 494 494 th->check = tcp_v4_check(len, inet->saddr, inet->daddr, 495 - csum_partial((char *)th, 495 + csum_partial(th, 496 496 th->doff << 2, 497 497 skb->csum)); 498 498 } ··· 726 726 th->check = tcp_v4_check(skb->len, 727 727 ireq->loc_addr, 728 728 ireq->rmt_addr, 729 - csum_partial((char *)th, skb->len, 729 + csum_partial(th, skb->len, 730 730 skb->csum)); 731 731 732 732 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+2 -2
net/ipv6/icmp.c
··· 233 233 icmp6h->icmp6_cksum = 0; 234 234 235 235 if (skb_queue_len(&sk->sk_write_queue) == 1) { 236 - skb->csum = csum_partial((char *)icmp6h, 236 + skb->csum = csum_partial(icmp6h, 237 237 sizeof(struct icmp6hdr), skb->csum); 238 238 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src, 239 239 &fl->fl6_dst, ··· 246 246 tmp_csum = csum_add(tmp_csum, skb->csum); 247 247 } 248 248 249 - tmp_csum = csum_partial((char *)icmp6h, 249 + tmp_csum = csum_partial(icmp6h, 250 250 sizeof(struct icmp6hdr), tmp_csum); 251 251 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src, 252 252 &fl->fl6_dst,
+1 -1
net/ipv6/mcast.c
··· 1817 1817 1818 1818 hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len, 1819 1819 IPPROTO_ICMPV6, 1820 - csum_partial((__u8 *) hdr, len, 0)); 1820 + csum_partial(hdr, len, 0)); 1821 1821 1822 1822 idev = in6_dev_get(skb->dev); 1823 1823
+2 -2
net/ipv6/ndisc.c
··· 491 491 492 492 hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len, 493 493 IPPROTO_ICMPV6, 494 - csum_partial((__u8 *) hdr, 494 + csum_partial(hdr, 495 495 len, 0)); 496 496 497 497 return skb; ··· 1612 1612 1613 1613 icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr, 1614 1614 len, IPPROTO_ICMPV6, 1615 - csum_partial((u8 *) icmph, len, 0)); 1615 + csum_partial(icmph, len, 0)); 1616 1616 1617 1617 buff->dst = dst; 1618 1618 idev = in6_dev_get(dst->dev);
+3 -3
net/ipv6/tcp_ipv6.c
··· 501 501 502 502 th->check = tcp_v6_check(th, skb->len, 503 503 &treq->loc_addr, &treq->rmt_addr, 504 - csum_partial((char *)th, skb->len, skb->csum)); 504 + csum_partial(th, skb->len, skb->csum)); 505 505 506 506 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 507 507 err = ip6_xmit(sk, skb, &fl, opt, 0); ··· 915 915 skb->csum_offset = offsetof(struct tcphdr, check); 916 916 } else { 917 917 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 918 - csum_partial((char *)th, th->doff<<2, 918 + csum_partial(th, th->doff<<2, 919 919 skb->csum)); 920 920 } 921 921 } ··· 997 997 } 998 998 #endif 999 999 1000 - buff->csum = csum_partial((char *)t1, tot_len, 0); 1000 + buff->csum = csum_partial(t1, tot_len, 0); 1001 1001 1002 1002 memset(&fl, 0, sizeof(fl)); 1003 1003 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
+2 -2
net/unix/af_unix.c
··· 216 216 return len; 217 217 } 218 218 219 - *hashp = unix_hash_fold(csum_partial((char *)sunaddr, len, 0)); 219 + *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0)); 220 220 return len; 221 221 } 222 222 ··· 686 686 687 687 retry: 688 688 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short); 689 - addr->hash = unix_hash_fold(csum_partial((void *)addr->name, addr->len, 0)); 689 + addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0)); 690 690 691 691 spin_lock(&unix_table_lock); 692 692 ordernum = (ordernum+1)&0xFFFFF;