Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: Remove unnecessary cast on void pointer

The following Coccinelle script was used to detect this:
@r@
expression x;
void* e;
type T;
identifier f;
@@
(
*((T *)e)
|
((T *)x)[...]
|
((T*)x)->f
|

- (T*)
e
)

Unnecessary parentheses are also removed.

Signed-off-by: simran singhal <singhalsimran0@gmail.com>
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

authored by

simran singhal and committed by
Pablo Neira Ayuso
68ad546a dedb67c4

+35 -49
+1 -1
net/bridge/netfilter/ebtables.c
··· 1713 1713 if (*size < sizeof(*ce)) 1714 1714 return -EINVAL; 1715 1715 1716 - ce = (struct ebt_entry __user *)*dstptr; 1716 + ce = *dstptr; 1717 1717 if (copy_to_user(ce, e, sizeof(*ce))) 1718 1718 return -EFAULT; 1719 1719
+8 -13
net/ipv4/netfilter/arp_tables.c
··· 309 309 */ 310 310 for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { 311 311 unsigned int pos = newinfo->hook_entry[hook]; 312 - struct arpt_entry *e 313 - = (struct arpt_entry *)(entry0 + pos); 312 + struct arpt_entry *e = entry0 + pos; 314 313 315 314 if (!(valid_hooks & (1 << hook))) 316 315 continue; ··· 353 354 if (pos == oldpos) 354 355 goto next; 355 356 356 - e = (struct arpt_entry *) 357 - (entry0 + pos); 357 + e = entry0 + pos; 358 358 } while (oldpos == pos + e->next_offset); 359 359 360 360 /* Move along one */ 361 361 size = e->next_offset; 362 - e = (struct arpt_entry *) 363 - (entry0 + pos + size); 362 + e = entry0 + pos + size; 364 363 if (pos + size >= newinfo->size) 365 364 return 0; 366 365 e->counters.pcnt = pos; ··· 373 376 if (!xt_find_jump_offset(offsets, newpos, 374 377 newinfo->number)) 375 378 return 0; 376 - e = (struct arpt_entry *) 377 - (entry0 + newpos); 379 + e = entry0 + newpos; 378 380 } else { 379 381 /* ... this is a fallthru */ 380 382 newpos = pos + e->next_offset; 381 383 if (newpos >= newinfo->size) 382 384 return 0; 383 385 } 384 - e = (struct arpt_entry *) 385 - (entry0 + newpos); 386 + e = entry0 + newpos; 386 387 e->counters.pcnt = pos; 387 388 pos = newpos; 388 389 } ··· 676 681 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 677 682 const struct xt_entry_target *t; 678 683 679 - e = (struct arpt_entry *)(loc_cpu_entry + off); 684 + e = loc_cpu_entry + off; 680 685 if (copy_to_user(userptr + off, e, sizeof(*e))) { 681 686 ret = -EFAULT; 682 687 goto free_counters; ··· 1123 1128 int h; 1124 1129 1125 1130 origsize = *size; 1126 - de = (struct arpt_entry *)*dstptr; 1131 + de = *dstptr; 1127 1132 memcpy(de, e, sizeof(struct arpt_entry)); 1128 1133 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1129 1134 ··· 1317 1322 int ret; 1318 1323 1319 1324 origsize = *size; 1320 - ce = (struct compat_arpt_entry __user *)*dstptr; 1325 + ce = *dstptr; 1321 1326 if (copy_to_user(ce, e, sizeof(struct 
arpt_entry)) != 0 || 1322 1327 copy_to_user(&ce->counters, &counters[i], 1323 1328 sizeof(counters[i])) != 0)
+8 -12
net/ipv4/netfilter/ip_tables.c
··· 382 382 to 0 as we leave), and comefrom to save source hook bitmask */ 383 383 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { 384 384 unsigned int pos = newinfo->hook_entry[hook]; 385 - struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos); 385 + struct ipt_entry *e = entry0 + pos; 386 386 387 387 if (!(valid_hooks & (1 << hook))) 388 388 continue; ··· 424 424 if (pos == oldpos) 425 425 goto next; 426 426 427 - e = (struct ipt_entry *) 428 - (entry0 + pos); 427 + e = entry0 + pos; 429 428 } while (oldpos == pos + e->next_offset); 430 429 431 430 /* Move along one */ 432 431 size = e->next_offset; 433 - e = (struct ipt_entry *) 434 - (entry0 + pos + size); 432 + e = entry0 + pos + size; 435 433 if (pos + size >= newinfo->size) 436 434 return 0; 437 435 e->counters.pcnt = pos; ··· 444 446 if (!xt_find_jump_offset(offsets, newpos, 445 447 newinfo->number)) 446 448 return 0; 447 - e = (struct ipt_entry *) 448 - (entry0 + newpos); 449 + e = entry0 + newpos; 449 450 } else { 450 451 /* ... 
this is a fallthru */ 451 452 newpos = pos + e->next_offset; 452 453 if (newpos >= newinfo->size) 453 454 return 0; 454 455 } 455 - e = (struct ipt_entry *) 456 - (entry0 + newpos); 456 + e = entry0 + newpos; 457 457 e->counters.pcnt = pos; 458 458 pos = newpos; 459 459 } ··· 830 834 const struct xt_entry_match *m; 831 835 const struct xt_entry_target *t; 832 836 833 - e = (struct ipt_entry *)(loc_cpu_entry + off); 837 + e = loc_cpu_entry + off; 834 838 if (copy_to_user(userptr + off, e, sizeof(*e))) { 835 839 ret = -EFAULT; 836 840 goto free_counters; ··· 1225 1229 int ret = 0; 1226 1230 1227 1231 origsize = *size; 1228 - ce = (struct compat_ipt_entry __user *)*dstptr; 1232 + ce = *dstptr; 1229 1233 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || 1230 1234 copy_to_user(&ce->counters, &counters[i], 1231 1235 sizeof(counters[i])) != 0) ··· 1362 1366 struct xt_entry_match *ematch; 1363 1367 1364 1368 origsize = *size; 1365 - de = (struct ipt_entry *)*dstptr; 1369 + de = *dstptr; 1366 1370 memcpy(de, e, sizeof(struct ipt_entry)); 1367 1371 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1368 1372
+8 -12
net/ipv6/netfilter/ip6_tables.c
··· 411 411 to 0 as we leave), and comefrom to save source hook bitmask */ 412 412 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { 413 413 unsigned int pos = newinfo->hook_entry[hook]; 414 - struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); 414 + struct ip6t_entry *e = entry0 + pos; 415 415 416 416 if (!(valid_hooks & (1 << hook))) 417 417 continue; ··· 453 453 if (pos == oldpos) 454 454 goto next; 455 455 456 - e = (struct ip6t_entry *) 457 - (entry0 + pos); 456 + e = entry0 + pos; 458 457 } while (oldpos == pos + e->next_offset); 459 458 460 459 /* Move along one */ 461 460 size = e->next_offset; 462 - e = (struct ip6t_entry *) 463 - (entry0 + pos + size); 461 + e = entry0 + pos + size; 464 462 if (pos + size >= newinfo->size) 465 463 return 0; 466 464 e->counters.pcnt = pos; ··· 473 475 if (!xt_find_jump_offset(offsets, newpos, 474 476 newinfo->number)) 475 477 return 0; 476 - e = (struct ip6t_entry *) 477 - (entry0 + newpos); 478 + e = entry0 + newpos; 478 479 } else { 479 480 /* ... 
this is a fallthru */ 480 481 newpos = pos + e->next_offset; 481 482 if (newpos >= newinfo->size) 482 483 return 0; 483 484 } 484 - e = (struct ip6t_entry *) 485 - (entry0 + newpos); 485 + e = entry0 + newpos; 486 486 e->counters.pcnt = pos; 487 487 pos = newpos; 488 488 } ··· 859 863 const struct xt_entry_match *m; 860 864 const struct xt_entry_target *t; 861 865 862 - e = (struct ip6t_entry *)(loc_cpu_entry + off); 866 + e = loc_cpu_entry + off; 863 867 if (copy_to_user(userptr + off, e, sizeof(*e))) { 864 868 ret = -EFAULT; 865 869 goto free_counters; ··· 1254 1258 int ret = 0; 1255 1259 1256 1260 origsize = *size; 1257 - ce = (struct compat_ip6t_entry __user *)*dstptr; 1261 + ce = *dstptr; 1258 1262 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || 1259 1263 copy_to_user(&ce->counters, &counters[i], 1260 1264 sizeof(counters[i])) != 0) ··· 1390 1394 struct xt_entry_match *ematch; 1391 1395 1392 1396 origsize = *size; 1393 - de = (struct ip6t_entry *)*dstptr; 1397 + de = *dstptr; 1394 1398 memcpy(de, e, sizeof(struct ip6t_entry)); 1395 1399 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1396 1400
+2 -3
net/netfilter/ipset/ip_set_bitmap_gen.h
··· 232 232 if (!test_bit(id, map->members) || 233 233 (SET_WITH_TIMEOUT(set) && 234 234 #ifdef IP_SET_BITMAP_STORED_TIMEOUT 235 - mtype_is_filled((const struct mtype_elem *)x) && 235 + mtype_is_filled(x) && 236 236 #endif 237 237 ip_set_timeout_expired(ext_timeout(x, set)))) 238 238 continue; ··· 248 248 } 249 249 if (mtype_do_list(skb, map, id, set->dsize)) 250 250 goto nla_put_failure; 251 - if (ip_set_put_extensions(skb, set, x, 252 - mtype_is_filled((const struct mtype_elem *)x))) 251 + if (ip_set_put_extensions(skb, set, x, mtype_is_filled(x))) 253 252 goto nla_put_failure; 254 253 ipset_nest_end(skb, nested); 255 254 }
+1 -1
net/netfilter/ipset/ip_set_core.c
··· 1915 1915 ret = -EFAULT; 1916 1916 goto done; 1917 1917 } 1918 - op = (unsigned int *)data; 1918 + op = data; 1919 1919 1920 1920 if (*op < IP_SET_OP_VERSION) { 1921 1921 /* Check the version at the beginning of operations */
+1 -1
net/netfilter/nf_conntrack_proto.c
··· 202 202 static int kill_l4proto(struct nf_conn *i, void *data) 203 203 { 204 204 struct nf_conntrack_l4proto *l4proto; 205 - l4proto = (struct nf_conntrack_l4proto *)data; 205 + l4proto = data; 206 206 return nf_ct_protonum(i) == l4proto->l4proto && 207 207 nf_ct_l3num(i) == l4proto->l3proto; 208 208 }
+1 -1
net/netfilter/nft_set_hash.c
··· 352 352 353 353 static void nft_hash_elem_destroy(void *ptr, void *arg) 354 354 { 355 - nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); 355 + nft_set_elem_destroy(arg, ptr, true); 356 356 } 357 357 358 358 static void nft_hash_destroy(const struct nft_set *set)
+5 -5
net/netfilter/xt_hashlimit.c
··· 119 119 cfg_copy(struct hashlimit_cfg2 *to, void *from, int revision) 120 120 { 121 121 if (revision == 1) { 122 - struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from; 122 + struct hashlimit_cfg1 *cfg = from; 123 123 124 124 to->mode = cfg->mode; 125 125 to->avg = cfg->avg; ··· 895 895 static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) 896 896 { 897 897 struct xt_hashlimit_htable *htable = s->private; 898 - unsigned int *bucket = (unsigned int *)v; 898 + unsigned int *bucket = v; 899 899 900 900 *pos = ++(*bucket); 901 901 if (*pos >= htable->cfg.size) { ··· 909 909 __releases(htable->lock) 910 910 { 911 911 struct xt_hashlimit_htable *htable = s->private; 912 - unsigned int *bucket = (unsigned int *)v; 912 + unsigned int *bucket = v; 913 913 914 914 if (!IS_ERR(bucket)) 915 915 kfree(bucket); ··· 980 980 static int dl_seq_show_v1(struct seq_file *s, void *v) 981 981 { 982 982 struct xt_hashlimit_htable *htable = s->private; 983 - unsigned int *bucket = (unsigned int *)v; 983 + unsigned int *bucket = v; 984 984 struct dsthash_ent *ent; 985 985 986 986 if (!hlist_empty(&htable->hash[*bucket])) { ··· 994 994 static int dl_seq_show(struct seq_file *s, void *v) 995 995 { 996 996 struct xt_hashlimit_htable *htable = s->private; 997 - unsigned int *bucket = (unsigned int *)v; 997 + unsigned int *bucket = v; 998 998 struct dsthash_ent *ent; 999 999 1000 1000 if (!hlist_empty(&htable->hash[*bucket])) {