Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Kill hold_net release_net

hold_net and release_net were an idea that turned out to be useless.
The code has been disabled since 2008. Kill the code; its removal is long past due.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric W. Biederman and committed by David S. Miller
efd7ef1c 6c7005f6

+14 -90
+1 -2
include/linux/netdevice.h
··· 1864 1864 void dev_net_set(struct net_device *dev, struct net *net) 1865 1865 { 1866 1866 #ifdef CONFIG_NET_NS 1867 - release_net(dev->nd_net); 1868 - dev->nd_net = hold_net(net); 1867 + dev->nd_net = net; 1869 1868 #endif 1870 1869 } 1871 1870
+1 -8
include/net/fib_rules.h
··· 95 95 atomic_inc(&rule->refcnt); 96 96 } 97 97 98 - static inline void fib_rule_put_rcu(struct rcu_head *head) 99 - { 100 - struct fib_rule *rule = container_of(head, struct fib_rule, rcu); 101 - release_net(rule->fr_net); 102 - kfree(rule); 103 - } 104 - 105 98 static inline void fib_rule_put(struct fib_rule *rule) 106 99 { 107 100 if (atomic_dec_and_test(&rule->refcnt)) 108 - call_rcu(&rule->rcu, fib_rule_put_rcu); 101 + kfree_rcu(rule, rcu); 109 102 } 110 103 111 104 static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
-29
include/net/net_namespace.h
··· 49 49 atomic_t count; /* To decided when the network 50 50 * namespace should be shut down. 51 51 */ 52 - #ifdef NETNS_REFCNT_DEBUG 53 - atomic_t use_count; /* To track references we 54 - * destroy on demand 55 - */ 56 - #endif 57 52 spinlock_t rules_mod_lock; 58 53 59 54 atomic64_t cookie_gen; ··· 230 235 #define net_drop_ns NULL 231 236 #endif 232 237 233 - 234 - #ifdef NETNS_REFCNT_DEBUG 235 - static inline struct net *hold_net(struct net *net) 236 - { 237 - if (net) 238 - atomic_inc(&net->use_count); 239 - return net; 240 - } 241 - 242 - static inline void release_net(struct net *net) 243 - { 244 - if (net) 245 - atomic_dec(&net->use_count); 246 - } 247 - #else 248 - static inline struct net *hold_net(struct net *net) 249 - { 250 - return net; 251 - } 252 - 253 - static inline void release_net(struct net *net) 254 - { 255 - } 256 - #endif 257 238 258 239 #ifdef CONFIG_NET_NS 259 240
+1 -1
include/net/sock.h
··· 2204 2204 2205 2205 if (!net_eq(current_net, net)) { 2206 2206 put_net(current_net); 2207 - sock_net_set(sk, hold_net(net)); 2207 + sock_net_set(sk, net); 2208 2208 } 2209 2209 } 2210 2210
-2
net/core/dev.c
··· 6841 6841 { 6842 6842 struct napi_struct *p, *n; 6843 6843 6844 - release_net(dev_net(dev)); 6845 - 6846 6844 netif_free_tx_queues(dev); 6847 6845 #ifdef CONFIG_SYSFS 6848 6846 kvfree(dev->_rx);
+3 -14
net/core/fib_rules.c
··· 31 31 r->pref = pref; 32 32 r->table = table; 33 33 r->flags = flags; 34 - r->fr_net = hold_net(ops->fro_net); 34 + r->fr_net = ops->fro_net; 35 35 36 36 r->suppress_prefixlen = -1; 37 37 r->suppress_ifgroup = -1; ··· 116 116 if (ops->family == o->family) 117 117 goto errout; 118 118 119 - hold_net(net); 120 119 list_add_tail_rcu(&ops->list, &net->rules_ops); 121 120 err = 0; 122 121 errout: ··· 159 160 } 160 161 } 161 162 162 - static void fib_rules_put_rcu(struct rcu_head *head) 163 - { 164 - struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu); 165 - struct net *net = ops->fro_net; 166 - 167 - release_net(net); 168 - kfree(ops); 169 - } 170 - 171 163 void fib_rules_unregister(struct fib_rules_ops *ops) 172 164 { 173 165 struct net *net = ops->fro_net; ··· 168 178 fib_rules_cleanup_ops(ops); 169 179 spin_unlock(&net->rules_mod_lock); 170 180 171 - call_rcu(&ops->rcu, fib_rules_put_rcu); 181 + kfree_rcu(ops, rcu); 172 182 } 173 183 EXPORT_SYMBOL_GPL(fib_rules_unregister); 174 184 ··· 293 303 err = -ENOMEM; 294 304 goto errout; 295 305 } 296 - rule->fr_net = hold_net(net); 306 + rule->fr_net = net; 297 307 298 308 if (tb[FRA_PRIORITY]) 299 309 rule->pref = nla_get_u32(tb[FRA_PRIORITY]); ··· 413 423 return 0; 414 424 415 425 errout_free: 416 - release_net(rule->fr_net); 417 426 kfree(rule); 418 427 errout: 419 428 rules_ops_put(ops);
+2 -7
net/core/neighbour.c
··· 591 591 if (!n) 592 592 goto out; 593 593 594 - write_pnet(&n->net, hold_net(net)); 594 + write_pnet(&n->net, net); 595 595 memcpy(n->key, pkey, key_len); 596 596 n->dev = dev; 597 597 if (dev) ··· 600 600 if (tbl->pconstructor && tbl->pconstructor(n)) { 601 601 if (dev) 602 602 dev_put(dev); 603 - release_net(net); 604 603 kfree(n); 605 604 n = NULL; 606 605 goto out; ··· 633 634 tbl->pdestructor(n); 634 635 if (n->dev) 635 636 dev_put(n->dev); 636 - release_net(pneigh_net(n)); 637 637 kfree(n); 638 638 return 0; 639 639 } ··· 655 657 tbl->pdestructor(n); 656 658 if (n->dev) 657 659 dev_put(n->dev); 658 - release_net(pneigh_net(n)); 659 660 kfree(n); 660 661 continue; 661 662 } ··· 1425 1428 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 1426 1429 dev_hold(dev); 1427 1430 p->dev = dev; 1428 - write_pnet(&p->net, hold_net(net)); 1431 + write_pnet(&p->net, net); 1429 1432 p->sysctl_table = NULL; 1430 1433 1431 1434 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1432 - release_net(net); 1433 1435 dev_put(dev); 1434 1436 kfree(p); 1435 1437 return NULL; ··· 1468 1472 1469 1473 static void neigh_parms_destroy(struct neigh_parms *parms) 1470 1474 { 1471 - release_net(neigh_parms_net(parms)); 1472 1475 kfree(parms); 1473 1476 } 1474 1477
-11
net/core/net_namespace.c
··· 236 236 net->user_ns = user_ns; 237 237 idr_init(&net->netns_ids); 238 238 239 - #ifdef NETNS_REFCNT_DEBUG 240 - atomic_set(&net->use_count, 0); 241 - #endif 242 - 243 239 list_for_each_entry(ops, &pernet_list, list) { 244 240 error = ops_init(ops, net); 245 241 if (error < 0) ··· 290 294 291 295 static void net_free(struct net *net) 292 296 { 293 - #ifdef NETNS_REFCNT_DEBUG 294 - if (unlikely(atomic_read(&net->use_count) != 0)) { 295 - pr_emerg("network namespace not free! Usage: %d\n", 296 - atomic_read(&net->use_count)); 297 - return; 298 - } 299 - #endif 300 297 kfree(rcu_access_pointer(net->gen)); 301 298 kmem_cache_free(net_cachep, net); 302 299 }
-1
net/core/sock.c
··· 1455 1455 1456 1456 sock_hold(sk); 1457 1457 sock_release(sk->sk_socket); 1458 - release_net(sock_net(sk)); 1459 1458 sock_net_set(sk, get_net(&init_net)); 1460 1459 sock_put(sk); 1461 1460 }
+1 -2
net/ipv4/fib_semantics.c
··· 213 213 rt_fibinfo_free(&nexthop_nh->nh_rth_input); 214 214 } endfor_nexthops(fi); 215 215 216 - release_net(fi->fib_net); 217 216 if (fi->fib_metrics != (u32 *) dst_default_metrics) 218 217 kfree(fi->fib_metrics); 219 218 kfree(fi); ··· 813 814 } else 814 815 fi->fib_metrics = (u32 *) dst_default_metrics; 815 816 816 - fi->fib_net = hold_net(net); 817 + fi->fib_net = net; 817 818 fi->fib_protocol = cfg->fc_protocol; 818 819 fi->fib_scope = cfg->fc_scope; 819 820 fi->fib_flags = cfg->fc_flags;
+1 -2
net/ipv4/inet_hashtables.c
··· 61 61 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); 62 62 63 63 if (tb != NULL) { 64 - write_pnet(&tb->ib_net, hold_net(net)); 64 + write_pnet(&tb->ib_net, net); 65 65 tb->port = snum; 66 66 tb->fastreuse = 0; 67 67 tb->fastreuseport = 0; ··· 79 79 { 80 80 if (hlist_empty(&tb->owners)) { 81 81 __hlist_del(&tb->node); 82 - release_net(ib_net(tb)); 83 82 kmem_cache_free(cachep, tb); 84 83 } 85 84 }
+1 -2
net/ipv4/inet_timewait_sock.c
··· 98 98 #ifdef SOCK_REFCNT_DEBUG 99 99 pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw); 100 100 #endif 101 - release_net(twsk_net(tw)); 102 101 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); 103 102 module_put(owner); 104 103 } ··· 195 196 tw->tw_transparent = inet->transparent; 196 197 tw->tw_prot = sk->sk_prot_creator; 197 198 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie)); 198 - twsk_net_set(tw, hold_net(sock_net(sk))); 199 + twsk_net_set(tw, sock_net(sk)); 199 200 /* 200 201 * Because we use RCU lookups, we should not set tw_refcnt 201 202 * to a non null value before everything is setup for this
+1 -4
net/ipv6/addrlabel.c
··· 129 129 /* Object management */ 130 130 static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) 131 131 { 132 - #ifdef CONFIG_NET_NS 133 - release_net(p->lbl_net); 134 - #endif 135 132 kfree(p); 136 133 } 137 134 ··· 238 241 newp->label = label; 239 242 INIT_HLIST_NODE(&newp->list); 240 243 #ifdef CONFIG_NET_NS 241 - newp->lbl_net = hold_net(net); 244 + newp->lbl_net = net; 242 245 #endif 243 246 atomic_set(&newp->refcnt, 1); 244 247 return newp;
+1 -2
net/ipv6/ip6_flowlabel.c
··· 100 100 if (fl) { 101 101 if (fl->share == IPV6_FL_S_PROCESS) 102 102 put_pid(fl->owner.pid); 103 - release_net(fl->fl_net); 104 103 kfree(fl->opt); 105 104 kfree_rcu(fl, rcu); 106 105 } ··· 402 403 } 403 404 } 404 405 405 - fl->fl_net = hold_net(net); 406 + fl->fl_net = net; 406 407 fl->expires = jiffies; 407 408 err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); 408 409 if (err)
+1 -3
net/openvswitch/datapath.c
··· 203 203 204 204 ovs_flow_tbl_destroy(&dp->table); 205 205 free_percpu(dp->stats_percpu); 206 - release_net(ovs_dp_get_net(dp)); 207 206 kfree(dp->ports); 208 207 kfree(dp); 209 208 } ··· 1500 1501 if (dp == NULL) 1501 1502 goto err_free_reply; 1502 1503 1503 - ovs_dp_set_net(dp, hold_net(sock_net(skb->sk))); 1504 + ovs_dp_set_net(dp, sock_net(skb->sk)); 1504 1505 1505 1506 /* Allocate table. */ 1506 1507 err = ovs_flow_tbl_init(&dp->table); ··· 1574 1575 err_destroy_table: 1575 1576 ovs_flow_tbl_destroy(&dp->table); 1576 1577 err_free_dp: 1577 - release_net(ovs_dp_get_net(dp)); 1578 1578 kfree(dp); 1579 1579 err_free_reply: 1580 1580 kfree_skb(reply);