Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET] NETNS: Omit net_device->nd_net without CONFIG_NET_NS.

Introduce per-net_device inlines: dev_net(), dev_net_set().
Without CONFIG_NET_NS, no namespace other than &init_net exists.
Let's explicitly define them to help compiler optimizations.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>

+251 -228
+1 -1
arch/ia64/hp/sim/simeth.c
··· 294 294 return NOTIFY_DONE; 295 295 } 296 296 297 - if (dev->nd_net != &init_net) 297 + if (dev_net(dev) != &init_net) 298 298 return NOTIFY_DONE; 299 299 300 300 if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE;
+1 -1
drivers/block/aoe/aoenet.c
··· 115 115 struct aoe_hdr *h; 116 116 u32 n; 117 117 118 - if (ifp->nd_net != &init_net) 118 + if (dev_net(ifp) != &init_net) 119 119 goto exit; 120 120 121 121 skb = skb_share_check(skb, GFP_ATOMIC);
+1 -1
drivers/net/bonding/bond_3ad.c
··· 2429 2429 struct slave *slave = NULL; 2430 2430 int ret = NET_RX_DROP; 2431 2431 2432 - if (dev->nd_net != &init_net) 2432 + if (dev_net(dev) != &init_net) 2433 2433 goto out; 2434 2434 2435 2435 if (!(dev->flags & IFF_MASTER))
+1 -1
drivers/net/bonding/bond_alb.c
··· 345 345 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 346 346 int res = NET_RX_DROP; 347 347 348 - if (bond_dev->nd_net != &init_net) 348 + if (dev_net(bond_dev) != &init_net) 349 349 goto out; 350 350 351 351 if (!(bond_dev->flags & IFF_MASTER))
+3 -3
drivers/net/bonding/bond_main.c
··· 2629 2629 unsigned char *arp_ptr; 2630 2630 __be32 sip, tip; 2631 2631 2632 - if (dev->nd_net != &init_net) 2632 + if (dev_net(dev) != &init_net) 2633 2633 goto out; 2634 2634 2635 2635 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) ··· 3470 3470 { 3471 3471 struct net_device *event_dev = (struct net_device *)ptr; 3472 3472 3473 - if (event_dev->nd_net != &init_net) 3473 + if (dev_net(event_dev) != &init_net) 3474 3474 return NOTIFY_DONE; 3475 3475 3476 3476 dprintk("event_dev: %s, event: %lx\n", ··· 3508 3508 struct bonding *bond, *bond_next; 3509 3509 struct vlan_entry *vlan, *vlan_next; 3510 3510 3511 - if (ifa->ifa_dev->dev->nd_net != &init_net) 3511 + if (dev_net(ifa->ifa_dev->dev) != &init_net) 3512 3512 return NOTIFY_DONE; 3513 3513 3514 3514 list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) {
+2 -2
drivers/net/hamradio/bpqether.c
··· 172 172 struct ethhdr *eth; 173 173 struct bpqdev *bpq; 174 174 175 - if (dev->nd_net != &init_net) 175 + if (dev_net(dev) != &init_net) 176 176 goto drop; 177 177 178 178 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) ··· 553 553 { 554 554 struct net_device *dev = (struct net_device *)ptr; 555 555 556 - if (dev->nd_net != &init_net) 556 + if (dev_net(dev) != &init_net) 557 557 return NOTIFY_DONE; 558 558 559 559 if (!dev_is_ethdev(dev))
+1 -1
drivers/net/loopback.c
··· 258 258 if (!dev) 259 259 goto out; 260 260 261 - dev->nd_net = net; 261 + dev_net_set(dev, net); 262 262 err = register_netdev(dev); 263 263 if (err) 264 264 goto out_free_netdev;
+1 -1
drivers/net/macvlan.c
··· 402 402 if (!tb[IFLA_LINK]) 403 403 return -EINVAL; 404 404 405 - lowerdev = __dev_get_by_index(dev->nd_net, nla_get_u32(tb[IFLA_LINK])); 405 + lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); 406 406 if (lowerdev == NULL) 407 407 return -ENODEV; 408 408
+3 -3
drivers/net/pppoe.c
··· 301 301 { 302 302 struct net_device *dev = (struct net_device *) ptr; 303 303 304 - if (dev->nd_net != &init_net) 304 + if (dev_net(dev) != &init_net) 305 305 return NOTIFY_DONE; 306 306 307 307 /* Only look at sockets that are using this specific device. */ ··· 392 392 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 393 393 goto out; 394 394 395 - if (dev->nd_net != &init_net) 395 + if (dev_net(dev) != &init_net) 396 396 goto drop; 397 397 398 398 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) ··· 424 424 struct pppoe_hdr *ph; 425 425 struct pppox_sock *po; 426 426 427 - if (dev->nd_net != &init_net) 427 + if (dev_net(dev) != &init_net) 428 428 goto abort; 429 429 430 430 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+1 -1
drivers/net/veth.c
··· 375 375 else 376 376 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); 377 377 378 - peer = rtnl_create_link(dev->nd_net, ifname, &veth_link_ops, tbp); 378 + peer = rtnl_create_link(dev_net(dev), ifname, &veth_link_ops, tbp); 379 379 if (IS_ERR(peer)) 380 380 return PTR_ERR(peer); 381 381
+1 -1
drivers/net/via-velocity.c
··· 3464 3464 struct velocity_info *vptr; 3465 3465 unsigned long flags; 3466 3466 3467 - if (dev->nd_net != &init_net) 3467 + if (dev_net(dev) != &init_net) 3468 3468 return NOTIFY_DONE; 3469 3469 3470 3470 spin_lock_irqsave(&velocity_dev_list_lock, flags);
+1 -1
drivers/net/wan/dlci.c
··· 517 517 { 518 518 struct net_device *dev = (struct net_device *) ptr; 519 519 520 - if (dev->nd_net != &init_net) 520 + if (dev_net(dev) != &init_net) 521 521 return NOTIFY_DONE; 522 522 523 523 if (event == NETDEV_UNREGISTER) {
+2 -2
drivers/net/wan/hdlc.c
··· 68 68 { 69 69 struct hdlc_device *hdlc = dev_to_hdlc(dev); 70 70 71 - if (dev->nd_net != &init_net) { 71 + if (dev_net(dev) != &init_net) { 72 72 kfree_skb(skb); 73 73 return 0; 74 74 } ··· 105 105 unsigned long flags; 106 106 int on; 107 107 108 - if (dev->nd_net != &init_net) 108 + if (dev_net(dev) != &init_net) 109 109 return NOTIFY_DONE; 110 110 111 111 if (dev->get_stats != hdlc_get_stats)
+2 -2
drivers/net/wan/lapbether.c
··· 91 91 int len, err; 92 92 struct lapbethdev *lapbeth; 93 93 94 - if (dev->nd_net != &init_net) 94 + if (dev_net(dev) != &init_net) 95 95 goto drop; 96 96 97 97 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) ··· 393 393 struct lapbethdev *lapbeth; 394 394 struct net_device *dev = ptr; 395 395 396 - if (dev->nd_net != &init_net) 396 + if (dev_net(dev) != &init_net) 397 397 return NOTIFY_DONE; 398 398 399 399 if (!dev_is_ethdev(dev))
+1 -1
drivers/net/wan/syncppp.c
··· 1444 1444 1445 1445 static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) 1446 1446 { 1447 - if (dev->nd_net != &init_net) { 1447 + if (dev_net(dev) != &init_net) { 1448 1448 kfree_skb(skb); 1449 1449 return 0; 1450 1450 }
+1 -1
drivers/s390/net/qeth_l3_main.c
··· 3250 3250 struct qeth_ipaddr *addr; 3251 3251 struct qeth_card *card; 3252 3252 3253 - if (dev->nd_net != &init_net) 3253 + if (dev_net(dev) != &init_net) 3254 3254 return NOTIFY_DONE; 3255 3255 3256 3256 QETH_DBF_TEXT(trace, 3, "ipevent");
+3 -3
include/linux/inetdevice.h
··· 70 70 ipv4_devconf_set((in_dev), NET_IPV4_CONF_ ## attr, (val)) 71 71 72 72 #define IN_DEV_ANDCONF(in_dev, attr) \ 73 - (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) && \ 73 + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \ 74 74 IN_DEV_CONF_GET((in_dev), attr)) 75 75 #define IN_DEV_ORCONF(in_dev, attr) \ 76 - (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) || \ 76 + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \ 77 77 IN_DEV_CONF_GET((in_dev), attr)) 78 78 #define IN_DEV_MAXCONF(in_dev, attr) \ 79 - (max(IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr), \ 79 + (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \ 80 80 IN_DEV_CONF_GET((in_dev), attr))) 81 81 82 82 #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
+24 -1
include/linux/netdevice.h
··· 708 708 void (*poll_controller)(struct net_device *dev); 709 709 #endif 710 710 711 + #ifdef CONFIG_NET_NS 711 712 /* Network namespace this network device is inside */ 712 713 struct net *nd_net; 714 + #endif 713 715 714 716 /* bridge stuff */ 715 717 struct net_bridge_port *br_port; ··· 738 736 739 737 #define NETDEV_ALIGN 32 740 738 #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) 739 740 + /* 741 + * Net namespace inlines 742 + */ 743 + static inline 744 + struct net *dev_net(const struct net_device *dev) 745 + { 746 + #ifdef CONFIG_NET_NS 747 + return dev->nd_net; 748 + #else 749 + return &init_net; 750 + #endif 751 + } 752 + 753 + static inline 754 + void dev_net_set(struct net_device *dev, struct net *net) 755 + { 756 + #ifdef CONFIG_NET_NS 757 + dev->nd_net = net; 758 + #endif 759 + } 741 760 742 761 /** 743 762 * netdev_priv - access network device private data ··· 836 813 struct list_head *lh; 837 814 struct net *net; 838 815 839 - net = dev->nd_net; 816 + net = dev_net(dev); 840 817 lh = dev->dev_list.next; 841 818 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 842 819 }
+1 -1
net/8021q/vlan.c
··· 382 382 int i, flgs; 383 383 struct net_device *vlandev; 384 384 385 - if (dev->nd_net != &init_net) 385 + if (dev_net(dev) != &init_net) 386 386 return NOTIFY_DONE; 387 387 388 388 if (!grp)
+1 -1
net/8021q/vlan_dev.c
··· 153 153 struct net_device_stats *stats; 154 154 unsigned short vlan_TCI; 155 155 156 - if (dev->nd_net != &init_net) 156 + if (dev_net(dev) != &init_net) 157 157 goto err_free; 158 158 159 159 skb = skb_share_check(skb, GFP_ATOMIC);
+2 -2
net/appletalk/aarp.c
··· 333 333 struct net_device *dev = ptr; 334 334 int ct; 335 335 336 - if (dev->nd_net != &init_net) 336 + if (dev_net(dev) != &init_net) 337 337 return NOTIFY_DONE; 338 338 339 339 if (event == NETDEV_DOWN) { ··· 716 716 struct atalk_addr sa, *ma, da; 717 717 struct atalk_iface *ifa; 718 718 719 - if (dev->nd_net != &init_net) 719 + if (dev_net(dev) != &init_net) 720 720 goto out0; 721 721 722 722 /* We only do Ethernet SNAP AARP. */
+3 -3
net/appletalk/ddp.c
··· 648 648 { 649 649 struct net_device *dev = ptr; 650 650 651 - if (dev->nd_net != &init_net) 651 + if (dev_net(dev) != &init_net) 652 652 return NOTIFY_DONE; 653 653 654 654 if (event == NETDEV_DOWN) ··· 1405 1405 int origlen; 1406 1406 __u16 len_hops; 1407 1407 1408 - if (dev->nd_net != &init_net) 1408 + if (dev_net(dev) != &init_net) 1409 1409 goto freeit; 1410 1410 1411 1411 /* Don't mangle buffer if shared */ ··· 1493 1493 static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, 1494 1494 struct packet_type *pt, struct net_device *orig_dev) 1495 1495 { 1496 - if (dev->nd_net != &init_net) 1496 + if (dev_net(dev) != &init_net) 1497 1497 goto freeit; 1498 1498 1499 1499 /* Expand any short form frames */
+1 -1
net/atm/clip.c
··· 612 612 { 613 613 struct net_device *dev = arg; 614 614 615 - if (dev->nd_net != &init_net) 615 + if (dev_net(dev) != &init_net) 616 616 return NOTIFY_DONE; 617 617 618 618 if (event == NETDEV_UNREGISTER) {
+1 -1
net/atm/mpc.c
··· 964 964 965 965 dev = (struct net_device *)dev_ptr; 966 966 967 - if (dev->nd_net != &init_net) 967 + if (dev_net(dev) != &init_net) 968 968 return NOTIFY_DONE; 969 969 970 970 if (dev->name == NULL || strncmp(dev->name, "lec", 3))
+1 -1
net/ax25/af_ax25.c
··· 116 116 { 117 117 struct net_device *dev = (struct net_device *)ptr; 118 118 119 - if (dev->nd_net != &init_net) 119 + if (dev_net(dev) != &init_net) 120 120 return NOTIFY_DONE; 121 121 122 122 /* Reject non AX.25 devices */
+1 -1
net/ax25/ax25_in.c
··· 451 451 skb->sk = NULL; /* Initially we don't know who it's for */ 452 452 skb->destructor = NULL; /* Who initializes this, dammit?! */ 453 453 454 - if (dev->nd_net != &init_net) { 454 + if (dev_net(dev) != &init_net) { 455 455 kfree_skb(skb); 456 456 return 0; 457 457 }
+1 -1
net/bridge/br_notify.c
··· 37 37 struct net_bridge_port *p = dev->br_port; 38 38 struct net_bridge *br; 39 39 40 - if (dev->nd_net != &init_net) 40 + if (dev_net(dev) != &init_net) 41 41 return NOTIFY_DONE; 42 42 43 43 /* not a port of a bridge */
+1 -1
net/bridge/br_stp_bpdu.c
··· 142 142 struct net_bridge *br; 143 143 const unsigned char *buf; 144 144 145 - if (dev->nd_net != &init_net) 145 + if (dev_net(dev) != &init_net) 146 146 goto err; 147 147 148 148 if (!p)
+2 -2
net/can/af_can.c
··· 599 599 struct dev_rcv_lists *d; 600 600 int matches; 601 601 602 - if (dev->type != ARPHRD_CAN || dev->nd_net != &init_net) { 602 + if (dev->type != ARPHRD_CAN || dev_net(dev) != &init_net) { 603 603 kfree_skb(skb); 604 604 return 0; 605 605 } ··· 710 710 struct net_device *dev = (struct net_device *)data; 711 711 struct dev_rcv_lists *d; 712 712 713 - if (dev->nd_net != &init_net) 713 + if (dev_net(dev) != &init_net) 714 714 return NOTIFY_DONE; 715 715 716 716 if (dev->type != ARPHRD_CAN)
+1 -1
net/can/bcm.c
··· 1285 1285 struct bcm_op *op; 1286 1286 int notify_enodev = 0; 1287 1287 1288 - if (dev->nd_net != &init_net) 1288 + if (dev_net(dev) != &init_net) 1289 1289 return NOTIFY_DONE; 1290 1290 1291 1291 if (dev->type != ARPHRD_CAN)
+1 -1
net/can/raw.c
··· 210 210 struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); 211 211 struct sock *sk = &ro->sk; 212 212 213 - if (dev->nd_net != &init_net) 213 + if (dev_net(dev) != &init_net) 214 214 return NOTIFY_DONE; 215 215 216 216 if (dev->type != ARPHRD_CAN)
+11 -11
net/core/dev.c
··· 216 216 /* Device list insertion */ 217 217 static int list_netdevice(struct net_device *dev) 218 218 { 219 - struct net *net = dev->nd_net; 219 + struct net *net = dev_net(dev); 220 220 221 221 ASSERT_RTNL(); 222 222 ··· 852 852 struct net *net; 853 853 int ret; 854 854 855 - BUG_ON(!dev->nd_net); 856 - net = dev->nd_net; 855 + BUG_ON(!dev_net(dev)); 856 + net = dev_net(dev); 857 857 ret = __dev_alloc_name(net, name, buf); 858 858 if (ret >= 0) 859 859 strlcpy(dev->name, buf, IFNAMSIZ); ··· 877 877 struct net *net; 878 878 879 879 ASSERT_RTNL(); 880 - BUG_ON(!dev->nd_net); 880 + BUG_ON(!dev_net(dev)); 881 881 882 - net = dev->nd_net; 882 + net = dev_net(dev); 883 883 if (dev->flags & IFF_UP) 884 884 return -EBUSY; 885 885 ··· 2615 2615 2616 2616 if (v == SEQ_START_TOKEN) 2617 2617 seq_puts(seq, "Type Device Function\n"); 2618 - else if (pt->dev == NULL || pt->dev->nd_net == seq_file_net(seq)) { 2618 + else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 2619 2619 if (pt->type == htons(ETH_P_ALL)) 2620 2620 seq_puts(seq, "ALL "); 2621 2621 else ··· 3689 3689 3690 3690 /* When net_device's are persistent, this will be fatal. 
*/ 3691 3691 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 3692 - BUG_ON(!dev->nd_net); 3693 - net = dev->nd_net; 3692 + BUG_ON(!dev_net(dev)); 3693 + net = dev_net(dev); 3694 3694 3695 3695 spin_lock_init(&dev->queue_lock); 3696 3696 spin_lock_init(&dev->_xmit_lock); ··· 4011 4011 dev = (struct net_device *) 4012 4012 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 4013 4013 dev->padded = (char *)dev - (char *)p; 4014 - dev->nd_net = &init_net; 4014 + dev_net_set(dev, &init_net); 4015 4015 4016 4016 if (sizeof_priv) { 4017 4017 dev->priv = ((char *)dev + ··· 4136 4136 4137 4137 /* Get out if there is nothing todo */ 4138 4138 err = 0; 4139 - if (dev->nd_net == net) 4139 + if (dev_net(dev) == net) 4140 4140 goto out; 4141 4141 4142 4142 /* Pick the destination device name, and ensure ··· 4187 4187 dev_addr_discard(dev); 4188 4188 4189 4189 /* Actually switch the network namespace */ 4190 - dev->nd_net = net; 4190 + dev_net_set(dev, net); 4191 4191 4192 4192 /* Assign the new device name */ 4193 4193 if (destname != dev->name)
+1 -1
net/core/dst.c
··· 279 279 if (!unregister) { 280 280 dst->input = dst->output = dst_discard; 281 281 } else { 282 - dst->dev = dst->dev->nd_net->loopback_dev; 282 + dst->dev = dev_net(dst->dev)->loopback_dev; 283 283 dev_hold(dst->dev); 284 284 dev_put(dev); 285 285 if (dst->neighbour && dst->neighbour->dev == dev) {
+1 -1
net/core/fib_rules.c
··· 618 618 void *ptr) 619 619 { 620 620 struct net_device *dev = ptr; 621 - struct net *net = dev->nd_net; 621 + struct net *net = dev_net(dev); 622 622 struct fib_rules_ops *ops; 623 623 624 624 ASSERT_RTNL();
+6 -6
net/core/neighbour.c
··· 388 388 hash_val = tbl->hash(pkey, NULL); 389 389 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) { 390 390 if (!memcmp(n->primary_key, pkey, key_len) && 391 - (net == n->dev->nd_net)) { 391 + dev_net(n->dev) == net) { 392 392 neigh_hold(n); 393 393 NEIGH_CACHE_STAT_INC(tbl, hits); 394 394 break; ··· 1298 1298 struct neigh_parms *p, *ref; 1299 1299 struct net *net; 1300 1300 1301 - net = dev->nd_net; 1301 + net = dev_net(dev); 1302 1302 ref = lookup_neigh_params(tbl, net, 0); 1303 1303 if (!ref) 1304 1304 return NULL; ··· 2050 2050 s_idx = 0; 2051 2051 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) { 2052 2052 int lidx; 2053 - if (n->dev->nd_net != net) 2053 + if (dev_net(n->dev) != net) 2054 2054 continue; 2055 2055 lidx = idx++; 2056 2056 if (lidx < s_idx) ··· 2155 2155 n = tbl->hash_buckets[bucket]; 2156 2156 2157 2157 while (n) { 2158 - if (n->dev->nd_net != net) 2158 + if (dev_net(n->dev) != net) 2159 2159 goto next; 2160 2160 if (state->neigh_sub_iter) { 2161 2161 loff_t fakep = 0; ··· 2198 2198 2199 2199 while (1) { 2200 2200 while (n) { 2201 - if (n->dev->nd_net != net) 2201 + if (dev_net(n->dev) != net) 2202 2202 goto next; 2203 2203 if (state->neigh_sub_iter) { 2204 2204 void *v = state->neigh_sub_iter(state, n, pos); ··· 2482 2482 2483 2483 static void __neigh_notify(struct neighbour *n, int type, int flags) 2484 2484 { 2485 - struct net *net = n->dev->nd_net; 2485 + struct net *net = dev_net(n->dev); 2486 2486 struct sk_buff *skb; 2487 2487 int err = -ENOBUFS; 2488 2488
+1 -1
net/core/pktgen.c
··· 1874 1874 { 1875 1875 struct net_device *dev = ptr; 1876 1876 1877 - if (dev->nd_net != &init_net) 1877 + if (dev_net(dev) != &init_net) 1878 1878 return NOTIFY_DONE; 1879 1879 1880 1880 /* It is OK that we do not hold the group lock right now,
+2 -2
net/core/rtnetlink.c
··· 972 972 goto err_free; 973 973 } 974 974 975 - dev->nd_net = net; 975 + dev_net_set(dev, net); 976 976 dev->rtnl_link_ops = ops; 977 977 978 978 if (tb[IFLA_MTU]) ··· 1198 1198 1199 1199 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) 1200 1200 { 1201 - struct net *net = dev->nd_net; 1201 + struct net *net = dev_net(dev); 1202 1202 struct sk_buff *skb; 1203 1203 int err = -ENOBUFS; 1204 1204
+1 -1
net/decnet/af_decnet.c
··· 2089 2089 { 2090 2090 struct net_device *dev = (struct net_device *)ptr; 2091 2091 2092 - if (dev->nd_net != &init_net) 2092 + if (dev_net(dev) != &init_net) 2093 2093 return NOTIFY_DONE; 2094 2094 2095 2095 switch(event) {
+1 -1
net/decnet/dn_route.c
··· 580 580 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; 581 581 unsigned char padlen = 0; 582 582 583 - if (dev->nd_net != &init_net) 583 + if (dev_net(dev) != &init_net) 584 584 goto dump_it; 585 585 586 586 if (dn == NULL)
+2 -2
net/econet/af_econet.c
··· 1064 1064 struct sock *sk; 1065 1065 struct ec_device *edev = dev->ec_ptr; 1066 1066 1067 - if (dev->nd_net != &init_net) 1067 + if (dev_net(dev) != &init_net) 1068 1068 goto drop; 1069 1069 1070 1070 if (skb->pkt_type == PACKET_OTHERHOST) ··· 1121 1121 struct net_device *dev = (struct net_device *)data; 1122 1122 struct ec_device *edev; 1123 1123 1124 - if (dev->nd_net != &init_net) 1124 + if (dev_net(dev) != &init_net) 1125 1125 return NOTIFY_DONE; 1126 1126 1127 1127 switch (msg) {
+7 -7
net/ipv4/arp.c
··· 242 242 return -EINVAL; 243 243 } 244 244 245 - neigh->type = inet_addr_type(dev->nd_net, addr); 245 + neigh->type = inet_addr_type(dev_net(dev), addr); 246 246 247 247 parms = in_dev->arp_parms; 248 248 __neigh_parms_put(neigh->parms); ··· 341 341 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { 342 342 default: 343 343 case 0: /* By default announce any local IP */ 344 - if (skb && inet_addr_type(dev->nd_net, ip_hdr(skb)->saddr) == RTN_LOCAL) 344 + if (skb && inet_addr_type(dev_net(dev), ip_hdr(skb)->saddr) == RTN_LOCAL) 345 345 saddr = ip_hdr(skb)->saddr; 346 346 break; 347 347 case 1: /* Restrict announcements of saddr in same subnet */ 348 348 if (!skb) 349 349 break; 350 350 saddr = ip_hdr(skb)->saddr; 351 - if (inet_addr_type(dev->nd_net, saddr) == RTN_LOCAL) { 351 + if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) { 352 352 /* saddr should be known to target */ 353 353 if (inet_addr_onlink(in_dev, target, saddr)) 354 354 break; ··· 424 424 int flag = 0; 425 425 /*unsigned long now; */ 426 426 427 - if (ip_route_output_key(dev->nd_net, &rt, &fl) < 0) 427 + if (ip_route_output_key(dev_net(dev), &rt, &fl) < 0) 428 428 return 1; 429 429 if (rt->u.dst.dev != dev) { 430 430 NET_INC_STATS_BH(LINUX_MIB_ARPFILTER); ··· 477 477 478 478 paddr = skb->rtable->rt_gateway; 479 479 480 - if (arp_set_predefined(inet_addr_type(dev->nd_net, paddr), haddr, paddr, dev)) 480 + if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) 481 481 return 0; 482 482 483 483 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1); ··· 709 709 u16 dev_type = dev->type; 710 710 int addr_type; 711 711 struct neighbour *n; 712 - struct net *net = dev->nd_net; 712 + struct net *net = dev_net(dev); 713 713 714 714 /* arp_rcv below verifies the ARP header and verifies the device 715 715 * is ARP'able. 
··· 858 858 859 859 n = __neigh_lookup(&arp_tbl, &sip, dev, 0); 860 860 861 - if (IPV4_DEVCONF_ALL(dev->nd_net, ARP_ACCEPT)) { 861 + if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) { 862 862 /* Unsolicited ARP is not accepted by default. 863 863 It is possible, that this option should be enabled for some 864 864 devices (strip is candidate)
+5 -5
net/ipv4/devinet.c
··· 165 165 if (!in_dev) 166 166 goto out; 167 167 INIT_RCU_HEAD(&in_dev->rcu_head); 168 - memcpy(&in_dev->cnf, dev->nd_net->ipv4.devconf_dflt, 168 + memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt, 169 169 sizeof(in_dev->cnf)); 170 170 in_dev->cnf.sysctl = NULL; 171 171 in_dev->dev = dev; ··· 872 872 { 873 873 __be32 addr = 0; 874 874 struct in_device *in_dev; 875 - struct net *net = dev->nd_net; 875 + struct net *net = dev_net(dev); 876 876 877 877 rcu_read_lock(); 878 878 in_dev = __in_dev_get_rcu(dev); ··· 974 974 if (scope != RT_SCOPE_LINK) 975 975 return confirm_addr_indev(in_dev, dst, local, scope); 976 976 977 - net = in_dev->dev->nd_net; 977 + net = dev_net(in_dev->dev); 978 978 read_lock(&dev_base_lock); 979 979 rcu_read_lock(); 980 980 for_each_netdev(net, dev) { ··· 1203 1203 int err = -ENOBUFS; 1204 1204 struct net *net; 1205 1205 1206 - net = ifa->ifa_dev->dev->nd_net; 1206 + net = dev_net(ifa->ifa_dev->dev); 1207 1207 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); 1208 1208 if (skb == NULL) 1209 1209 goto errout; ··· 1517 1517 { 1518 1518 neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4, 1519 1519 NET_IPV4_NEIGH, "ipv4", NULL, NULL); 1520 - __devinet_sysctl_register(idev->dev->nd_net, idev->dev->name, 1520 + __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, 1521 1521 idev->dev->ifindex, &idev->cnf); 1522 1522 } 1523 1523
+6 -6
net/ipv4/fib_frontend.c
··· 257 257 if (in_dev == NULL) 258 258 goto e_inval; 259 259 260 - net = dev->nd_net; 260 + net = dev_net(dev); 261 261 if (fib_lookup(net, &fl, &res)) 262 262 goto last_resort; 263 263 if (res.type != RTN_UNICAST) ··· 674 674 675 675 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa) 676 676 { 677 - struct net *net = ifa->ifa_dev->dev->nd_net; 677 + struct net *net = dev_net(ifa->ifa_dev->dev); 678 678 struct fib_table *tb; 679 679 struct fib_config cfg = { 680 680 .fc_protocol = RTPROT_KERNEL, ··· 801 801 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); 802 802 803 803 /* Check, that this local address finally disappeared. */ 804 - if (inet_addr_type(dev->nd_net, ifa->ifa_local) != RTN_LOCAL) { 804 + if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) { 805 805 /* And the last, but not the least thing. 806 806 We must flush stray FIB entries. 807 807 808 808 First of all, we scan fib_info list searching 809 809 for stray nexthop entries, then ignite fib_flush. 810 810 */ 811 - if (fib_sync_down_addr(dev->nd_net, ifa->ifa_local)) 812 - fib_flush(dev->nd_net); 811 + if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local)) 812 + fib_flush(dev_net(dev)); 813 813 } 814 814 } 815 815 #undef LOCAL_OK ··· 899 899 static void fib_disable_ip(struct net_device *dev, int force) 900 900 { 901 901 if (fib_sync_down_dev(dev, force)) 902 - fib_flush(dev->nd_net); 902 + fib_flush(dev_net(dev)); 903 903 rt_cache_flush(0); 904 904 arp_ifdown(dev); 905 905 }
+4 -4
net/ipv4/icmp.c
··· 351 351 struct sock *sk; 352 352 struct sk_buff *skb; 353 353 354 - sk = icmp_sk(rt->u.dst.dev->nd_net); 354 + sk = icmp_sk(dev_net(rt->u.dst.dev)); 355 355 if (ip_append_data(sk, icmp_glue_bits, icmp_param, 356 356 icmp_param->data_len+icmp_param->head_len, 357 357 icmp_param->head_len, ··· 382 382 { 383 383 struct ipcm_cookie ipc; 384 384 struct rtable *rt = skb->rtable; 385 - struct net *net = rt->u.dst.dev->nd_net; 385 + struct net *net = dev_net(rt->u.dst.dev); 386 386 struct sock *sk = icmp_sk(net); 387 387 struct inet_sock *inet = inet_sk(sk); 388 388 __be32 daddr; ··· 447 447 448 448 if (!rt) 449 449 goto out; 450 - net = rt->u.dst.dev->nd_net; 450 + net = dev_net(rt->u.dst.dev); 451 451 sk = icmp_sk(net); 452 452 453 453 /* ··· 677 677 u32 info = 0; 678 678 struct net *net; 679 679 680 - net = skb->dst->dev->nd_net; 680 + net = dev_net(skb->dst->dev); 681 681 682 682 /* 683 683 * Incomplete header ?
+8 -8
net/ipv4/igmp.c
··· 130 130 */ 131 131 132 132 #define IGMP_V1_SEEN(in_dev) \ 133 - (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, FORCE_IGMP_VERSION) == 1 || \ 133 + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \ 134 134 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \ 135 135 ((in_dev)->mr_v1_seen && \ 136 136 time_before(jiffies, (in_dev)->mr_v1_seen))) 137 137 #define IGMP_V2_SEEN(in_dev) \ 138 - (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, FORCE_IGMP_VERSION) == 2 || \ 138 + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \ 139 139 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \ 140 140 ((in_dev)->mr_v2_seen && \ 141 141 time_before(jiffies, (in_dev)->mr_v2_seen))) ··· 1198 1198 1199 1199 ASSERT_RTNL(); 1200 1200 1201 - if (in_dev->dev->nd_net != &init_net) 1201 + if (dev_net(in_dev->dev) != &init_net) 1202 1202 return; 1203 1203 1204 1204 for (im=in_dev->mc_list; im; im=im->next) { ··· 1280 1280 1281 1281 ASSERT_RTNL(); 1282 1282 1283 - if (in_dev->dev->nd_net != &init_net) 1283 + if (dev_net(in_dev->dev) != &init_net) 1284 1284 return; 1285 1285 1286 1286 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { ··· 1310 1310 1311 1311 ASSERT_RTNL(); 1312 1312 1313 - if (in_dev->dev->nd_net != &init_net) 1313 + if (dev_net(in_dev->dev) != &init_net) 1314 1314 return; 1315 1315 1316 1316 for (i=in_dev->mc_list; i; i=i->next) ··· 1333 1333 { 1334 1334 ASSERT_RTNL(); 1335 1335 1336 - if (in_dev->dev->nd_net != &init_net) 1336 + if (dev_net(in_dev->dev) != &init_net) 1337 1337 return; 1338 1338 1339 1339 in_dev->mc_tomb = NULL; ··· 1359 1359 1360 1360 ASSERT_RTNL(); 1361 1361 1362 - if (in_dev->dev->nd_net != &init_net) 1362 + if (dev_net(in_dev->dev) != &init_net) 1363 1363 return; 1364 1364 1365 1365 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); ··· 1378 1378 1379 1379 ASSERT_RTNL(); 1380 1380 1381 - if (in_dev->dev->nd_net != &init_net) 1381 + if (dev_net(in_dev->dev) != &init_net) 1382 1382 return; 1383 1383 1384 1384 /* Deactivate 
timers */
+1 -1
net/ipv4/ip_fragment.c
··· 571 571 572 572 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); 573 573 574 - net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net; 574 + net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); 575 575 /* Start by cleaning up the memory. */ 576 576 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) 577 577 ip_evictor(net);
+1 -1
net/ipv4/ip_gre.c
··· 1190 1190 struct ip_tunnel *t = netdev_priv(dev); 1191 1191 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) { 1192 1192 struct in_device *in_dev; 1193 - in_dev = inetdev_by_index(dev->nd_net, t->mlink); 1193 + in_dev = inetdev_by_index(dev_net(dev), t->mlink); 1194 1194 if (in_dev) { 1195 1195 ip_mc_dec_group(in_dev, t->parms.iph.daddr); 1196 1196 in_dev_put(in_dev);
+3 -3
net/ipv4/ip_input.c
··· 172 172 if (sk && inet_sk(sk)->num == protocol && 173 173 (!sk->sk_bound_dev_if || 174 174 sk->sk_bound_dev_if == dev->ifindex) && 175 - sk->sk_net == dev->nd_net) { 175 + sk->sk_net == dev_net(dev)) { 176 176 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 177 177 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { 178 178 read_unlock(&ip_ra_lock); ··· 199 199 200 200 static int ip_local_deliver_finish(struct sk_buff *skb) 201 201 { 202 - struct net *net = skb->dev->nd_net; 202 + struct net *net = dev_net(skb->dev); 203 203 204 204 __skb_pull(skb, ip_hdrlen(skb)); 205 205 ··· 291 291 opt = &(IPCB(skb)->opt); 292 292 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 293 293 294 - if (ip_options_compile(dev->nd_net, opt, skb)) { 294 + if (ip_options_compile(dev_net(dev), opt, skb)) { 295 295 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 296 296 goto drop; 297 297 }
+1 -1
net/ipv4/ip_options.c
··· 145 145 __be32 addr; 146 146 147 147 memcpy(&addr, sptr+soffset-1, 4); 148 - if (inet_addr_type(skb->dst->dev->nd_net, addr) != RTN_LOCAL) { 148 + if (inet_addr_type(dev_net(skb->dst->dev), addr) != RTN_LOCAL) { 149 149 dopt->ts_needtime = 1; 150 150 soffset += 8; 151 151 }
+2 -2
net/ipv4/ipconfig.c
··· 434 434 unsigned char *sha, *tha; /* s for "source", t for "target" */ 435 435 struct ic_device *d; 436 436 437 - if (dev->nd_net != &init_net) 437 + if (dev_net(dev) != &init_net) 438 438 goto drop; 439 439 440 440 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) ··· 854 854 struct ic_device *d; 855 855 int len, ext_len; 856 856 857 - if (dev->nd_net != &init_net) 857 + if (dev_net(dev) != &init_net) 858 858 goto drop; 859 859 860 860 /* Perform verifications before taking the lock. */
+1 -1
net/ipv4/ipmr.c
··· 1089 1089 struct vif_device *v; 1090 1090 int ct; 1091 1091 1092 - if (dev->nd_net != &init_net) 1092 + if (dev_net(dev) != &init_net) 1093 1093 return NOTIFY_DONE; 1094 1094 1095 1095 if (event != NETDEV_UNREGISTER)
+1 -1
net/ipv4/netfilter/ip_queue.c
··· 481 481 { 482 482 struct net_device *dev = ptr; 483 483 484 - if (dev->nd_net != &init_net) 484 + if (dev_net(dev) != &init_net) 485 485 return NOTIFY_DONE; 486 486 487 487 /* Drop any packets associated with the downed device */
+1 -1
net/ipv4/netfilter/ipt_MASQUERADE.c
··· 120 120 { 121 121 const struct net_device *dev = ptr; 122 122 123 - if (dev->nd_net != &init_net) 123 + if (dev_net(dev) != &init_net) 124 124 return NOTIFY_DONE; 125 125 126 126 if (event == NETDEV_DOWN) {
+2 -2
net/ipv4/raw.c
··· 168 168 if (hlist_empty(head)) 169 169 goto out; 170 170 171 - net = skb->dev->nd_net; 171 + net = dev_net(skb->dev); 172 172 sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, 173 173 iph->saddr, iph->daddr, 174 174 skb->dev->ifindex); ··· 276 276 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); 277 277 if (raw_sk != NULL) { 278 278 iph = (struct iphdr *)skb->data; 279 - net = skb->dev->nd_net; 279 + net = dev_net(skb->dev); 280 280 281 281 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, 282 282 iph->daddr, iph->saddr,
+14 -14
net/ipv4/route.c
··· 284 284 rcu_read_lock_bh(); 285 285 r = rcu_dereference(rt_hash_table[st->bucket].chain); 286 286 while (r) { 287 - if (r->u.dst.dev->nd_net == st->p.net && 287 + if (dev_net(r->u.dst.dev) == st->p.net && 288 288 r->rt_genid == st->genid) 289 289 return r; 290 290 r = rcu_dereference(r->u.dst.rt_next); ··· 312 312 struct rtable *r) 313 313 { 314 314 while ((r = __rt_cache_get_next(st, r)) != NULL) { 315 - if (r->u.dst.dev->nd_net != st->p.net) 315 + if (dev_net(r->u.dst.dev) != st->p.net) 316 316 continue; 317 317 if (r->rt_genid == st->genid) 318 318 break; ··· 680 680 681 681 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) 682 682 { 683 - return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net; 683 + return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev); 684 684 } 685 685 686 686 /* ··· 1164 1164 if (!in_dev) 1165 1165 return; 1166 1166 1167 - net = dev->nd_net; 1167 + net = dev_net(dev); 1168 1168 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) 1169 1169 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) 1170 1170 || ipv4_is_zeronet(new_gw)) ··· 1195 1195 rth->fl.oif != ikeys[k] || 1196 1196 rth->fl.iif != 0 || 1197 1197 rth->rt_genid != atomic_read(&rt_genid) || 1198 - rth->u.dst.dev->nd_net != net) { 1198 + dev_net(rth->u.dst.dev) != net) { 1199 1199 rthp = &rth->u.dst.rt_next; 1200 1200 continue; 1201 1201 } ··· 1454 1454 rth->rt_src == iph->saddr && 1455 1455 rth->fl.iif == 0 && 1456 1456 !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) && 1457 - rth->u.dst.dev->nd_net == net && 1457 + dev_net(rth->u.dst.dev) == net && 1458 1458 rth->rt_genid == atomic_read(&rt_genid)) { 1459 1459 unsigned short mtu = new_mtu; 1460 1460 ··· 1530 1530 { 1531 1531 struct rtable *rt = (struct rtable *) dst; 1532 1532 struct in_device *idev = rt->idev; 1533 - if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) { 1533 + if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) { 1534 1534 struct in_device *loopback_idev = 
1535 - in_dev_get(dev->nd_net->loopback_dev); 1535 + in_dev_get(dev_net(dev)->loopback_dev); 1536 1536 if (loopback_idev) { 1537 1537 rt->idev = loopback_idev; 1538 1538 in_dev_put(idev); ··· 1576 1576 1577 1577 if (rt->fl.iif == 0) 1578 1578 src = rt->rt_src; 1579 - else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) { 1579 + else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) { 1580 1580 src = FIB_RES_PREFSRC(res); 1581 1581 fib_res_put(&res); 1582 1582 } else ··· 1900 1900 __be32 spec_dst; 1901 1901 int err = -EINVAL; 1902 1902 int free_res = 0; 1903 - struct net * net = dev->nd_net; 1903 + struct net * net = dev_net(dev); 1904 1904 1905 1905 /* IP on this device is disabled. */ 1906 1906 ··· 2071 2071 int iif = dev->ifindex; 2072 2072 struct net *net; 2073 2073 2074 - net = dev->nd_net; 2074 + net = dev_net(dev); 2075 2075 tos &= IPTOS_RT_MASK; 2076 2076 hash = rt_hash(daddr, saddr, iif); 2077 2077 ··· 2084 2084 rth->fl.oif == 0 && 2085 2085 rth->fl.mark == skb->mark && 2086 2086 rth->fl.fl4_tos == tos && 2087 - rth->u.dst.dev->nd_net == net && 2087 + dev_net(rth->u.dst.dev) == net && 2088 2088 rth->rt_genid == atomic_read(&rt_genid)) { 2089 2089 dst_use(&rth->u.dst, jiffies); 2090 2090 RT_CACHE_STAT_INC(in_hit); ··· 2486 2486 rth->fl.mark == flp->mark && 2487 2487 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2488 2488 (IPTOS_RT_MASK | RTO_ONLINK)) && 2489 - rth->u.dst.dev->nd_net == net && 2489 + dev_net(rth->u.dst.dev) == net && 2490 2490 rth->rt_genid == atomic_read(&rt_genid)) { 2491 2491 dst_use(&rth->u.dst, jiffies); 2492 2492 RT_CACHE_STAT_INC(out_hit); ··· 2795 2795 rcu_read_lock_bh(); 2796 2796 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2797 2797 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2798 - if (rt->u.dst.dev->nd_net != net || idx < s_idx) 2798 + if (dev_net(rt->u.dst.dev) != net || idx < s_idx) 2799 2799 continue; 2800 2800 if (rt->rt_genid != atomic_read(&rt_genid)) 2801 2801 continue;
+3 -3
net/ipv4/tcp_ipv4.c
··· 353 353 return; 354 354 } 355 355 356 - sk = inet_lookup(skb->dev->nd_net, &tcp_hashinfo, iph->daddr, th->dest, 356 + sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest, 357 357 iph->saddr, th->source, inet_iif(skb)); 358 358 if (!sk) { 359 359 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); ··· 1644 1644 TCP_SKB_CB(skb)->flags = iph->tos; 1645 1645 TCP_SKB_CB(skb)->sacked = 0; 1646 1646 1647 - sk = __inet_lookup(skb->dev->nd_net, &tcp_hashinfo, iph->saddr, 1647 + sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr, 1648 1648 th->source, iph->daddr, th->dest, inet_iif(skb)); 1649 1649 if (!sk) 1650 1650 goto no_tcp_socket; ··· 1718 1718 } 1719 1719 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1720 1720 case TCP_TW_SYN: { 1721 - struct sock *sk2 = inet_lookup_listener(skb->dev->nd_net, 1721 + struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev), 1722 1722 &tcp_hashinfo, 1723 1723 iph->daddr, th->dest, 1724 1724 inet_iif(skb));
+2 -2
net/ipv4/udp.c
··· 357 357 int harderr; 358 358 int err; 359 359 360 - sk = __udp4_lib_lookup(skb->dev->nd_net, iph->daddr, uh->dest, 360 + sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest, 361 361 iph->saddr, uh->source, skb->dev->ifindex, udptable); 362 362 if (sk == NULL) { 363 363 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); ··· 1181 1181 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1182 1182 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); 1183 1183 1184 - sk = __udp4_lib_lookup(skb->dev->nd_net, saddr, uh->source, daddr, 1184 + sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr, 1185 1185 uh->dest, inet_iif(skb), udptable); 1186 1186 1187 1187 if (sk != NULL) {
+1 -1
net/ipv4/xfrm4_policy.c
··· 221 221 xdst = (struct xfrm_dst *)dst; 222 222 if (xdst->u.rt.idev->dev == dev) { 223 223 struct in_device *loopback_idev = 224 - in_dev_get(dev->nd_net->loopback_dev); 224 + in_dev_get(dev_net(dev)->loopback_dev); 225 225 BUG_ON(!loopback_idev); 226 226 227 227 do {
+22 -22
net/ipv6/addrconf.c
··· 335 335 336 336 rwlock_init(&ndev->lock); 337 337 ndev->dev = dev; 338 - memcpy(&ndev->cnf, dev->nd_net->ipv6.devconf_dflt, sizeof(ndev->cnf)); 338 + memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); 339 339 ndev->cnf.mtu6 = dev->mtu; 340 340 ndev->cnf.sysctl = NULL; 341 341 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); ··· 561 561 write_lock(&addrconf_hash_lock); 562 562 563 563 /* Ignore adding duplicate addresses on an interface */ 564 - if (ipv6_chk_same_addr(idev->dev->nd_net, addr, idev->dev)) { 564 + if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { 565 565 ADBG(("ipv6_add_addr: already assigned\n")); 566 566 err = -EEXIST; 567 567 goto out; ··· 751 751 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { 752 752 struct in6_addr prefix; 753 753 struct rt6_info *rt; 754 - struct net *net = ifp->idev->dev->nd_net; 754 + struct net *net = dev_net(ifp->idev->dev); 755 755 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 756 756 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); 757 757 ··· 1044 1044 { 1045 1045 struct ipv6_saddr_score scores[2], 1046 1046 *score = &scores[0], *hiscore = &scores[1]; 1047 - struct net *net = dst_dev->nd_net; 1047 + struct net *net = dev_net(dst_dev); 1048 1048 struct ipv6_saddr_dst dst; 1049 1049 struct net_device *dev; 1050 1050 int dst_type; ··· 1217 1217 1218 1218 read_lock_bh(&addrconf_hash_lock); 1219 1219 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1220 - if (ifp->idev->dev->nd_net != net) 1220 + if (dev_net(ifp->idev->dev) != net) 1221 1221 continue; 1222 1222 if (ipv6_addr_equal(&ifp->addr, addr) && 1223 1223 !(ifp->flags&IFA_F_TENTATIVE)) { ··· 1239 1239 u8 hash = ipv6_addr_hash(addr); 1240 1240 1241 1241 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1242 - if (ifp->idev->dev->nd_net != net) 1242 + if (dev_net(ifp->idev->dev) != net) 1243 1243 continue; 1244 1244 if (ipv6_addr_equal(&ifp->addr, addr)) { 1245 1245 if (dev == NULL || 
ifp->idev->dev == dev) ··· 1257 1257 1258 1258 read_lock_bh(&addrconf_hash_lock); 1259 1259 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1260 - if (ifp->idev->dev->nd_net != net) 1260 + if (dev_net(ifp->idev->dev) != net) 1261 1261 continue; 1262 1262 if (ipv6_addr_equal(&ifp->addr, addr)) { 1263 1263 if (dev == NULL || ifp->idev->dev == dev || ··· 1559 1559 .fc_expires = expires, 1560 1560 .fc_dst_len = plen, 1561 1561 .fc_flags = RTF_UP | flags, 1562 - .fc_nlinfo.nl_net = dev->nd_net, 1562 + .fc_nlinfo.nl_net = dev_net(dev), 1563 1563 }; 1564 1564 1565 1565 ipv6_addr_copy(&cfg.fc_dst, pfx); ··· 1586 1586 .fc_ifindex = dev->ifindex, 1587 1587 .fc_dst_len = 8, 1588 1588 .fc_flags = RTF_UP, 1589 - .fc_nlinfo.nl_net = dev->nd_net, 1589 + .fc_nlinfo.nl_net = dev_net(dev), 1590 1590 }; 1591 1591 1592 1592 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); ··· 1603 1603 .fc_ifindex = dev->ifindex, 1604 1604 .fc_dst_len = 96, 1605 1605 .fc_flags = RTF_UP | RTF_NONEXTHOP, 1606 - .fc_nlinfo.nl_net = dev->nd_net, 1606 + .fc_nlinfo.nl_net = dev_net(dev), 1607 1607 }; 1608 1608 1609 1609 /* prefix length - 96 bits "::d.d.d.d" */ ··· 1704 1704 1705 1705 if (pinfo->onlink) { 1706 1706 struct rt6_info *rt; 1707 - rt = rt6_lookup(dev->nd_net, &pinfo->prefix, NULL, 1707 + rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1708 1708 dev->ifindex, 1); 1709 1709 1710 1710 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { ··· 1748 1748 1749 1749 ok: 1750 1750 1751 - ifp = ipv6_get_ifaddr(dev->nd_net, &addr, dev, 1); 1751 + ifp = ipv6_get_ifaddr(dev_net(dev), &addr, dev, 1); 1752 1752 1753 1753 if (ifp == NULL && valid_lft) { 1754 1754 int max_addresses = in6_dev->cnf.max_addresses; ··· 2071 2071 struct inet6_ifaddr * ifp; 2072 2072 struct in6_addr addr; 2073 2073 struct net_device *dev; 2074 - struct net *net = idev->dev->nd_net; 2074 + struct net *net = dev_net(idev->dev); 2075 2075 int scope; 2076 2076 2077 2077 ASSERT_RTNL(); ··· 2261 2261 static 
void ip6_tnl_add_linklocal(struct inet6_dev *idev) 2262 2262 { 2263 2263 struct net_device *link_dev; 2264 - struct net *net = idev->dev->nd_net; 2264 + struct net *net = dev_net(idev->dev); 2265 2265 2266 2266 /* first try to inherit the link-local address from the link device */ 2267 2267 if (idev->dev->iflink && ··· 2442 2442 { 2443 2443 struct inet6_dev *idev; 2444 2444 struct inet6_ifaddr *ifa, **bifa; 2445 - struct net *net = dev->nd_net; 2445 + struct net *net = dev_net(dev); 2446 2446 int i; 2447 2447 2448 2448 ASSERT_RTNL(); ··· 2771 2771 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 2772 2772 ifa = inet6_addr_lst[state->bucket]; 2773 2773 2774 - while (ifa && ifa->idev->dev->nd_net != net) 2774 + while (ifa && dev_net(ifa->idev->dev) != net) 2775 2775 ifa = ifa->lst_next; 2776 2776 if (ifa) 2777 2777 break; ··· 2787 2787 ifa = ifa->lst_next; 2788 2788 try_again: 2789 2789 if (ifa) { 2790 - if (ifa->idev->dev->nd_net != net) { 2790 + if (dev_net(ifa->idev->dev) != net) { 2791 2791 ifa = ifa->lst_next; 2792 2792 goto try_again; 2793 2793 } ··· 2905 2905 u8 hash = ipv6_addr_hash(addr); 2906 2906 read_lock_bh(&addrconf_hash_lock); 2907 2907 for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { 2908 - if (ifp->idev->dev->nd_net != net) 2908 + if (dev_net(ifp->idev->dev) != net) 2909 2909 continue; 2910 2910 if (ipv6_addr_cmp(&ifp->addr, addr) == 0 && 2911 2911 (ifp->flags & IFA_F_HOMEADDRESS)) { ··· 3469 3469 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) 3470 3470 { 3471 3471 struct sk_buff *skb; 3472 - struct net *net = ifa->idev->dev->nd_net; 3472 + struct net *net = dev_net(ifa->idev->dev); 3473 3473 int err = -ENOBUFS; 3474 3474 3475 3475 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); ··· 3675 3675 void inet6_ifinfo_notify(int event, struct inet6_dev *idev) 3676 3676 { 3677 3677 struct sk_buff *skb; 3678 - struct net *net = idev->dev->nd_net; 3678 + struct net *net = dev_net(idev->dev); 3679 
3679 int err = -ENOBUFS; 3680 3680 3681 3681 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); ··· 3745 3745 struct prefix_info *pinfo) 3746 3746 { 3747 3747 struct sk_buff *skb; 3748 - struct net *net = idev->dev->nd_net; 3748 + struct net *net = dev_net(idev->dev); 3749 3749 int err = -ENOBUFS; 3750 3750 3751 3751 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); ··· 4157 4157 NET_IPV6_NEIGH, "ipv6", 4158 4158 &ndisc_ifinfo_sysctl_change, 4159 4159 NULL); 4160 - __addrconf_sysctl_register(idev->dev->nd_net, idev->dev->name, 4160 + __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name, 4161 4161 idev->dev->ifindex, idev, &idev->cnf); 4162 4162 } 4163 4163
+2 -2
net/ipv6/icmp.c
··· 306 306 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, 307 307 struct net_device *dev) 308 308 { 309 - struct net *net = skb->dev->nd_net; 309 + struct net *net = dev_net(skb->dev); 310 310 struct inet6_dev *idev = NULL; 311 311 struct ipv6hdr *hdr = ipv6_hdr(skb); 312 312 struct sock *sk; ··· 507 507 508 508 static void icmpv6_echo_reply(struct sk_buff *skb) 509 509 { 510 - struct net *net = skb->dev->nd_net; 510 + struct net *net = dev_net(skb->dev); 511 511 struct sock *sk; 512 512 struct inet6_dev *idev; 513 513 struct ipv6_pinfo *np;
+1 -1
net/ipv6/ip6_output.c
··· 402 402 struct dst_entry *dst = skb->dst; 403 403 struct ipv6hdr *hdr = ipv6_hdr(skb); 404 404 struct inet6_skb_parm *opt = IP6CB(skb); 405 - struct net *net = dst->dev->nd_net; 405 + struct net *net = dev_net(dst->dev); 406 406 407 407 if (ipv6_devconf.forwarding == 0) 408 408 goto error;
+3 -3
net/ipv6/mcast.c
··· 1400 1400 1401 1401 static struct sk_buff *mld_newpack(struct net_device *dev, int size) 1402 1402 { 1403 - struct net *net = dev->nd_net; 1403 + struct net *net = dev_net(dev); 1404 1404 struct sock *sk = net->ipv6.igmp_sk; 1405 1405 struct sk_buff *skb; 1406 1406 struct mld2_report *pmr; ··· 1448 1448 (struct mld2_report *)skb_transport_header(skb); 1449 1449 int payload_len, mldlen; 1450 1450 struct inet6_dev *idev = in6_dev_get(skb->dev); 1451 - struct net *net = skb->dev->nd_net; 1451 + struct net *net = dev_net(skb->dev); 1452 1452 int err; 1453 1453 struct flowi fl; 1454 1454 ··· 1762 1762 1763 1763 static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) 1764 1764 { 1765 - struct net *net = dev->nd_net; 1765 + struct net *net = dev_net(dev); 1766 1766 struct sock *sk = net->ipv6.igmp_sk; 1767 1767 struct inet6_dev *idev; 1768 1768 struct sk_buff *skb;
+12 -12
net/ipv6/ndisc.c
··· 447 447 { 448 448 struct flowi fl; 449 449 struct dst_entry *dst; 450 - struct net *net = dev->nd_net; 450 + struct net *net = dev_net(dev); 451 451 struct sock *sk = net->ipv6.ndisc_sk; 452 452 struct sk_buff *skb; 453 453 struct icmp6hdr *hdr; ··· 539 539 }; 540 540 541 541 /* for anycast or proxy, solicited_addr != src_addr */ 542 - ifp = ipv6_get_ifaddr(dev->nd_net, solicited_addr, dev, 1); 542 + ifp = ipv6_get_ifaddr(dev_net(dev), solicited_addr, dev, 1); 543 543 if (ifp) { 544 544 src_addr = solicited_addr; 545 545 if (ifp->flags & IFA_F_OPTIMISTIC) ··· 547 547 in6_ifa_put(ifp); 548 548 } else { 549 549 if (ipv6_dev_get_saddr(dev, daddr, 550 - inet6_sk(dev->nd_net->ipv6.ndisc_sk)->srcprefs, 550 + inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, 551 551 &tmpaddr)) 552 552 return; 553 553 src_addr = &tmpaddr; ··· 601 601 * suppress the inclusion of the sllao. 602 602 */ 603 603 if (send_sllao) { 604 - struct inet6_ifaddr *ifp = ipv6_get_ifaddr(dev->nd_net, saddr, 604 + struct inet6_ifaddr *ifp = ipv6_get_ifaddr(dev_net(dev), saddr, 605 605 dev, 1); 606 606 if (ifp) { 607 607 if (ifp->flags & IFA_F_OPTIMISTIC) { ··· 639 639 struct in6_addr *target = (struct in6_addr *)&neigh->primary_key; 640 640 int probes = atomic_read(&neigh->probes); 641 641 642 - if (skb && ipv6_chk_addr(dev->nd_net, &ipv6_hdr(skb)->saddr, dev, 1)) 642 + if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1)) 643 643 saddr = &ipv6_hdr(skb)->saddr; 644 644 645 645 if ((probes -= neigh->parms->ucast_probes) < 0) { ··· 727 727 728 728 inc = ipv6_addr_is_multicast(daddr); 729 729 730 - ifp = ipv6_get_ifaddr(dev->nd_net, &msg->target, dev, 1); 730 + ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); 731 731 if (ifp) { 732 732 733 733 if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { ··· 776 776 if (ipv6_chk_acast_addr(dev, &msg->target) || 777 777 (idev->cnf.forwarding && 778 778 (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) && 779 - (pneigh = 
pneigh_lookup(&nd_tbl, dev->nd_net, 779 + (pneigh = pneigh_lookup(&nd_tbl, dev_net(dev), 780 780 &msg->target, dev, 0)) != NULL)) { 781 781 if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && 782 782 skb->pkt_type != PACKET_HOST && ··· 886 886 return; 887 887 } 888 888 } 889 - ifp = ipv6_get_ifaddr(dev->nd_net, &msg->target, dev, 1); 889 + ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); 890 890 if (ifp) { 891 891 if (ifp->flags & IFA_F_TENTATIVE) { 892 892 addrconf_dad_failure(ifp); ··· 918 918 */ 919 919 if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && 920 920 ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp && 921 - pneigh_lookup(&nd_tbl, dev->nd_net, &msg->target, dev, 0)) { 921 + pneigh_lookup(&nd_tbl, dev_net(dev), &msg->target, dev, 0)) { 922 922 /* XXX: idev->cnf.prixy_ndp */ 923 923 goto out; 924 924 } ··· 1008 1008 struct sk_buff *skb; 1009 1009 struct nlmsghdr *nlh; 1010 1010 struct nduseroptmsg *ndmsg; 1011 - struct net *net = ra->dev->nd_net; 1011 + struct net *net = dev_net(ra->dev); 1012 1012 int err; 1013 1013 int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg) 1014 1014 + (opt->nd_opt_len << 3)); ··· 1395 1395 struct in6_addr *target) 1396 1396 { 1397 1397 struct net_device *dev = skb->dev; 1398 - struct net *net = dev->nd_net; 1398 + struct net *net = dev_net(dev); 1399 1399 struct sock *sk = net->ipv6.ndisc_sk; 1400 1400 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1401 1401 struct sk_buff *buff; ··· 1597 1597 static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1598 1598 { 1599 1599 struct net_device *dev = ptr; 1600 - struct net *net = dev->nd_net; 1600 + struct net *net = dev_net(dev); 1601 1601 1602 1602 switch (event) { 1603 1603 case NETDEV_CHANGEADDR:
+1 -1
net/ipv6/netfilter/ip6_queue.c
··· 484 484 { 485 485 struct net_device *dev = ptr; 486 486 487 - if (dev->nd_net != &init_net) 487 + if (dev_net(dev) != &init_net) 488 488 return NOTIFY_DONE; 489 489 490 490 /* Drop any packets associated with the downed device */
+1 -1
net/ipv6/proc.c
··· 214 214 if (!idev || !idev->dev) 215 215 return -EINVAL; 216 216 217 - if (idev->dev->nd_net != &init_net) 217 + if (dev_net(idev->dev) != &init_net) 218 218 return 0; 219 219 220 220 if (!proc_net_devsnmp6)
+2 -2
net/ipv6/raw.c
··· 176 176 if (sk == NULL) 177 177 goto out; 178 178 179 - net = skb->dev->nd_net; 179 + net = dev_net(skb->dev); 180 180 sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif); 181 181 182 182 while (sk) { ··· 363 363 if (sk != NULL) { 364 364 saddr = &ipv6_hdr(skb)->saddr; 365 365 daddr = &ipv6_hdr(skb)->daddr; 366 - net = skb->dev->nd_net; 366 + net = dev_net(skb->dev); 367 367 368 368 while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr, 369 369 IP6CB(skb)->iif))) {
+1 -1
net/ipv6/reassembly.c
··· 600 600 return 1; 601 601 } 602 602 603 - net = skb->dev->nd_net; 603 + net = dev_net(skb->dev); 604 604 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 605 605 ip6_evictor(net, ip6_dst_idev(skb->dst)); 606 606
+20 -20
net/ipv6/route.c
··· 208 208 struct rt6_info *rt = (struct rt6_info *)dst; 209 209 struct inet6_dev *idev = rt->rt6i_idev; 210 210 struct net_device *loopback_dev = 211 - dev->nd_net->loopback_dev; 211 + dev_net(dev)->loopback_dev; 212 212 213 213 if (dev != loopback_dev && idev != NULL && idev->dev == dev) { 214 214 struct inet6_dev *loopback_idev = ··· 433 433 RT6_TRACE("%s() => %p\n", 434 434 __func__, match); 435 435 436 - net = rt0->rt6i_dev->nd_net; 436 + net = dev_net(rt0->rt6i_dev); 437 437 return (match ? match : net->ipv6.ip6_null_entry); 438 438 } 439 439 ··· 441 441 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 442 442 struct in6_addr *gwaddr) 443 443 { 444 - struct net *net = dev->nd_net; 444 + struct net *net = dev_net(dev); 445 445 struct route_info *rinfo = (struct route_info *) opt; 446 446 struct in6_addr prefix_buf, *prefix; 447 447 unsigned int pref; ··· 607 607 int ip6_ins_rt(struct rt6_info *rt) 608 608 { 609 609 struct nl_info info = { 610 - .nl_net = rt->rt6i_dev->nd_net, 610 + .nl_net = dev_net(rt->rt6i_dev), 611 611 }; 612 612 return __ip6_ins_rt(rt, &info); 613 613 } ··· 745 745 void ip6_route_input(struct sk_buff *skb) 746 746 { 747 747 struct ipv6hdr *iph = ipv6_hdr(skb); 748 - struct net *net = skb->dev->nd_net; 748 + struct net *net = dev_net(skb->dev); 749 749 int flags = RT6_LOOKUP_F_HAS_SADDR; 750 750 struct flowi fl = { 751 751 .iif = skb->dev->ifindex, ··· 928 928 { 929 929 struct rt6_info *rt; 930 930 struct inet6_dev *idev = in6_dev_get(dev); 931 - struct net *net = dev->nd_net; 931 + struct net *net = dev_net(dev); 932 932 933 933 if (unlikely(idev == NULL)) 934 934 return NULL; ··· 1252 1252 rt->rt6i_idev = idev; 1253 1253 rt->rt6i_table = table; 1254 1254 1255 - cfg->fc_nlinfo.nl_net = dev->nd_net; 1255 + cfg->fc_nlinfo.nl_net = dev_net(dev); 1256 1256 1257 1257 return __ip6_ins_rt(rt, &cfg->fc_nlinfo); 1258 1258 ··· 1270 1270 { 1271 1271 int err; 1272 1272 struct fib6_table *table; 1273 - struct net *net = 
rt->rt6i_dev->nd_net; 1273 + struct net *net = dev_net(rt->rt6i_dev); 1274 1274 1275 1275 if (rt == net->ipv6.ip6_null_entry) 1276 1276 return -ENOENT; ··· 1289 1289 int ip6_del_rt(struct rt6_info *rt) 1290 1290 { 1291 1291 struct nl_info info = { 1292 - .nl_net = rt->rt6i_dev->nd_net, 1292 + .nl_net = dev_net(rt->rt6i_dev), 1293 1293 }; 1294 1294 return __ip6_del_rt(rt, &info); 1295 1295 } ··· 1401 1401 struct net_device *dev) 1402 1402 { 1403 1403 int flags = RT6_LOOKUP_F_HAS_SADDR; 1404 - struct net *net = dev->nd_net; 1404 + struct net *net = dev_net(dev); 1405 1405 struct ip6rd_flowi rdfl = { 1406 1406 .fl = { 1407 1407 .oif = dev->ifindex, ··· 1428 1428 { 1429 1429 struct rt6_info *rt, *nrt = NULL; 1430 1430 struct netevent_redirect netevent; 1431 - struct net *net = neigh->dev->nd_net; 1431 + struct net *net = dev_net(neigh->dev); 1432 1432 1433 1433 rt = ip6_route_redirect(dest, src, saddr, neigh->dev); 1434 1434 ··· 1477 1477 nrt->rt6i_nexthop = neigh_clone(neigh); 1478 1478 /* Reset pmtu, it may be better */ 1479 1479 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); 1480 - nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(neigh->dev->nd_net, 1480 + nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev), 1481 1481 dst_mtu(&nrt->u.dst)); 1482 1482 1483 1483 if (ip6_ins_rt(nrt)) ··· 1506 1506 struct net_device *dev, u32 pmtu) 1507 1507 { 1508 1508 struct rt6_info *rt, *nrt; 1509 - struct net *net = dev->nd_net; 1509 + struct net *net = dev_net(dev); 1510 1510 int allfrag = 0; 1511 1511 1512 1512 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0); ··· 1583 1583 1584 1584 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) 1585 1585 { 1586 - struct net *net = ort->rt6i_dev->nd_net; 1586 + struct net *net = dev_net(ort->rt6i_dev); 1587 1587 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); 1588 1588 1589 1589 if (rt) { ··· 1682 1682 struct rt6_info *rt; 1683 1683 struct fib6_table *table; 1684 1684 1685 - table = 
fib6_get_table(dev->nd_net, RT6_TABLE_DFLT); 1685 + table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT); 1686 1686 if (table == NULL) 1687 1687 return NULL; 1688 1688 ··· 1713 1713 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 1714 1714 .fc_nlinfo.pid = 0, 1715 1715 .fc_nlinfo.nlh = NULL, 1716 - .fc_nlinfo.nl_net = dev->nd_net, 1716 + .fc_nlinfo.nl_net = dev_net(dev), 1717 1717 }; 1718 1718 1719 1719 ipv6_addr_copy(&cfg.fc_gateway, gwaddr); ··· 1862 1862 const struct in6_addr *addr, 1863 1863 int anycast) 1864 1864 { 1865 - struct net *net = idev->dev->nd_net; 1865 + struct net *net = dev_net(idev->dev); 1866 1866 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); 1867 1867 1868 1868 if (rt == NULL) ··· 1939 1939 { 1940 1940 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 1941 1941 struct inet6_dev *idev; 1942 - struct net *net = arg->dev->nd_net; 1942 + struct net *net = dev_net(arg->dev); 1943 1943 1944 1944 /* In IPv6 pmtu discovery is not optional, 1945 1945 so that RTAX_MTU lock cannot disable it. ··· 1983 1983 .mtu = mtu, 1984 1984 }; 1985 1985 1986 - fib6_clean_all(dev->nd_net, rt6_mtu_change_route, 0, &arg); 1986 + fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg); 1987 1987 } 1988 1988 1989 1989 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { ··· 2321 2321 unsigned long event, void *data) 2322 2322 { 2323 2323 struct net_device *dev = (struct net_device *)data; 2324 - struct net *net = dev->nd_net; 2324 + struct net *net = dev_net(dev); 2325 2325 2326 2326 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { 2327 2327 net->ipv6.ip6_null_entry->u.dst.dev = dev;
+5 -5
net/ipv6/tcp_ipv6.c
··· 321 321 struct tcp_sock *tp; 322 322 __u32 seq; 323 323 324 - sk = inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, &hdr->daddr, 324 + sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr, 325 325 th->dest, &hdr->saddr, th->source, skb->dev->ifindex); 326 326 327 327 if (sk == NULL) { ··· 988 988 struct tcphdr *th = tcp_hdr(skb), *t1; 989 989 struct sk_buff *buff; 990 990 struct flowi fl; 991 - struct net *net = skb->dst->dev->nd_net; 991 + struct net *net = dev_net(skb->dst->dev); 992 992 struct sock *ctl_sk = net->ipv6.tcp_sk; 993 993 unsigned int tot_len = sizeof(*th); 994 994 #ifdef CONFIG_TCP_MD5SIG ··· 1093 1093 struct tcphdr *th = tcp_hdr(skb), *t1; 1094 1094 struct sk_buff *buff; 1095 1095 struct flowi fl; 1096 - struct net *net = skb->dev->nd_net; 1096 + struct net *net = dev_net(skb->dev); 1097 1097 struct sock *ctl_sk = net->ipv6.tcp_sk; 1098 1098 unsigned int tot_len = sizeof(struct tcphdr); 1099 1099 __be32 *topt; ··· 1739 1739 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); 1740 1740 TCP_SKB_CB(skb)->sacked = 0; 1741 1741 1742 - sk = __inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, 1742 + sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, 1743 1743 &ipv6_hdr(skb)->saddr, th->source, 1744 1744 &ipv6_hdr(skb)->daddr, ntohs(th->dest), 1745 1745 inet6_iif(skb)); ··· 1822 1822 { 1823 1823 struct sock *sk2; 1824 1824 1825 - sk2 = inet6_lookup_listener(skb->dev->nd_net, &tcp_hashinfo, 1825 + sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, 1826 1826 &ipv6_hdr(skb)->daddr, 1827 1827 ntohs(th->dest), inet6_iif(skb)); 1828 1828 if (sk2 != NULL) {
+2 -2
net/ipv6/udp.c
··· 235 235 struct sock *sk; 236 236 int err; 237 237 238 - sk = __udp6_lib_lookup(skb->dev->nd_net, daddr, uh->dest, 238 + sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest, 239 239 saddr, uh->source, inet6_iif(skb), udptable); 240 240 if (sk == NULL) 241 241 return; ··· 483 483 * check socket cache ... must talk to Alan about his plans 484 484 * for sock caches... i'll skip this for now. 485 485 */ 486 - sk = __udp6_lib_lookup(skb->dev->nd_net, saddr, uh->source, 486 + sk = __udp6_lib_lookup(dev_net(skb->dev), saddr, uh->source, 487 487 daddr, uh->dest, inet6_iif(skb), udptable); 488 488 489 489 if (sk == NULL) {
+1 -1
net/ipv6/xfrm6_policy.c
··· 247 247 xdst = (struct xfrm_dst *)dst; 248 248 if (xdst->u.rt6.rt6i_idev->dev == dev) { 249 249 struct inet6_dev *loopback_idev = 250 - in6_dev_get(dev->nd_net->loopback_dev); 250 + in6_dev_get(dev_net(dev)->loopback_dev); 251 251 BUG_ON(!loopback_idev); 252 252 253 253 do {
+2 -2
net/ipx/af_ipx.c
··· 335 335 struct net_device *dev = ptr; 336 336 struct ipx_interface *i, *tmp; 337 337 338 - if (dev->nd_net != &init_net) 338 + if (dev_net(dev) != &init_net) 339 339 return NOTIFY_DONE; 340 340 341 341 if (event != NETDEV_DOWN && event != NETDEV_UP) ··· 1636 1636 u16 ipx_pktsize; 1637 1637 int rc = 0; 1638 1638 1639 - if (dev->nd_net != &init_net) 1639 + if (dev_net(dev) != &init_net) 1640 1640 goto drop; 1641 1641 1642 1642 /* Not ours */
+1 -1
net/irda/irlap_frame.c
··· 1326 1326 int command; 1327 1327 __u8 control; 1328 1328 1329 - if (dev->nd_net != &init_net) 1329 + if (dev_net(dev) != &init_net) 1330 1330 goto out; 1331 1331 1332 1332 /* FIXME: should we get our own field? */
+1 -1
net/llc/llc_input.c
··· 146 146 int (*rcv)(struct sk_buff *, struct net_device *, 147 147 struct packet_type *, struct net_device *); 148 148 149 - if (dev->nd_net != &init_net) 149 + if (dev_net(dev) != &init_net) 150 150 goto drop; 151 151 152 152 /*
+1 -1
net/netfilter/core.c
··· 168 168 #ifdef CONFIG_NET_NS 169 169 struct net *net; 170 170 171 - net = indev == NULL ? outdev->nd_net : indev->nd_net; 171 + net = indev == NULL ? dev_net(outdev) : dev_net(indev); 172 172 if (net != &init_net) 173 173 return 1; 174 174 #endif
+1 -1
net/netlabel/netlabel_unlabeled.c
··· 954 954 struct net_device *dev = ptr; 955 955 struct netlbl_unlhsh_iface *iface = NULL; 956 956 957 - if (dev->nd_net != &init_net) 957 + if (dev_net(dev) != &init_net) 958 958 return NOTIFY_DONE; 959 959 960 960 /* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */
+1 -1
net/netrom/af_netrom.c
··· 106 106 { 107 107 struct net_device *dev = (struct net_device *)ptr; 108 108 109 - if (dev->nd_net != &init_net) 109 + if (dev_net(dev) != &init_net) 110 110 return NOTIFY_DONE; 111 111 112 112 if (event != NETDEV_DOWN)
+4 -4
net/packet/af_packet.c
··· 263 263 if (skb->pkt_type == PACKET_LOOPBACK) 264 264 goto out; 265 265 266 - if (dev->nd_net != sk->sk_net) 266 + if (dev_net(dev) != sk->sk_net) 267 267 goto out; 268 268 269 269 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) ··· 451 451 sk = pt->af_packet_priv; 452 452 po = pkt_sk(sk); 453 453 454 - if (dev->nd_net != sk->sk_net) 454 + if (dev_net(dev) != sk->sk_net) 455 455 goto drop; 456 456 457 457 skb->dev = dev; ··· 568 568 sk = pt->af_packet_priv; 569 569 po = pkt_sk(sk); 570 570 571 - if (dev->nd_net != sk->sk_net) 571 + if (dev_net(dev) != sk->sk_net) 572 572 goto drop; 573 573 574 574 if (dev->header_ops) { ··· 1450 1450 struct sock *sk; 1451 1451 struct hlist_node *node; 1452 1452 struct net_device *dev = data; 1453 - struct net *net = dev->nd_net; 1453 + struct net *net = dev_net(dev); 1454 1454 1455 1455 read_lock(&net->packet.sklist_lock); 1456 1456 sk_for_each(sk, node, &net->packet.sklist) {
+1 -1
net/rose/af_rose.c
··· 197 197 { 198 198 struct net_device *dev = (struct net_device *)ptr; 199 199 200 - if (dev->nd_net != &init_net) 200 + if (dev_net(dev) != &init_net) 201 201 return NOTIFY_DONE; 202 202 203 203 if (event != NETDEV_DOWN)
+1 -1
net/sctp/protocol.c
··· 630 630 struct sctp_sockaddr_entry *temp; 631 631 int found = 0; 632 632 633 - if (ifa->ifa_dev->dev->nd_net != &init_net) 633 + if (dev_net(ifa->ifa_dev->dev) != &init_net) 634 634 return NOTIFY_DONE; 635 635 636 636 switch (ev) {
+2 -2
net/tipc/eth_media.c
··· 101 101 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv; 102 102 u32 size; 103 103 104 - if (dev->nd_net != &init_net) { 104 + if (dev_net(dev) != &init_net) { 105 105 kfree_skb(buf); 106 106 return 0; 107 107 } ··· 198 198 struct eth_bearer *eb_ptr = &eth_bearers[0]; 199 199 struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS]; 200 200 201 - if (dev->nd_net != &init_net) 201 + if (dev_net(dev) != &init_net) 202 202 return NOTIFY_DONE; 203 203 204 204 while ((eb_ptr->dev != dev)) {
+1 -1
net/wireless/wext.c
··· 1157 1157 struct sk_buff *skb; 1158 1158 int err; 1159 1159 1160 - if (dev->nd_net != &init_net) 1160 + if (dev_net(dev) != &init_net) 1161 1161 return; 1162 1162 1163 1163 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+1 -1
net/x25/af_x25.c
··· 191 191 struct net_device *dev = ptr; 192 192 struct x25_neigh *nb; 193 193 194 - if (dev->nd_net != &init_net) 194 + if (dev_net(dev) != &init_net) 195 195 return NOTIFY_DONE; 196 196 197 197 if (dev->type == ARPHRD_X25
+1 -1
net/x25/x25_dev.c
··· 95 95 struct sk_buff *nskb; 96 96 struct x25_neigh *nb; 97 97 98 - if (dev->nd_net != &init_net) 98 + if (dev_net(dev) != &init_net) 99 99 goto drop; 100 100 101 101 nskb = skb_copy(skb, GFP_ATOMIC);
+2 -2
net/xfrm/xfrm_policy.c
··· 2079 2079 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 2080 2080 { 2081 2081 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { 2082 - dst->dev = dev->nd_net->loopback_dev; 2082 + dst->dev = dev_net(dev)->loopback_dev; 2083 2083 dev_hold(dst->dev); 2084 2084 dev_put(dev); 2085 2085 } ··· 2350 2350 { 2351 2351 struct net_device *dev = ptr; 2352 2352 2353 - if (dev->nd_net != &init_net) 2353 + if (dev_net(dev) != &init_net) 2354 2354 return NOTIFY_DONE; 2355 2355 2356 2356 switch (event) {
+1 -1
security/selinux/netif.c
··· 281 281 { 282 282 struct net_device *dev = ptr; 283 283 284 - if (dev->nd_net != &init_net) 284 + if (dev_net(dev) != &init_net) 285 285 return NOTIFY_DONE; 286 286 287 287 if (event == NETDEV_DOWN)