Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netdev: add more functions to netdevice ops

This patch moves neigh_setup and hard_start_xmit into the network device ops
structure. For bisection, fix all the previously converted drivers as well.
Bonding driver took the biggest hit on this.

Added a prefetch of the hard_start_xmit in the fast path to try to reduce
any impact this would have.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Stephen Hemminger and committed by
David S. Miller
00829823 6ab33d51

+183 -93
+1 -1
drivers/net/8139cp.c
··· 1824 1824 .ndo_set_multicast_list = cp_set_rx_mode, 1825 1825 .ndo_get_stats = cp_get_stats, 1826 1826 .ndo_do_ioctl = cp_ioctl, 1827 + .ndo_start_xmit = cp_start_xmit, 1827 1828 .ndo_tx_timeout = cp_tx_timeout, 1828 1829 #if CP_VLAN_TAG_USED 1829 1830 .ndo_vlan_rx_register = cp_vlan_rx_register, ··· 1950 1949 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 1951 1950 1952 1951 dev->netdev_ops = &cp_netdev_ops; 1953 - dev->hard_start_xmit = cp_start_xmit; 1954 1952 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); 1955 1953 dev->ethtool_ops = &cp_ethtool_ops; 1956 1954 dev->watchdog_timeo = TX_TIMEOUT;
+1 -1
drivers/net/8139too.c
··· 921 921 .ndo_stop = rtl8139_close, 922 922 .ndo_get_stats = rtl8139_get_stats, 923 923 .ndo_validate_addr = eth_validate_addr, 924 + .ndo_start_xmit = rtl8139_start_xmit, 924 925 .ndo_set_multicast_list = rtl8139_set_rx_mode, 925 926 .ndo_do_ioctl = netdev_ioctl, 926 927 .ndo_tx_timeout = rtl8139_tx_timeout, ··· 993 992 dev->netdev_ops = &rtl8139_netdev_ops; 994 993 dev->ethtool_ops = &rtl8139_ethtool_ops; 995 994 dev->watchdog_timeo = TX_TIMEOUT; 996 - dev->hard_start_xmit = rtl8139_start_xmit; 997 995 netif_napi_add(dev, &tp->napi, rtl8139_poll, 64); 998 996 999 997 /* note: the hardware is not capable of sg/csum/highdma, however
+3 -1
drivers/net/acenic.c
··· 455 455 .ndo_stop = ace_close, 456 456 .ndo_tx_timeout = ace_watchdog, 457 457 .ndo_get_stats = ace_get_stats, 458 + .ndo_start_xmit = ace_start_xmit, 458 459 .ndo_set_multicast_list = ace_set_multicast_list, 459 460 .ndo_set_mac_address = ace_set_mac_addr, 460 461 .ndo_change_mtu = ace_change_mtu, 462 + #if ACENIC_DO_VLAN 461 463 .ndo_vlan_rx_register = ace_vlan_rx_register, 464 + #endif 462 465 }; 463 466 464 467 static int __devinit acenic_probe_one(struct pci_dev *pdev, ··· 492 489 dev->watchdog_timeo = 5*HZ; 493 490 494 491 dev->netdev_ops = &ace_netdev_ops; 495 - dev->hard_start_xmit = &ace_start_xmit; 496 492 SET_ETHTOOL_OPS(dev, &ace_ethtool_ops); 497 493 498 494 /* we only display this string ONCE */
+2 -1
drivers/net/atl1e/atl1e_main.c
··· 2256 2256 static const struct net_device_ops atl1e_netdev_ops = { 2257 2257 .ndo_open = atl1e_open, 2258 2258 .ndo_stop = atl1e_close, 2259 + .ndo_start_xmit = atl1e_xmit_frame, 2259 2260 .ndo_get_stats = atl1e_get_stats, 2260 2261 .ndo_set_multicast_list = atl1e_set_multi, 2261 2262 .ndo_validate_addr = eth_validate_addr, ··· 2278 2277 2279 2278 netdev->irq = pdev->irq; 2280 2279 netdev->netdev_ops = &atl1e_netdev_ops; 2281 - netdev->hard_start_xmit = atl1e_xmit_frame, 2280 + 2282 2281 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2283 2282 atl1e_set_ethtool_ops(netdev); 2284 2283
+2 -2
drivers/net/atlx/atl1.c
··· 2883 2883 static const struct net_device_ops atl1_netdev_ops = { 2884 2884 .ndo_open = atl1_open, 2885 2885 .ndo_stop = atl1_close, 2886 + .ndo_start_xmit = atl1_xmit_frame, 2886 2887 .ndo_set_multicast_list = atlx_set_multi, 2887 2888 .ndo_validate_addr = eth_validate_addr, 2888 2889 .ndo_set_mac_address = atl1_set_mac, 2889 2890 .ndo_change_mtu = atl1_change_mtu, 2890 2891 .ndo_do_ioctl = atlx_ioctl, 2891 - .ndo_tx_timeout = atlx_tx_timeout, 2892 + .ndo_tx_timeout = atlx_tx_timeout, 2892 2893 .ndo_vlan_rx_register = atlx_vlan_rx_register, 2893 2894 #ifdef CONFIG_NET_POLL_CONTROLLER 2894 2895 .ndo_poll_controller = atl1_poll_controller, ··· 2984 2983 adapter->mii.reg_num_mask = 0x1f; 2985 2984 2986 2985 netdev->netdev_ops = &atl1_netdev_ops; 2987 - netdev->hard_start_xmit = &atl1_xmit_frame; 2988 2986 netdev->watchdog_timeo = 5 * HZ; 2989 2987 2990 2988 netdev->ethtool_ops = &atl1_ethtool_ops;
+1 -1
drivers/net/atlx/atl2.c
··· 1315 1315 static const struct net_device_ops atl2_netdev_ops = { 1316 1316 .ndo_open = atl2_open, 1317 1317 .ndo_stop = atl2_close, 1318 + .ndo_start_xmit = atl2_xmit_frame, 1318 1319 .ndo_set_multicast_list = atl2_set_multi, 1319 1320 .ndo_validate_addr = eth_validate_addr, 1320 1321 .ndo_set_mac_address = atl2_set_mac, ··· 1401 1400 1402 1401 atl2_setup_pcicmd(pdev); 1403 1402 1404 - netdev->hard_start_xmit = &atl2_xmit_frame; 1405 1403 netdev->netdev_ops = &atl2_netdev_ops; 1406 1404 atl2_set_ethtool_ops(netdev); 1407 1405 netdev->watchdog_timeo = 5 * HZ;
+46 -10
drivers/net/bonding/bond_main.c
··· 1377 1377 return 0; 1378 1378 } 1379 1379 1380 - 1381 1380 static void bond_setup_by_slave(struct net_device *bond_dev, 1382 1381 struct net_device *slave_dev) 1383 1382 { 1384 1383 struct bonding *bond = netdev_priv(bond_dev); 1385 1384 1386 - bond_dev->neigh_setup = slave_dev->neigh_setup; 1387 - bond_dev->header_ops = slave_dev->header_ops; 1385 + bond_dev->header_ops = slave_dev->header_ops; 1388 1386 1389 1387 bond_dev->type = slave_dev->type; 1390 1388 bond_dev->hard_header_len = slave_dev->hard_header_len; ··· 4122 4124 read_unlock(&bond->lock); 4123 4125 } 4124 4126 4127 + static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms) 4128 + { 4129 + struct bonding *bond = netdev_priv(dev); 4130 + struct slave *slave = bond->first_slave; 4131 + 4132 + if (slave) { 4133 + const struct net_device_ops *slave_ops 4134 + = slave->dev->netdev_ops; 4135 + if (slave_ops->ndo_neigh_setup) 4136 + return slave_ops->ndo_neigh_setup(dev, parms); 4137 + } 4138 + return 0; 4139 + } 4140 + 4125 4141 /* 4126 4142 * Change the MTU of all of a master's slaves to match the master 4127 4143 */ ··· 4502 4490 } 4503 4491 } 4504 4492 4493 + static int bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4494 + { 4495 + const struct bonding *bond = netdev_priv(dev); 4496 + 4497 + switch (bond->params.mode) { 4498 + case BOND_MODE_ROUNDROBIN: 4499 + return bond_xmit_roundrobin(skb, dev); 4500 + case BOND_MODE_ACTIVEBACKUP: 4501 + return bond_xmit_activebackup(skb, dev); 4502 + case BOND_MODE_XOR: 4503 + return bond_xmit_xor(skb, dev); 4504 + case BOND_MODE_BROADCAST: 4505 + return bond_xmit_broadcast(skb, dev); 4506 + case BOND_MODE_8023AD: 4507 + return bond_3ad_xmit_xor(skb, dev); 4508 + case BOND_MODE_ALB: 4509 + case BOND_MODE_TLB: 4510 + return bond_alb_xmit(skb, dev); 4511 + default: 4512 + /* Should never happen, mode already checked */ 4513 + printk(KERN_ERR DRV_NAME ": %s: Error: Unknown bonding mode %d\n", 4514 + dev->name, bond->params.mode); 
4515 + WARN_ON_ONCE(1); 4516 + dev_kfree_skb(skb); 4517 + return NETDEV_TX_OK; 4518 + } 4519 + } 4520 + 4521 + 4505 4522 /* 4506 4523 * set bond mode specific net device operations 4507 4524 */ ··· 4540 4499 4541 4500 switch (mode) { 4542 4501 case BOND_MODE_ROUNDROBIN: 4543 - bond_dev->hard_start_xmit = bond_xmit_roundrobin; 4544 4502 break; 4545 4503 case BOND_MODE_ACTIVEBACKUP: 4546 - bond_dev->hard_start_xmit = bond_xmit_activebackup; 4547 4504 break; 4548 4505 case BOND_MODE_XOR: 4549 - bond_dev->hard_start_xmit = bond_xmit_xor; 4550 4506 bond_set_xmit_hash_policy(bond); 4551 4507 break; 4552 4508 case BOND_MODE_BROADCAST: 4553 - bond_dev->hard_start_xmit = bond_xmit_broadcast; 4554 4509 break; 4555 4510 case BOND_MODE_8023AD: 4556 4511 bond_set_master_3ad_flags(bond); 4557 - bond_dev->hard_start_xmit = bond_3ad_xmit_xor; 4558 4512 bond_set_xmit_hash_policy(bond); 4559 4513 break; 4560 4514 case BOND_MODE_ALB: 4561 4515 bond_set_master_alb_flags(bond); 4562 4516 /* FALLTHRU */ 4563 4517 case BOND_MODE_TLB: 4564 - bond_dev->hard_start_xmit = bond_alb_xmit; 4565 4518 break; 4566 4519 default: 4567 4520 /* Should never happen, mode already checked */ ··· 4588 4553 static const struct net_device_ops bond_netdev_ops = { 4589 4554 .ndo_open = bond_open, 4590 4555 .ndo_stop = bond_close, 4556 + .ndo_start_xmit = bond_start_xmit, 4591 4557 .ndo_get_stats = bond_get_stats, 4592 4558 .ndo_do_ioctl = bond_do_ioctl, 4593 4559 .ndo_set_multicast_list = bond_set_multicast_list, 4594 4560 .ndo_change_mtu = bond_change_mtu, 4595 - .ndo_validate_addr = NULL, 4596 4561 .ndo_set_mac_address = bond_set_mac_address, 4562 + .ndo_neigh_setup = bond_neigh_setup, 4597 4563 .ndo_vlan_rx_register = bond_vlan_rx_register, 4598 4564 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4599 4565 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+3 -3
drivers/net/chelsio/cxgb2.c
··· 915 915 } 916 916 917 917 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 918 - static void vlan_rx_register(struct net_device *dev, 918 + static void t1_vlan_rx_register(struct net_device *dev, 919 919 struct vlan_group *grp) 920 920 { 921 921 struct adapter *adapter = dev->ml_priv; ··· 1013 1013 static const struct net_device_ops cxgb_netdev_ops = { 1014 1014 .ndo_open = cxgb_open, 1015 1015 .ndo_stop = cxgb_close, 1016 + .ndo_start_xmit = t1_start_xmit, 1016 1017 .ndo_get_stats = t1_get_stats, 1017 1018 .ndo_validate_addr = eth_validate_addr, 1018 1019 .ndo_set_multicast_list = t1_set_rxmode, ··· 1021 1020 .ndo_change_mtu = t1_change_mtu, 1022 1021 .ndo_set_mac_address = t1_set_mac_addr, 1023 1022 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1024 - .ndo_vlan_rx_register = vlan_rx_register, 1023 + .ndo_vlan_rx_register = t1_vlan_rx_register, 1025 1024 #endif 1026 1025 #ifdef CONFIG_NET_POLL_CONTROLLER 1027 1026 .ndo_poll_controller = t1_netpoll, ··· 1158 1157 } 1159 1158 1160 1159 netdev->netdev_ops = &cxgb_netdev_ops; 1161 - netdev->hard_start_xmit = t1_start_xmit; 1162 1160 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ? 1163 1161 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); 1164 1162
-1
drivers/net/cxgb3/cxgb3_main.c
··· 2955 2955 2956 2956 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2957 2957 netdev->netdev_ops = &cxgb_netdev_ops; 2958 - netdev->hard_start_xmit = t3_eth_xmit; 2959 2958 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 2960 2959 } 2961 2960
+1 -1
drivers/net/e100.c
··· 2615 2615 static const struct net_device_ops e100_netdev_ops = { 2616 2616 .ndo_open = e100_open, 2617 2617 .ndo_stop = e100_close, 2618 + .ndo_start_xmit = e100_xmit_frame, 2618 2619 .ndo_validate_addr = eth_validate_addr, 2619 2620 .ndo_set_multicast_list = e100_set_multicast_list, 2620 2621 .ndo_set_mac_address = e100_set_mac_address, ··· 2641 2640 } 2642 2641 2643 2642 netdev->netdev_ops = &e100_netdev_ops; 2644 - netdev->hard_start_xmit = e100_xmit_frame; 2645 2643 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); 2646 2644 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; 2647 2645 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+1 -1
drivers/net/e1000/e1000_main.c
··· 891 891 static const struct net_device_ops e1000_netdev_ops = { 892 892 .ndo_open = e1000_open, 893 893 .ndo_stop = e1000_close, 894 + .ndo_start_xmit = e1000_xmit_frame, 894 895 .ndo_get_stats = e1000_get_stats, 895 896 .ndo_set_rx_mode = e1000_set_rx_mode, 896 897 .ndo_set_mac_address = e1000_set_mac, ··· 1002 1001 } 1003 1002 1004 1003 netdev->netdev_ops = &e1000_netdev_ops; 1005 - netdev->hard_start_xmit = &e1000_xmit_frame; 1006 1004 e1000_set_ethtool_ops(netdev); 1007 1005 netdev->watchdog_timeo = 5 * HZ; 1008 1006 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+1 -1
drivers/net/e1000e/netdev.c
··· 4707 4707 static const struct net_device_ops e1000e_netdev_ops = { 4708 4708 .ndo_open = e1000_open, 4709 4709 .ndo_stop = e1000_close, 4710 + .ndo_start_xmit = e1000_xmit_frame, 4710 4711 .ndo_get_stats = e1000_get_stats, 4711 4712 .ndo_set_multicast_list = e1000_set_multi, 4712 4713 .ndo_set_mac_address = e1000_set_mac, ··· 4823 4822 4824 4823 /* construct the net_device struct */ 4825 4824 netdev->netdev_ops = &e1000e_netdev_ops; 4826 - netdev->hard_start_xmit = &e1000_xmit_frame; 4827 4825 e1000e_set_ethtool_ops(netdev); 4828 4826 netdev->watchdog_timeo = 5 * HZ; 4829 4827 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+1 -1
drivers/net/enic/enic_main.c
··· 1593 1593 static const struct net_device_ops enic_netdev_ops = { 1594 1594 .ndo_open = enic_open, 1595 1595 .ndo_stop = enic_stop, 1596 + .ndo_start_xmit = enic_hard_start_xmit, 1596 1597 .ndo_get_stats = enic_get_stats, 1597 1598 .ndo_validate_addr = eth_validate_addr, 1598 1599 .ndo_set_multicast_list = enic_set_multicast_list, ··· 1831 1830 } 1832 1831 1833 1832 netdev->netdev_ops = &enic_netdev_ops; 1834 - netdev->hard_start_xmit = enic_hard_start_xmit; 1835 1833 netdev->watchdog_timeo = 2 * HZ; 1836 1834 netdev->ethtool_ops = &enic_ethtool_ops; 1837 1835
+19 -3
drivers/net/forcedeth.c
··· 5412 5412 .ndo_open = nv_open, 5413 5413 .ndo_stop = nv_close, 5414 5414 .ndo_get_stats = nv_get_stats, 5415 + .ndo_start_xmit = nv_start_xmit, 5416 + .ndo_tx_timeout = nv_tx_timeout, 5417 + .ndo_change_mtu = nv_change_mtu, 5418 + .ndo_validate_addr = eth_validate_addr, 5419 + .ndo_set_mac_address = nv_set_mac_address, 5420 + .ndo_set_multicast_list = nv_set_multicast, 5421 + .ndo_vlan_rx_register = nv_vlan_rx_register, 5422 + #ifdef CONFIG_NET_POLL_CONTROLLER 5423 + .ndo_poll_controller = nv_poll_controller, 5424 + #endif 5425 + }; 5426 + 5427 + static const struct net_device_ops nv_netdev_ops_optimized = { 5428 + .ndo_open = nv_open, 5429 + .ndo_stop = nv_close, 5430 + .ndo_get_stats = nv_get_stats, 5431 + .ndo_start_xmit = nv_start_xmit_optimized, 5415 5432 .ndo_tx_timeout = nv_tx_timeout, 5416 5433 .ndo_change_mtu = nv_change_mtu, 5417 5434 .ndo_validate_addr = eth_validate_addr, ··· 5609 5592 goto out_freering; 5610 5593 5611 5594 if (!nv_optimized(np)) 5612 - dev->hard_start_xmit = nv_start_xmit; 5595 + dev->netdev_ops = &nv_netdev_ops; 5613 5596 else 5614 - dev->hard_start_xmit = nv_start_xmit_optimized; 5597 + dev->netdev_ops = &nv_netdev_ops_optimized; 5615 5598 5616 - dev->netdev_ops = &nv_netdev_ops; 5617 5599 #ifdef CONFIG_FORCEDETH_NAPI 5618 5600 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5619 5601 #endif
+2 -2
drivers/net/ifb.c
··· 138 138 } 139 139 140 140 static const struct net_device_ops ifb_netdev_ops = { 141 - .ndo_validate_addr = eth_validate_addr, 142 141 .ndo_open = ifb_open, 143 142 .ndo_stop = ifb_close, 143 + .ndo_start_xmit = ifb_xmit, 144 + .ndo_validate_addr = eth_validate_addr, 144 145 }; 145 146 146 147 static void ifb_setup(struct net_device *dev) 147 148 { 148 149 /* Initialize the device structure. */ 149 - dev->hard_start_xmit = ifb_xmit; 150 150 dev->destructor = free_netdev; 151 151 dev->netdev_ops = &ifb_netdev_ops; 152 152
+1 -1
drivers/net/igb/igb_main.c
··· 953 953 static const struct net_device_ops igb_netdev_ops = { 954 954 .ndo_open = igb_open, 955 955 .ndo_stop = igb_close, 956 + .ndo_start_xmit = igb_xmit_frame_adv, 956 957 .ndo_get_stats = igb_get_stats, 957 958 .ndo_set_multicast_list = igb_set_multi, 958 959 .ndo_set_mac_address = igb_set_mac, ··· 1081 1080 netdev->netdev_ops = &igb_netdev_ops; 1082 1081 igb_set_ethtool_ops(netdev); 1083 1082 netdev->watchdog_timeo = 5 * HZ; 1084 - netdev->hard_start_xmit = &igb_xmit_frame_adv; 1085 1083 1086 1084 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1087 1085
+1 -1
drivers/net/ixgb/ixgb_main.c
··· 324 324 static const struct net_device_ops ixgb_netdev_ops = { 325 325 .ndo_open = ixgb_open, 326 326 .ndo_stop = ixgb_close, 327 + .ndo_start_xmit = ixgb_xmit_frame, 327 328 .ndo_get_stats = ixgb_get_stats, 328 329 .ndo_set_multicast_list = ixgb_set_multi, 329 330 .ndo_validate_addr = eth_validate_addr, ··· 415 414 } 416 415 417 416 netdev->netdev_ops = &ixgb_netdev_ops; 418 - netdev->hard_start_xmit = &ixgb_xmit_frame; 419 417 ixgb_set_ethtool_ops(netdev); 420 418 netdev->watchdog_timeo = 5 * HZ; 421 419 netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+1 -1
drivers/net/ixgbe/ixgbe_main.c
··· 3728 3728 static const struct net_device_ops ixgbe_netdev_ops = { 3729 3729 .ndo_open = ixgbe_open, 3730 3730 .ndo_stop = ixgbe_close, 3731 + .ndo_start_xmit = ixgbe_xmit_frame, 3731 3732 .ndo_get_stats = ixgbe_get_stats, 3732 3733 .ndo_set_multicast_list = ixgbe_set_rx_mode, 3733 3734 .ndo_validate_addr = eth_validate_addr, ··· 3825 3824 } 3826 3825 3827 3826 netdev->netdev_ops = &ixgbe_netdev_ops; 3828 - netdev->hard_start_xmit = &ixgbe_xmit_frame; 3829 3827 ixgbe_set_ethtool_ops(netdev); 3830 3828 netdev->watchdog_timeo = 5 * HZ; 3831 3829 strcpy(netdev->name, pci_name(pdev));
+1 -1
drivers/net/loopback.c
··· 145 145 146 146 static const struct net_device_ops loopback_ops = { 147 147 .ndo_init = loopback_dev_init, 148 + .ndo_start_xmit= loopback_xmit, 148 149 .ndo_get_stats = loopback_get_stats, 149 150 }; 150 151 ··· 156 155 static void loopback_setup(struct net_device *dev) 157 156 { 158 157 dev->mtu = (16 * 1024) + 20 + 20 + 12; 159 - dev->hard_start_xmit = loopback_xmit; 160 158 dev->hard_header_len = ETH_HLEN; /* 14 */ 161 159 dev->addr_len = ETH_ALEN; /* 6 */ 162 160 dev->tx_queue_len = 0;
+2 -2
drivers/net/macvlan.c
··· 140 140 return NULL; 141 141 } 142 142 143 - static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 143 + static int macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) 144 144 { 145 145 const struct macvlan_dev *vlan = netdev_priv(dev); 146 146 unsigned int len = skb->len; ··· 365 365 .ndo_init = macvlan_init, 366 366 .ndo_open = macvlan_open, 367 367 .ndo_stop = macvlan_stop, 368 + .ndo_start_xmit = macvlan_start_xmit, 368 369 .ndo_change_mtu = macvlan_change_mtu, 369 370 .ndo_change_rx_flags = macvlan_change_rx_flags, 370 371 .ndo_set_mac_address = macvlan_set_mac_address, ··· 378 377 ether_setup(dev); 379 378 380 379 dev->netdev_ops = &macvlan_netdev_ops; 381 - dev->hard_start_xmit = macvlan_hard_start_xmit; 382 380 dev->destructor = free_netdev; 383 381 dev->header_ops = &macvlan_hard_header_ops, 384 382 dev->ethtool_ops = &macvlan_ethtool_ops;
+1 -1
drivers/net/niu.c
··· 8892 8892 static const struct net_device_ops niu_netdev_ops = { 8893 8893 .ndo_open = niu_open, 8894 8894 .ndo_stop = niu_close, 8895 + .ndo_start_xmit = niu_start_xmit, 8895 8896 .ndo_get_stats = niu_get_stats, 8896 8897 .ndo_set_multicast_list = niu_set_rx_mode, 8897 8898 .ndo_validate_addr = eth_validate_addr, ··· 8905 8904 static void __devinit niu_assign_netdev_ops(struct net_device *dev) 8906 8905 { 8907 8906 dev->netdev_ops = &niu_netdev_ops; 8908 - dev->hard_start_xmit = niu_start_xmit; 8909 8907 dev->ethtool_ops = &niu_ethtool_ops; 8910 8908 dev->watchdog_timeo = NIU_TX_TIMEOUT; 8911 8909 }
+2 -3
drivers/net/ppp_generic.c
··· 972 972 } 973 973 974 974 static const struct net_device_ops ppp_netdev_ops = { 975 - .ndo_do_ioctl = ppp_net_ioctl, 975 + .ndo_start_xmit = ppp_start_xmit, 976 + .ndo_do_ioctl = ppp_net_ioctl, 976 977 }; 977 978 978 979 static void ppp_setup(struct net_device *dev) ··· 2437 2436 ppp->minseq = -1; 2438 2437 skb_queue_head_init(&ppp->mrq); 2439 2438 #endif /* CONFIG_PPP_MULTILINK */ 2440 - 2441 - dev->hard_start_xmit = ppp_start_xmit; 2442 2439 2443 2440 ret = -EEXIST; 2444 2441 mutex_lock(&all_ppp_mutex);
+1 -1
drivers/net/r8169.c
··· 1927 1927 .ndo_open = rtl8169_open, 1928 1928 .ndo_stop = rtl8169_close, 1929 1929 .ndo_get_stats = rtl8169_get_stats, 1930 + .ndo_start_xmit = rtl8169_start_xmit, 1930 1931 .ndo_tx_timeout = rtl8169_tx_timeout, 1931 1932 .ndo_validate_addr = eth_validate_addr, 1932 1933 .ndo_change_mtu = rtl8169_change_mtu, ··· 2126 2125 dev->dev_addr[i] = RTL_R8(MAC0 + i); 2127 2126 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 2128 2127 2129 - dev->hard_start_xmit = rtl8169_start_xmit; 2130 2128 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); 2131 2129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT; 2132 2130 dev->irq = pdev->irq;
+1 -1
drivers/net/skge.c
··· 3805 3805 static const struct net_device_ops skge_netdev_ops = { 3806 3806 .ndo_open = skge_up, 3807 3807 .ndo_stop = skge_down, 3808 + .ndo_start_xmit = skge_xmit_frame, 3808 3809 .ndo_do_ioctl = skge_ioctl, 3809 3810 .ndo_get_stats = skge_get_stats, 3810 3811 .ndo_tx_timeout = skge_tx_timeout, ··· 3832 3831 } 3833 3832 3834 3833 SET_NETDEV_DEV(dev, &hw->pdev->dev); 3835 - dev->hard_start_xmit = skge_xmit_frame; 3836 3834 dev->netdev_ops = &skge_netdev_ops; 3837 3835 dev->ethtool_ops = &skge_ethtool_ops; 3838 3836 dev->watchdog_timeo = TX_WATCHDOG;
+2 -1
drivers/net/sky2.c
··· 4047 4047 { 4048 4048 .ndo_open = sky2_up, 4049 4049 .ndo_stop = sky2_down, 4050 + .ndo_start_xmit = sky2_xmit_frame, 4050 4051 .ndo_do_ioctl = sky2_ioctl, 4051 4052 .ndo_validate_addr = eth_validate_addr, 4052 4053 .ndo_set_mac_address = sky2_set_mac_address, ··· 4064 4063 { 4065 4064 .ndo_open = sky2_up, 4066 4065 .ndo_stop = sky2_down, 4066 + .ndo_start_xmit = sky2_xmit_frame, 4067 4067 .ndo_do_ioctl = sky2_ioctl, 4068 4068 .ndo_validate_addr = eth_validate_addr, 4069 4069 .ndo_set_mac_address = sky2_set_mac_address, ··· 4092 4090 4093 4091 SET_NETDEV_DEV(dev, &hw->pdev->dev); 4094 4092 dev->irq = hw->pdev->irq; 4095 - dev->hard_start_xmit = sky2_xmit_frame; 4096 4093 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); 4097 4094 dev->watchdog_timeo = TX_WATCHDOG; 4098 4095 dev->netdev_ops = &sky2_netdev_ops[port];
+31 -14
drivers/net/tg3.c
··· 12614 12614 else 12615 12615 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 12616 12616 12617 - /* All chips before 5787 can get confused if TX buffers 12618 - * straddle the 4GB address boundary in some cases. 12619 - */ 12620 - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 12621 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12622 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12623 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12624 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 12625 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12626 - tp->dev->hard_start_xmit = tg3_start_xmit; 12627 - else 12628 - tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; 12629 - 12630 12617 tp->rx_offset = 2; 12631 12618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 12632 12619 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) ··· 13333 13346 static const struct net_device_ops tg3_netdev_ops = { 13334 13347 .ndo_open = tg3_open, 13335 13348 .ndo_stop = tg3_close, 13349 + .ndo_start_xmit = tg3_start_xmit, 13350 + .ndo_get_stats = tg3_get_stats, 13351 + .ndo_validate_addr = eth_validate_addr, 13352 + .ndo_set_multicast_list = tg3_set_rx_mode, 13353 + .ndo_set_mac_address = tg3_set_mac_addr, 13354 + .ndo_do_ioctl = tg3_ioctl, 13355 + .ndo_tx_timeout = tg3_tx_timeout, 13356 + .ndo_change_mtu = tg3_change_mtu, 13357 + #if TG3_VLAN_TAG_USED 13358 + .ndo_vlan_rx_register = tg3_vlan_rx_register, 13359 + #endif 13360 + #ifdef CONFIG_NET_POLL_CONTROLLER 13361 + .ndo_poll_controller = tg3_poll_controller, 13362 + #endif 13363 + }; 13364 + 13365 + static const struct net_device_ops tg3_netdev_ops_dma_bug = { 13366 + .ndo_open = tg3_open, 13367 + .ndo_stop = tg3_close, 13368 + .ndo_start_xmit = tg3_start_xmit_dma_bug, 13336 13369 .ndo_get_stats = tg3_get_stats, 13337 13370 .ndo_validate_addr = eth_validate_addr, 13338 13371 .ndo_set_multicast_list = tg3_set_rx_mode, ··· 13482 13475 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 
13483 13476 tp->tx_pending = TG3_DEF_TX_RING_PENDING; 13484 13477 13485 - dev->netdev_ops = &tg3_netdev_ops; 13486 13478 netif_napi_add(dev, &tp->napi, tg3_poll, 64); 13487 13479 dev->ethtool_ops = &tg3_ethtool_ops; 13488 13480 dev->watchdog_timeo = TG3_TX_TIMEOUT; ··· 13493 13487 "aborting.\n"); 13494 13488 goto err_out_iounmap; 13495 13489 } 13490 + 13491 + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13492 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13493 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13494 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13495 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13496 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13497 + dev->netdev_ops = &tg3_netdev_ops; 13498 + else 13499 + dev->netdev_ops = &tg3_netdev_ops_dma_bug; 13500 + 13496 13501 13497 13502 /* The EPB bridge inside 5714, 5715, and 5780 and any 13498 13503 * device behind the EPB cannot support DMA addresses > 40-bit.
+2 -2
drivers/net/tun.c
··· 308 308 static const struct net_device_ops tun_netdev_ops = { 309 309 .ndo_open = tun_net_open, 310 310 .ndo_stop = tun_net_close, 311 + .ndo_start_xmit = tun_net_xmit, 311 312 .ndo_change_mtu = tun_net_change_mtu, 312 - 313 313 }; 314 314 315 315 static const struct net_device_ops tap_netdev_ops = { 316 316 .ndo_open = tun_net_open, 317 317 .ndo_stop = tun_net_close, 318 + .ndo_start_xmit = tun_net_xmit, 318 319 .ndo_change_mtu = tun_net_change_mtu, 319 320 .ndo_set_multicast_list = tun_net_mclist, 320 321 .ndo_set_mac_address = eth_mac_addr, ··· 692 691 tun->owner = -1; 693 692 tun->group = -1; 694 693 695 - dev->hard_start_xmit = tun_net_xmit; 696 694 dev->ethtool_ops = &tun_ethtool_ops; 697 695 dev->destructor = free_netdev; 698 696 dev->features |= NETIF_F_NETNS_LOCAL;
+1 -1
drivers/net/veth.c
··· 265 265 static const struct net_device_ops veth_netdev_ops = { 266 266 .ndo_init = veth_dev_init, 267 267 .ndo_open = veth_open, 268 + .ndo_start_xmit = veth_xmit, 268 269 .ndo_get_stats = veth_get_stats, 269 270 }; 270 271 ··· 274 273 ether_setup(dev); 275 274 276 275 dev->netdev_ops = &veth_netdev_ops; 277 - dev->hard_start_xmit = veth_xmit; 278 276 dev->ethtool_ops = &veth_ethtool_ops; 279 277 dev->features |= NETIF_F_LLTX; 280 278 dev->destructor = veth_dev_free;
+1 -1
drivers/net/via-velocity.c
··· 852 852 static const struct net_device_ops velocity_netdev_ops = { 853 853 .ndo_open = velocity_open, 854 854 .ndo_stop = velocity_close, 855 + .ndo_start_xmit = velocity_xmit, 855 856 .ndo_get_stats = velocity_get_stats, 856 857 .ndo_validate_addr = eth_validate_addr, 857 858 .ndo_set_multicast_list = velocity_set_multi, ··· 972 971 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 973 972 974 973 dev->irq = pdev->irq; 975 - dev->hard_start_xmit = velocity_xmit; 976 974 dev->netdev_ops = &velocity_netdev_ops; 977 975 dev->ethtool_ops = &velocity_ethtool_ops; 978 976
+26 -13
include/linux/netdevice.h
··· 454 454 455 455 /* 456 456 * This structure defines the management hooks for network devices. 457 - * The following hooks can bed defined and are optonal (can be null) 458 - * unless otherwise noted. 457 + * The following hooks can be defined; unless noted otherwise, they are 458 + * optional and can be filled with a null pointer. 459 459 * 460 460 * int (*ndo_init)(struct net_device *dev); 461 461 * This function is called once when network device is registered. ··· 474 474 * int (*ndo_stop)(struct net_device *dev); 475 475 * This function is called when network device transistions to the down 476 476 * state. 477 + * 478 + * int (*ndo_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev); 479 + * Called when a packet needs to be transmitted. 480 + * Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED, 481 + * Required can not be NULL. 482 + * 483 + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); 484 + * Called to decide which queue to when device supports multiple 485 + * transmit queues. 477 486 * 478 487 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); 479 488 * This function is called to allow device receiver to make ··· 517 508 * of a device. If not defined, any request to change MTU will 518 509 * will return an error. 519 510 * 520 - * void (*ndo_tx_timeout) (struct net_device *dev); 511 + * void (*ndo_tx_timeout)(struct net_device *dev); 521 512 * Callback uses when the transmitter has not made any progress 522 513 * for dev->watchdog ticks. 
523 514 * ··· 547 538 void (*ndo_uninit)(struct net_device *dev); 548 539 int (*ndo_open)(struct net_device *dev); 549 540 int (*ndo_stop)(struct net_device *dev); 541 + int (*ndo_start_xmit) (struct sk_buff *skb, 542 + struct net_device *dev); 543 + u16 (*ndo_select_queue)(struct net_device *dev, 544 + struct sk_buff *skb); 550 545 #define HAVE_CHANGE_RX_FLAGS 551 546 void (*ndo_change_rx_flags)(struct net_device *dev, 552 547 int flags); ··· 570 557 int (*ndo_set_config)(struct net_device *dev, 571 558 struct ifmap *map); 572 559 #define HAVE_CHANGE_MTU 573 - int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 574 - 560 + int (*ndo_change_mtu)(struct net_device *dev, 561 + int new_mtu); 562 + int (*ndo_neigh_setup)(struct net_device *dev, 563 + struct neigh_parms *); 575 564 #define HAVE_TX_TIMEOUT 576 565 void (*ndo_tx_timeout) (struct net_device *dev); 577 566 ··· 776 761 /* Number of TX queues currently active in device */ 777 762 unsigned int real_num_tx_queues; 778 763 779 - /* Map buffer to appropriate transmit queue */ 780 - u16 (*select_queue)(struct net_device *dev, 781 - struct sk_buff *skb); 782 - 783 764 unsigned long tx_queue_len; /* Max frames per queue allowed */ 784 765 spinlock_t tx_global_lock; 785 766 /* 786 767 * One part is mostly used on xmit path (device) 787 768 */ 788 769 void *priv; /* pointer to private data */ 789 - int (*hard_start_xmit) (struct sk_buff *skb, 790 - struct net_device *dev); 791 770 /* These may be needed for future network-power-down code. 
*/ 792 771 unsigned long trans_start; /* Time (in jiffies) of last Tx */ 793 772 ··· 808 799 809 800 /* Called from unregister, can be used to call free_netdev */ 810 801 void (*destructor)(struct net_device *dev); 811 - 812 - int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); 813 802 814 803 #ifdef CONFIG_NETPOLL 815 804 struct netpoll_info *npinfo; ··· 849 842 void (*uninit)(struct net_device *dev); 850 843 int (*open)(struct net_device *dev); 851 844 int (*stop)(struct net_device *dev); 845 + int (*hard_start_xmit) (struct sk_buff *skb, 846 + struct net_device *dev); 847 + u16 (*select_queue)(struct net_device *dev, 848 + struct sk_buff *skb); 852 849 void (*change_rx_flags)(struct net_device *dev, 853 850 int flags); 854 851 void (*set_rx_mode)(struct net_device *dev); ··· 865 854 int (*set_config)(struct net_device *dev, 866 855 struct ifmap *map); 867 856 int (*change_mtu)(struct net_device *dev, int new_mtu); 857 + int (*neigh_setup)(struct net_device *dev, 858 + struct neigh_parms *); 868 859 void (*tx_timeout) (struct net_device *dev); 869 860 struct net_device_stats* (*get_stats)(struct net_device *dev); 870 861 void (*vlan_rx_register)(struct net_device *dev,
+5 -5
net/bridge/br_device.c
··· 163 163 static const struct net_device_ops br_netdev_ops = { 164 164 .ndo_open = br_dev_open, 165 165 .ndo_stop = br_dev_stop, 166 - .ndo_set_mac_address = br_set_mac_address, 167 - .ndo_set_multicast_list = br_dev_set_multicast_list, 168 - .ndo_change_mtu = br_change_mtu, 169 - .ndo_do_ioctl = br_dev_ioctl, 166 + .ndo_start_xmit = br_dev_xmit, 167 + .ndo_set_mac_address = br_set_mac_address, 168 + .ndo_set_multicast_list = br_dev_set_multicast_list, 169 + .ndo_change_mtu = br_change_mtu, 170 + .ndo_do_ioctl = br_dev_ioctl, 170 171 }; 171 172 172 173 void br_dev_setup(struct net_device *dev) ··· 176 175 ether_setup(dev); 177 176 178 177 dev->netdev_ops = &br_netdev_ops; 179 - dev->hard_start_xmit = br_dev_xmit; 180 178 dev->destructor = free_netdev; 181 179 SET_ETHTOOL_OPS(dev, &br_ethtool_ops); 182 180 dev->tx_queue_len = 0;
+1 -1
net/bridge/br_if.c
··· 373 373 if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER) 374 374 return -EINVAL; 375 375 376 - if (dev->hard_start_xmit == br_dev_xmit) 376 + if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) 377 377 return -ELOOP; 378 378 379 379 if (dev->br_port != NULL)
+8 -4
net/core/dev.c
··· 1660 1660 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 1661 1661 struct netdev_queue *txq) 1662 1662 { 1663 + const struct net_device_ops *ops = dev->netdev_ops; 1664 + 1665 + prefetch(&dev->netdev_ops->ndo_start_xmit); 1663 1666 if (likely(!skb->next)) { 1664 1667 if (!list_empty(&ptype_all)) 1665 1668 dev_queue_xmit_nit(skb, dev); ··· 1674 1671 goto gso; 1675 1672 } 1676 1673 1677 - return dev->hard_start_xmit(skb, dev); 1674 + return ops->ndo_start_xmit(skb, dev); 1678 1675 } 1679 1676 1680 1677 gso: ··· 1684 1681 1685 1682 skb->next = nskb->next; 1686 1683 nskb->next = NULL; 1687 - rc = dev->hard_start_xmit(nskb, dev); 1684 + rc = ops->ndo_start_xmit(nskb, dev); 1688 1685 if (unlikely(rc)) { 1689 1686 nskb->next = skb->next; 1690 1687 skb->next = nskb; ··· 1758 1755 static struct netdev_queue *dev_pick_tx(struct net_device *dev, 1759 1756 struct sk_buff *skb) 1760 1757 { 1758 + const struct net_device_ops *ops = dev->netdev_ops; 1761 1759 u16 queue_index = 0; 1762 1760 1763 - if (dev->select_queue) 1764 - queue_index = dev->select_queue(dev, skb); 1761 + if (ops->ndo_select_queue) 1762 + queue_index = ops->ndo_select_queue(dev, skb); 1765 1763 else if (dev->real_num_tx_queues > 1) 1766 1764 queue_index = simple_tx_hash(dev, skb); 1767 1765
+3 -3
net/core/neighbour.c
··· 1327 1327 struct neigh_table *tbl) 1328 1328 { 1329 1329 struct neigh_parms *p, *ref; 1330 - struct net *net; 1330 + struct net *net = dev_net(dev); 1331 + const struct net_device_ops *ops = dev->netdev_ops; 1331 1332 1332 - net = dev_net(dev); 1333 1333 ref = lookup_neigh_params(tbl, net, 0); 1334 1334 if (!ref) 1335 1335 return NULL; ··· 1341 1341 p->reachable_time = 1342 1342 neigh_rand_reach_time(p->base_reachable_time); 1343 1343 1344 - if (dev->neigh_setup && dev->neigh_setup(dev, p)) { 1344 + if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1345 1345 kfree(p); 1346 1346 return NULL; 1347 1347 }
+4 -2
net/core/netpoll.c
··· 58 58 59 59 while ((skb = skb_dequeue(&npinfo->txq))) { 60 60 struct net_device *dev = skb->dev; 61 + const struct net_device_ops *ops = dev->netdev_ops; 61 62 struct netdev_queue *txq; 62 63 63 64 if (!netif_device_present(dev) || !netif_running(dev)) { ··· 72 71 __netif_tx_lock(txq, smp_processor_id()); 73 72 if (netif_tx_queue_stopped(txq) || 74 73 netif_tx_queue_frozen(txq) || 75 - dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 74 + ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { 76 75 skb_queue_head(&npinfo->txq, skb); 77 76 __netif_tx_unlock(txq); 78 77 local_irq_restore(flags); ··· 274 273 int status = NETDEV_TX_BUSY; 275 274 unsigned long tries; 276 275 struct net_device *dev = np->dev; 276 + const struct net_device_ops *ops = dev->netdev_ops; 277 277 struct netpoll_info *npinfo = np->dev->npinfo; 278 278 279 279 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { ··· 295 293 tries > 0; --tries) { 296 294 if (__netif_tx_trylock(txq)) { 297 295 if (!netif_tx_queue_stopped(txq)) 298 - status = dev->hard_start_xmit(skb, dev); 296 + status = ops->ndo_start_xmit(skb, dev); 299 297 __netif_tx_unlock(txq); 300 298 301 299 if (status == NETDEV_TX_OK)
+4 -4
net/core/pktgen.c
··· 3352 3352 3353 3353 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 3354 3354 { 3355 - struct net_device *odev = NULL; 3355 + struct net_device *odev = pkt_dev->odev; 3356 + int (*xmit)(struct sk_buff *, struct net_device *) 3357 + = odev->netdev_ops->ndo_start_xmit; 3356 3358 struct netdev_queue *txq; 3357 3359 __u64 idle_start = 0; 3358 3360 u16 queue_map; 3359 3361 int ret; 3360 - 3361 - odev = pkt_dev->odev; 3362 3362 3363 3363 if (pkt_dev->delay_us || pkt_dev->delay_ns) { 3364 3364 u64 now; ··· 3440 3440 3441 3441 atomic_inc(&(pkt_dev->skb->users)); 3442 3442 retry_now: 3443 - ret = odev->hard_start_xmit(pkt_dev->skb, odev); 3443 + ret = (*xmit)(pkt_dev->skb, odev); 3444 3444 if (likely(ret == NETDEV_TX_OK)) { 3445 3445 pkt_dev->last_ok = 1; 3446 3446 pkt_dev->sofar++;