Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'macvlan_hwaccel'

John Fastabend says:

====================
l2 hardware accelerated macvlans

This patch adds support to offload macvlan net_devices to the
hardware. With these patches packets are pushed to the macvlan
net_device directly and do not pass through the lower dev.

The patches here have made it through multiple iterations
each with a slightly different focus. First I tried to
push these as a new link type called "VMDQ". The patches
shown here,

http://comments.gmane.org/gmane.linux.network/237617

Following this implementation I renamed the link type
"VSI" and addressed various comments. Finally Neil
Horman picked up the patches and integrated the offload
into the macvlan code. Here,

http://permalink.gmane.org/gmane.linux.network/285658

The attached series is clean-up of his patches, with a
few fixes.

If folks find this series acceptable there are a few
items we can work on next. First broadcast and multicast
will use the hardware even for local traffic with this
series. It would be best (I think) to use the software
path for macvlan to macvlan traffic and save the PCIe
bus. This depends on how much you value CPU time vs
PCIE bandwidth. This will need another patch series
to flesh out.

Also this series only allows for layer 2 mac forwarding
where some hardware supports more interesting forwarding
capabilities. Integrating with OVS may be useful here.

As always any comments/feedback welcome.

My basic I/O test is here but I've also done some link
testing, SRIOV/DCB with macvlans and others,

Changelog:
v2: two fixes to ixgbe when all features DCB, FCoE, SR-IOV
are enabled with macvlans. A VMDQ_P() reference
should have been accel->pool and do not set the offset
of the ring index from dfwd add call. The offset is used
by SR-IOV so clearing it can cause SR-IOV queue indices
to go sideways. With these fixes testing macvlan's with
SRIOV enabled was successful.
v3: addressed Neil's comments in ixgbe
fixed error path on dfwd_add_station() in ixgbe
fixed ixgbe to allow SRIOV and accelerated macvlans to
coexist.
v4: Dave caught some strange indentation, fixed it here
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+532 -97
+20
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 223 223 __IXGBE_RX_FCOE, 224 224 }; 225 225 226 + struct ixgbe_fwd_adapter { 227 + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 228 + struct net_device *netdev; 229 + struct ixgbe_adapter *real_adapter; 230 + unsigned int tx_base_queue; 231 + unsigned int rx_base_queue; 232 + int pool; 233 + }; 234 + 226 235 #define check_for_tx_hang(ring) \ 227 236 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) 228 237 #define set_check_for_tx_hang(ring) \ ··· 249 240 struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ 250 241 struct net_device *netdev; /* netdev ring belongs to */ 251 242 struct device *dev; /* device for DMA mapping */ 243 + struct ixgbe_fwd_adapter *l2_accel_priv; 252 244 void *desc; /* descriptor ring memory */ 253 245 union { 254 246 struct ixgbe_tx_buffer *tx_buffer_info; ··· 307 297 #define IXGBE_MAX_FCOE_INDICES 8 308 298 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) 309 299 #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) 300 + #define IXGBE_MAX_L2A_QUEUES 4 301 + #define IXGBE_MAX_L2A_QUEUES 4 302 + #define IXGBE_BAD_L2A_QUEUE 3 303 + #define IXGBE_MAX_MACVLANS 31 304 + #define IXGBE_MAX_DCBMACVLANS 8 305 + 310 306 struct ixgbe_ring_feature { 311 307 u16 limit; /* upper limit on feature indices */ 312 308 u16 indices; /* current value of indices */ ··· 782 766 #endif /*CONFIG_DEBUG_FS*/ 783 767 784 768 u8 default_up; 769 + unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ 785 770 }; 786 771 787 772 struct ixgbe_fdir_filter { ··· 956 939 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); 957 940 #endif 958 941 942 + netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 943 + struct ixgbe_adapter *adapter, 944 + struct ixgbe_ring *tx_ring); 959 945 #endif /* _IXGBE_H_ */
+12 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 498 498 #ifdef IXGBE_FCOE 499 499 u16 fcoe_i = 0; 500 500 #endif 501 + bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); 501 502 502 503 /* only proceed if SR-IOV is enabled */ 503 504 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ··· 511 510 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 512 511 513 512 /* 64 pool mode with 2 queues per pool */ 514 - if ((vmdq_i > 32) || (rss_i < 4)) { 513 + if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) { 515 514 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 516 515 rss_m = IXGBE_RSS_2Q_MASK; 517 516 rss_i = min_t(u16, rss_i, 2); ··· 853 852 854 853 /* apply Tx specific ring traits */ 855 854 ring->count = adapter->tx_ring_count; 856 - ring->queue_index = txr_idx; 855 + if (adapter->num_rx_pools > 1) 856 + ring->queue_index = 857 + txr_idx % adapter->num_rx_queues_per_pool; 858 + else 859 + ring->queue_index = txr_idx; 857 860 858 861 /* assign ring to adapter */ 859 862 adapter->tx_ring[txr_idx] = ring; ··· 900 895 #endif /* IXGBE_FCOE */ 901 896 /* apply Rx specific ring traits */ 902 897 ring->count = adapter->rx_ring_count; 903 - ring->queue_index = rxr_idx; 898 + if (adapter->num_rx_pools > 1) 899 + ring->queue_index = 900 + rxr_idx % adapter->num_rx_queues_per_pool; 901 + else 902 + ring->queue_index = rxr_idx; 904 903 905 904 /* assign ring to adapter */ 906 905 adapter->rx_ring[rxr_idx] = ring;
+403 -77
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 44 44 #include <linux/ethtool.h> 45 45 #include <linux/if.h> 46 46 #include <linux/if_vlan.h> 47 + #include <linux/if_macvlan.h> 47 48 #include <linux/if_bridge.h> 48 49 #include <linux/prefetch.h> 49 50 #include <scsi/fc/fc_fcoe.h> ··· 871 870 872 871 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) 873 872 { 874 - struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); 875 - struct ixgbe_hw *hw = &adapter->hw; 873 + struct ixgbe_adapter *adapter; 874 + struct ixgbe_hw *hw; 875 + u32 head, tail; 876 876 877 - u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); 878 - u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); 877 + if (ring->l2_accel_priv) 878 + adapter = ring->l2_accel_priv->real_adapter; 879 + else 880 + adapter = netdev_priv(ring->netdev); 881 + 882 + hw = &adapter->hw; 883 + head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); 884 + tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); 879 885 880 886 if (head != tail) 881 887 return (head < tail) ? ··· 3011 3003 struct ixgbe_q_vector *q_vector = ring->q_vector; 3012 3004 3013 3005 if (q_vector) 3014 - netif_set_xps_queue(adapter->netdev, 3006 + netif_set_xps_queue(ring->netdev, 3015 3007 &q_vector->affinity_mask, 3016 3008 ring->queue_index); 3017 3009 } ··· 3401 3393 { 3402 3394 struct ixgbe_hw *hw = &adapter->hw; 3403 3395 int rss_i = adapter->ring_feature[RING_F_RSS].indices; 3404 - int p; 3396 + u16 pool; 3405 3397 3406 3398 /* PSRTYPE must be initialized in non 82598 adapters */ 3407 3399 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | ··· 3418 3410 else if (rss_i > 1) 3419 3411 psrtype |= 1 << 29; 3420 3412 3421 - for (p = 0; p < adapter->num_rx_pools; p++) 3422 - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), 3423 - psrtype); 3413 + for_each_set_bit(pool, &adapter->fwd_bitmask, 32) 3414 + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 3424 3415 } 3425 3416 3426 3417 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) ··· 3688 3681 case 
ixgbe_mac_82599EB: 3689 3682 case ixgbe_mac_X540: 3690 3683 for (i = 0; i < adapter->num_rx_queues; i++) { 3691 - j = adapter->rx_ring[i]->reg_idx; 3684 + struct ixgbe_ring *ring = adapter->rx_ring[i]; 3685 + 3686 + if (ring->l2_accel_priv) 3687 + continue; 3688 + j = ring->reg_idx; 3692 3689 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3693 3690 vlnctrl &= ~IXGBE_RXDCTL_VME; 3694 3691 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); ··· 3722 3711 case ixgbe_mac_82599EB: 3723 3712 case ixgbe_mac_X540: 3724 3713 for (i = 0; i < adapter->num_rx_queues; i++) { 3725 - j = adapter->rx_ring[i]->reg_idx; 3714 + struct ixgbe_ring *ring = adapter->rx_ring[i]; 3715 + 3716 + if (ring->l2_accel_priv) 3717 + continue; 3718 + j = ring->reg_idx; 3726 3719 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3727 3720 vlnctrl |= IXGBE_RXDCTL_VME; 3728 3721 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); ··· 3763 3748 unsigned int rar_entries = hw->mac.num_rar_entries - 1; 3764 3749 int count = 0; 3765 3750 3766 - /* In SR-IOV mode significantly less RAR entries are available */ 3751 + /* In SR-IOV/VMDQ modes significantly less RAR entries are available */ 3767 3752 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 3768 3753 rar_entries = IXGBE_MAX_PF_MACVLANS - 1; 3769 3754 ··· 4128 4113 spin_unlock(&adapter->fdir_perfect_lock); 4129 4114 } 4130 4115 4116 + static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, 4117 + struct ixgbe_adapter *adapter) 4118 + { 4119 + struct ixgbe_hw *hw = &adapter->hw; 4120 + u32 vmolr; 4121 + 4122 + /* No unicast promiscuous support for VMDQ devices. 
*/ 4123 + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); 4124 + vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); 4125 + 4126 + /* clear the affected bit */ 4127 + vmolr &= ~IXGBE_VMOLR_MPE; 4128 + 4129 + if (dev->flags & IFF_ALLMULTI) { 4130 + vmolr |= IXGBE_VMOLR_MPE; 4131 + } else { 4132 + vmolr |= IXGBE_VMOLR_ROMPE; 4133 + hw->mac.ops.update_mc_addr_list(hw, dev); 4134 + } 4135 + ixgbe_write_uc_addr_list(adapter->netdev); 4136 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4137 + } 4138 + 4139 + static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, 4140 + u8 *addr, u16 pool) 4141 + { 4142 + struct ixgbe_hw *hw = &adapter->hw; 4143 + unsigned int entry; 4144 + 4145 + entry = hw->mac.num_rar_entries - pool; 4146 + hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV); 4147 + } 4148 + 4149 + static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) 4150 + { 4151 + struct ixgbe_adapter *adapter = vadapter->real_adapter; 4152 + int rss_i = vadapter->netdev->real_num_rx_queues; 4153 + struct ixgbe_hw *hw = &adapter->hw; 4154 + u16 pool = vadapter->pool; 4155 + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 4156 + IXGBE_PSRTYPE_UDPHDR | 4157 + IXGBE_PSRTYPE_IPV4HDR | 4158 + IXGBE_PSRTYPE_L2HDR | 4159 + IXGBE_PSRTYPE_IPV6HDR; 4160 + 4161 + if (hw->mac.type == ixgbe_mac_82598EB) 4162 + return; 4163 + 4164 + if (rss_i > 3) 4165 + psrtype |= 2 << 29; 4166 + else if (rss_i > 1) 4167 + psrtype |= 1 << 29; 4168 + 4169 + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 4170 + } 4171 + 4172 + /** 4173 + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 4174 + * @rx_ring: ring to free buffers from 4175 + **/ 4176 + static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) 4177 + { 4178 + struct device *dev = rx_ring->dev; 4179 + unsigned long size; 4180 + u16 i; 4181 + 4182 + /* ring already cleared, nothing to do */ 4183 + if (!rx_ring->rx_buffer_info) 4184 + return; 4185 + 4186 + /* Free all the Rx ring sk_buffs */ 4187 + for 
(i = 0; i < rx_ring->count; i++) { 4188 + struct ixgbe_rx_buffer *rx_buffer; 4189 + 4190 + rx_buffer = &rx_ring->rx_buffer_info[i]; 4191 + if (rx_buffer->skb) { 4192 + struct sk_buff *skb = rx_buffer->skb; 4193 + if (IXGBE_CB(skb)->page_released) { 4194 + dma_unmap_page(dev, 4195 + IXGBE_CB(skb)->dma, 4196 + ixgbe_rx_bufsz(rx_ring), 4197 + DMA_FROM_DEVICE); 4198 + IXGBE_CB(skb)->page_released = false; 4199 + } 4200 + dev_kfree_skb(skb); 4201 + } 4202 + rx_buffer->skb = NULL; 4203 + if (rx_buffer->dma) 4204 + dma_unmap_page(dev, rx_buffer->dma, 4205 + ixgbe_rx_pg_size(rx_ring), 4206 + DMA_FROM_DEVICE); 4207 + rx_buffer->dma = 0; 4208 + if (rx_buffer->page) 4209 + __free_pages(rx_buffer->page, 4210 + ixgbe_rx_pg_order(rx_ring)); 4211 + rx_buffer->page = NULL; 4212 + } 4213 + 4214 + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4215 + memset(rx_ring->rx_buffer_info, 0, size); 4216 + 4217 + /* Zero out the descriptor ring */ 4218 + memset(rx_ring->desc, 0, rx_ring->size); 4219 + 4220 + rx_ring->next_to_alloc = 0; 4221 + rx_ring->next_to_clean = 0; 4222 + rx_ring->next_to_use = 0; 4223 + } 4224 + 4225 + static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, 4226 + struct ixgbe_ring *rx_ring) 4227 + { 4228 + struct ixgbe_adapter *adapter = vadapter->real_adapter; 4229 + int index = rx_ring->queue_index + vadapter->rx_base_queue; 4230 + 4231 + /* shutdown specific queue receive and wait for dma to settle */ 4232 + ixgbe_disable_rx_queue(adapter, rx_ring); 4233 + usleep_range(10000, 20000); 4234 + ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); 4235 + ixgbe_clean_rx_ring(rx_ring); 4236 + rx_ring->l2_accel_priv = NULL; 4237 + } 4238 + 4239 + int ixgbe_fwd_ring_down(struct net_device *vdev, 4240 + struct ixgbe_fwd_adapter *accel) 4241 + { 4242 + struct ixgbe_adapter *adapter = accel->real_adapter; 4243 + unsigned int rxbase = accel->rx_base_queue; 4244 + unsigned int txbase = accel->tx_base_queue; 4245 + int i; 4246 + 4247 + 
netif_tx_stop_all_queues(vdev); 4248 + 4249 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4250 + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); 4251 + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; 4252 + } 4253 + 4254 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4255 + adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; 4256 + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; 4257 + } 4258 + 4259 + 4260 + return 0; 4261 + } 4262 + 4263 + static int ixgbe_fwd_ring_up(struct net_device *vdev, 4264 + struct ixgbe_fwd_adapter *accel) 4265 + { 4266 + struct ixgbe_adapter *adapter = accel->real_adapter; 4267 + unsigned int rxbase, txbase, queues; 4268 + int i, baseq, err = 0; 4269 + 4270 + if (!test_bit(accel->pool, &adapter->fwd_bitmask)) 4271 + return 0; 4272 + 4273 + baseq = accel->pool * adapter->num_rx_queues_per_pool; 4274 + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", 4275 + accel->pool, adapter->num_rx_pools, 4276 + baseq, baseq + adapter->num_rx_queues_per_pool, 4277 + adapter->fwd_bitmask); 4278 + 4279 + accel->netdev = vdev; 4280 + accel->rx_base_queue = rxbase = baseq; 4281 + accel->tx_base_queue = txbase = baseq; 4282 + 4283 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) 4284 + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); 4285 + 4286 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4287 + adapter->rx_ring[rxbase + i]->netdev = vdev; 4288 + adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; 4289 + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); 4290 + } 4291 + 4292 + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4293 + adapter->tx_ring[txbase + i]->netdev = vdev; 4294 + adapter->tx_ring[txbase + i]->l2_accel_priv = accel; 4295 + } 4296 + 4297 + queues = min_t(unsigned int, 4298 + adapter->num_rx_queues_per_pool, vdev->num_tx_queues); 4299 + err = netif_set_real_num_tx_queues(vdev, queues); 4300 + if (err) 4301 + goto 
fwd_queue_err; 4302 + 4303 + queues = min_t(unsigned int, 4304 + adapter->num_rx_queues_per_pool, vdev->num_rx_queues); 4305 + err = netif_set_real_num_rx_queues(vdev, queues); 4306 + if (err) 4307 + goto fwd_queue_err; 4308 + 4309 + if (is_valid_ether_addr(vdev->dev_addr)) 4310 + ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); 4311 + 4312 + ixgbe_fwd_psrtype(accel); 4313 + ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); 4314 + return err; 4315 + fwd_queue_err: 4316 + ixgbe_fwd_ring_down(vdev, accel); 4317 + return err; 4318 + } 4319 + 4320 + static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) 4321 + { 4322 + struct net_device *upper; 4323 + struct list_head *iter; 4324 + int err; 4325 + 4326 + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 4327 + if (netif_is_macvlan(upper)) { 4328 + struct macvlan_dev *dfwd = netdev_priv(upper); 4329 + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; 4330 + 4331 + if (dfwd->fwd_priv) { 4332 + err = ixgbe_fwd_ring_up(upper, vadapter); 4333 + if (err) 4334 + continue; 4335 + } 4336 + } 4337 + } 4338 + } 4339 + 4131 4340 static void ixgbe_configure(struct ixgbe_adapter *adapter) 4132 4341 { 4133 4342 struct ixgbe_hw *hw = &adapter->hw; ··· 4403 4164 #endif /* IXGBE_FCOE */ 4404 4165 ixgbe_configure_tx(adapter); 4405 4166 ixgbe_configure_rx(adapter); 4167 + ixgbe_configure_dfwd(adapter); 4406 4168 } 4407 4169 4408 4170 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) ··· 4557 4317 static void ixgbe_up_complete(struct ixgbe_adapter *adapter) 4558 4318 { 4559 4319 struct ixgbe_hw *hw = &adapter->hw; 4320 + struct net_device *upper; 4321 + struct list_head *iter; 4560 4322 int err; 4561 4323 u32 ctrl_ext; 4562 4324 ··· 4601 4359 4602 4360 /* enable transmits */ 4603 4361 netif_tx_start_all_queues(adapter->netdev); 4362 + 4363 + /* enable any upper devices */ 4364 + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 4365 + if (netif_is_macvlan(upper)) { 4366 + 
struct macvlan_dev *vlan = netdev_priv(upper); 4367 + 4368 + if (vlan->fwd_priv) 4369 + netif_tx_start_all_queues(upper); 4370 + } 4371 + } 4604 4372 4605 4373 /* bring the link up in the watchdog, this could race with our first 4606 4374 * link up interrupt but shouldn't be a problem */ ··· 4703 4451 } 4704 4452 4705 4453 /** 4706 - * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 4707 - * @rx_ring: ring to free buffers from 4708 - **/ 4709 - static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) 4710 - { 4711 - struct device *dev = rx_ring->dev; 4712 - unsigned long size; 4713 - u16 i; 4714 - 4715 - /* ring already cleared, nothing to do */ 4716 - if (!rx_ring->rx_buffer_info) 4717 - return; 4718 - 4719 - /* Free all the Rx ring sk_buffs */ 4720 - for (i = 0; i < rx_ring->count; i++) { 4721 - struct ixgbe_rx_buffer *rx_buffer; 4722 - 4723 - rx_buffer = &rx_ring->rx_buffer_info[i]; 4724 - if (rx_buffer->skb) { 4725 - struct sk_buff *skb = rx_buffer->skb; 4726 - if (IXGBE_CB(skb)->page_released) { 4727 - dma_unmap_page(dev, 4728 - IXGBE_CB(skb)->dma, 4729 - ixgbe_rx_bufsz(rx_ring), 4730 - DMA_FROM_DEVICE); 4731 - IXGBE_CB(skb)->page_released = false; 4732 - } 4733 - dev_kfree_skb(skb); 4734 - } 4735 - rx_buffer->skb = NULL; 4736 - if (rx_buffer->dma) 4737 - dma_unmap_page(dev, rx_buffer->dma, 4738 - ixgbe_rx_pg_size(rx_ring), 4739 - DMA_FROM_DEVICE); 4740 - rx_buffer->dma = 0; 4741 - if (rx_buffer->page) 4742 - __free_pages(rx_buffer->page, 4743 - ixgbe_rx_pg_order(rx_ring)); 4744 - rx_buffer->page = NULL; 4745 - } 4746 - 4747 - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4748 - memset(rx_ring->rx_buffer_info, 0, size); 4749 - 4750 - /* Zero out the descriptor ring */ 4751 - memset(rx_ring->desc, 0, rx_ring->size); 4752 - 4753 - rx_ring->next_to_alloc = 0; 4754 - rx_ring->next_to_clean = 0; 4755 - rx_ring->next_to_use = 0; 4756 - } 4757 - 4758 - /** 4759 4454 * ixgbe_clean_tx_ring - Free Tx Buffers 4760 4455 * @tx_ring: ring to be cleaned 4761 
4456 **/ ··· 4779 4580 { 4780 4581 struct net_device *netdev = adapter->netdev; 4781 4582 struct ixgbe_hw *hw = &adapter->hw; 4583 + struct net_device *upper; 4584 + struct list_head *iter; 4782 4585 u32 rxctrl; 4783 4586 int i; 4784 4587 ··· 4803 4602 /* call carrier off first to avoid false dev_watchdog timeouts */ 4804 4603 netif_carrier_off(netdev); 4805 4604 netif_tx_disable(netdev); 4605 + 4606 + /* disable any upper devices */ 4607 + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 4608 + if (netif_is_macvlan(upper)) { 4609 + struct macvlan_dev *vlan = netdev_priv(upper); 4610 + 4611 + if (vlan->fwd_priv) { 4612 + netif_tx_stop_all_queues(upper); 4613 + netif_carrier_off(upper); 4614 + netif_tx_disable(upper); 4615 + } 4616 + } 4617 + } 4806 4618 4807 4619 ixgbe_irq_disable(adapter); 4808 4620 ··· 5047 4833 return -EIO; 5048 4834 } 5049 4835 4836 + /* PF holds first pool slot */ 4837 + set_bit(0, &adapter->fwd_bitmask); 5050 4838 set_bit(__IXGBE_DOWN, &adapter->state); 5051 4839 5052 4840 return 0; ··· 5354 5138 static int ixgbe_open(struct net_device *netdev) 5355 5139 { 5356 5140 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5357 - int err; 5141 + int err, queues; 5358 5142 5359 5143 /* disallow open during test */ 5360 5144 if (test_bit(__IXGBE_TESTING, &adapter->state)) ··· 5379 5163 goto err_req_irq; 5380 5164 5381 5165 /* Notify the stack of the actual queue counts. */ 5382 - err = netif_set_real_num_tx_queues(netdev, 5383 - adapter->num_rx_pools > 1 ? 1 : 5384 - adapter->num_tx_queues); 5166 + if (adapter->num_rx_pools > 1) 5167 + queues = adapter->num_rx_queues_per_pool; 5168 + else 5169 + queues = adapter->num_tx_queues; 5170 + 5171 + err = netif_set_real_num_tx_queues(netdev, queues); 5385 5172 if (err) 5386 5173 goto err_set_queues; 5387 5174 5388 - 5389 - err = netif_set_real_num_rx_queues(netdev, 5390 - adapter->num_rx_pools > 1 ? 
1 : 5391 - adapter->num_rx_queues); 5175 + if (adapter->num_rx_pools > 1 && 5176 + adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) 5177 + queues = IXGBE_MAX_L2A_QUEUES; 5178 + else 5179 + queues = adapter->num_rx_queues; 5180 + err = netif_set_real_num_rx_queues(netdev, queues); 5392 5181 if (err) 5393 5182 goto err_set_queues; 5394 5183 ··· 6983 6762 return NETDEV_TX_OK; 6984 6763 } 6985 6764 6986 - static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 6987 - struct net_device *netdev) 6765 + static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, 6766 + struct net_device *netdev, 6767 + struct ixgbe_ring *ring) 6988 6768 { 6989 6769 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6990 6770 struct ixgbe_ring *tx_ring; ··· 7001 6779 skb_set_tail_pointer(skb, 17); 7002 6780 } 7003 6781 7004 - tx_ring = adapter->tx_ring[skb->queue_mapping]; 6782 + tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; 6783 + 7005 6784 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); 6785 + } 6786 + 6787 + static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 6788 + struct net_device *netdev) 6789 + { 6790 + return __ixgbe_xmit_frame(skb, netdev, NULL); 7006 6791 } 7007 6792 7008 6793 /** ··· 7268 7039 { 7269 7040 struct ixgbe_adapter *adapter = netdev_priv(dev); 7270 7041 struct ixgbe_hw *hw = &adapter->hw; 7042 + bool pools; 7271 7043 7272 7044 /* Hardware supports up to 8 traffic classes */ 7273 7045 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 7274 7046 (hw->mac.type == ixgbe_mac_82598EB && 7275 7047 tc < MAX_TRAFFIC_CLASS)) 7276 7048 return -EINVAL; 7049 + 7050 + pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); 7051 + if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) 7052 + return -EBUSY; 7277 7053 7278 7054 /* Hardware has to reinitialize queues and interrupts to 7279 7055 * match packet buffer alignment. 
Unfortunately, the ··· 7534 7300 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); 7535 7301 } 7536 7302 7303 + static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) 7304 + { 7305 + struct ixgbe_fwd_adapter *fwd_adapter = NULL; 7306 + struct ixgbe_adapter *adapter = netdev_priv(pdev); 7307 + int pool, err; 7308 + 7309 + /* Check for hardware restriction on number of rx/tx queues */ 7310 + if (vdev->num_rx_queues != vdev->num_tx_queues || 7311 + vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || 7312 + vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { 7313 + netdev_info(pdev, 7314 + "%s: Supports RX/TX Queue counts 1,2, and 4\n", 7315 + pdev->name); 7316 + return ERR_PTR(-EINVAL); 7317 + } 7318 + 7319 + if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 7320 + adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || 7321 + (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) 7322 + return ERR_PTR(-EBUSY); 7323 + 7324 + fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); 7325 + if (!fwd_adapter) 7326 + return ERR_PTR(-ENOMEM); 7327 + 7328 + pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); 7329 + adapter->num_rx_pools++; 7330 + set_bit(pool, &adapter->fwd_bitmask); 7331 + 7332 + /* Enable VMDq flag so device will be set in VM mode */ 7333 + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; 7334 + adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools; 7335 + adapter->ring_feature[RING_F_RSS].limit = vdev->num_rx_queues; 7336 + 7337 + /* Force reinit of ring allocation with VMDQ enabled */ 7338 + err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); 7339 + if (err) 7340 + goto fwd_add_err; 7341 + fwd_adapter->pool = pool; 7342 + fwd_adapter->real_adapter = adapter; 7343 + err = ixgbe_fwd_ring_up(vdev, fwd_adapter); 7344 + if (err) 7345 + goto fwd_add_err; 7346 + netif_tx_start_all_queues(vdev); 7347 + return fwd_adapter; 7348 + fwd_add_err: 7349 + /* unwind counter and free adapter struct */ 7350 + 
netdev_info(pdev, 7351 + "%s: dfwd hardware acceleration failed\n", vdev->name); 7352 + clear_bit(pool, &adapter->fwd_bitmask); 7353 + adapter->num_rx_pools--; 7354 + kfree(fwd_adapter); 7355 + return ERR_PTR(err); 7356 + } 7357 + 7358 + static void ixgbe_fwd_del(struct net_device *pdev, void *priv) 7359 + { 7360 + struct ixgbe_fwd_adapter *fwd_adapter = priv; 7361 + struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; 7362 + 7363 + clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); 7364 + adapter->num_rx_pools--; 7365 + 7366 + adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools; 7367 + ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); 7368 + ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); 7369 + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", 7370 + fwd_adapter->pool, adapter->num_rx_pools, 7371 + fwd_adapter->rx_base_queue, 7372 + fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, 7373 + adapter->fwd_bitmask); 7374 + kfree(fwd_adapter); 7375 + } 7376 + 7377 + static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, 7378 + struct net_device *dev, 7379 + void *priv) 7380 + { 7381 + struct ixgbe_fwd_adapter *fwd_adapter = priv; 7382 + unsigned int queue; 7383 + struct ixgbe_ring *tx_ring; 7384 + 7385 + queue = skb->queue_mapping + fwd_adapter->tx_base_queue; 7386 + tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; 7387 + 7388 + return __ixgbe_xmit_frame(skb, dev, tx_ring); 7389 + } 7390 + 7537 7391 static const struct net_device_ops ixgbe_netdev_ops = { 7538 7392 .ndo_open = ixgbe_open, 7539 7393 .ndo_stop = ixgbe_close, ··· 7666 7344 .ndo_fdb_add = ixgbe_ndo_fdb_add, 7667 7345 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, 7668 7346 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7347 + .ndo_dfwd_add_station = ixgbe_fwd_add, 7348 + .ndo_dfwd_del_station = ixgbe_fwd_del, 7349 + .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, 7669 7350 }; 7670 7351 7671 7352 /** ··· 7970 7645 NETIF_F_TSO | 7971 7646 NETIF_F_TSO6 | 7972 7647 
NETIF_F_RXHASH | 7973 - NETIF_F_RXCSUM; 7648 + NETIF_F_RXCSUM | 7649 + NETIF_F_HW_L2FW_DOFFLOAD; 7974 7650 7975 7651 netdev->hw_features = netdev->features; 7976 7652
+8 -9
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
··· 223 223 IXGBE_WRITE_FLUSH(hw); 224 224 225 225 /* Disable VMDq flag so device will be set in VM mode */ 226 - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) 226 + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { 227 227 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; 228 - adapter->ring_feature[RING_F_VMDQ].offset = 0; 228 + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 229 + rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); 230 + } else { 231 + rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); 232 + } 229 233 230 - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); 234 + adapter->ring_feature[RING_F_VMDQ].offset = 0; 231 235 adapter->ring_feature[RING_F_RSS].limit = rss; 232 236 233 237 /* take a breather then clean up driver data */ 234 238 msleep(100); 235 - 236 - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 237 239 return 0; 238 240 } 239 241 ··· 300 298 err = ixgbe_disable_sriov(adapter); 301 299 302 300 /* Only reinit if no error and state changed */ 303 - if (!err && current_flags != adapter->flags) { 304 - /* ixgbe_disable_sriov() doesn't clear VMDQ flag */ 305 - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; 306 301 #ifdef CONFIG_PCI_IOV 302 + if (!err && current_flags != adapter->flags) 307 303 ixgbe_sriov_reinit(adapter); 308 304 #endif 309 - } 310 305 311 306 return err; 312 307 }
+35 -1
drivers/net/macvlan.c
··· 297 297 int ret; 298 298 const struct macvlan_dev *vlan = netdev_priv(dev); 299 299 300 - ret = macvlan_queue_xmit(skb, dev); 300 + if (vlan->fwd_priv) { 301 + skb->dev = vlan->lowerdev; 302 + ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); 303 + } else { 304 + ret = macvlan_queue_xmit(skb, dev); 305 + } 306 + 301 307 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 302 308 struct macvlan_pcpu_stats *pcpu_stats; 303 309 ··· 353 347 goto hash_add; 354 348 } 355 349 350 + if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { 351 + vlan->fwd_priv = 352 + lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); 353 + 354 + /* If we get a NULL pointer back, or if we get an error 355 + * then we should just fall through to the non accelerated path 356 + */ 357 + if (IS_ERR_OR_NULL(vlan->fwd_priv)) { 358 + vlan->fwd_priv = NULL; 359 + } else { 360 + dev->features &= ~NETIF_F_LLTX; 361 + return 0; 362 + } 363 + } 364 + 356 365 err = -EBUSY; 357 366 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 358 367 goto out; ··· 388 367 del_unicast: 389 368 dev_uc_del(lowerdev, dev->dev_addr); 390 369 out: 370 + if (vlan->fwd_priv) { 371 + lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, 372 + vlan->fwd_priv); 373 + vlan->fwd_priv = NULL; 374 + } 391 375 return err; 392 376 } 393 377 ··· 400 374 { 401 375 struct macvlan_dev *vlan = netdev_priv(dev); 402 376 struct net_device *lowerdev = vlan->lowerdev; 377 + 378 + if (vlan->fwd_priv) { 379 + lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, 380 + vlan->fwd_priv); 381 + vlan->fwd_priv = NULL; 382 + return 0; 383 + } 403 384 404 385 dev_uc_unsync(lowerdev, dev); 405 386 dev_mc_unsync(lowerdev, dev); ··· 866 833 if (err < 0) 867 834 goto destroy_port; 868 835 836 + dev->priv_flags |= IFF_MACVLAN; 869 837 err = netdev_upper_dev_link(lowerdev, dev); 870 838 if (err) 871 839 goto destroy_port;
+1
include/linux/if_macvlan.h
··· 61 61 struct hlist_node hlist; 62 62 struct macvlan_port *port; 63 63 struct net_device *lowerdev; 64 + void *fwd_priv; 64 65 struct macvlan_pcpu_stats __percpu *pcpu_stats; 65 66 66 67 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
+2
include/linux/netdev_features.h
··· 62 62 NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */ 63 63 NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ 64 64 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ 65 + NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ 65 66 66 67 /* 67 68 * Add your fresh new feature above and remember to update ··· 117 116 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 118 117 #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 119 118 #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 119 + #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) 120 120 121 121 /* Features valid for ethtool to change */ 122 122 /* = all defined minus driver/device-class-related */
+35 -1
include/linux/netdevice.h
··· 962 962 * Called by vxlan to notify the driver about a UDP port and socket 963 963 * address family that vxlan is not listening to anymore. The operation 964 964 * is protected by the vxlan_net->sock_lock. 965 + * 966 + * void* (*ndo_dfwd_add_station)(struct net_device *pdev, 967 + * struct net_device *dev) 968 + * Called by upper layer devices to accelerate switching or other 969 + * station functionality into hardware. 'pdev is the lowerdev 970 + * to use for the offload and 'dev' is the net device that will 971 + * back the offload. Returns a pointer to the private structure 972 + * the upper layer will maintain. 973 + * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) 974 + * Called by upper layer device to delete the station created 975 + * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing 976 + * the station and priv is the structure returned by the add 977 + * operation. 978 + * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb, 979 + * struct net_device *dev, 980 + * void *priv); 981 + * Callback to use for xmit over the accelerated station. This 982 + * is used in place of ndo_start_xmit on accelerated net 983 + * devices. 
965 984 */ 966 985 struct net_device_ops { 967 986 int (*ndo_init)(struct net_device *dev); ··· 1117 1098 void (*ndo_del_vxlan_port)(struct net_device *dev, 1118 1099 sa_family_t sa_family, 1119 1100 __be16 port); 1101 + 1102 + void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1103 + struct net_device *dev); 1104 + void (*ndo_dfwd_del_station)(struct net_device *pdev, 1105 + void *priv); 1106 + 1107 + netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, 1108 + struct net_device *dev, 1109 + void *priv); 1120 1110 }; 1121 1111 1122 1112 /* ··· 1223 1195 /* Management operations */ 1224 1196 const struct net_device_ops *netdev_ops; 1225 1197 const struct ethtool_ops *ethtool_ops; 1198 + const struct forwarding_accel_ops *fwd_ops; 1226 1199 1227 1200 /* Hardware header description */ 1228 1201 const struct header_ops *header_ops; ··· 2417 2388 int dev_get_phys_port_id(struct net_device *dev, 2418 2389 struct netdev_phys_port_id *ppid); 2419 2390 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2420 - struct netdev_queue *txq); 2391 + struct netdev_queue *txq, void *accel_priv); 2421 2392 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2422 2393 2423 2394 extern int netdev_budget; ··· 2994 2965 unsigned int size) 2995 2966 { 2996 2967 dev->gso_max_size = size; 2968 + } 2969 + 2970 + static inline bool netif_is_macvlan(struct net_device *dev) 2971 + { 2972 + return dev->priv_flags & IFF_MACVLAN; 2997 2973 } 2998 2974 2999 2975 static inline bool netif_is_bond_master(struct net_device *dev)
+1
include/uapi/linux/if.h
··· 83 83 #define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */ 84 84 #define IFF_LIVE_ADDR_CHANGE 0x100000 /* device supports hardware address 85 85 * change when it's running */ 86 + #define IFF_MACVLAN 0x200000 /* Macvlan device */ 86 87 87 88 88 89 #define IF_GET_IFACE 0x0001 /* for querying only */
+13 -5
net/core/dev.c
··· 2538 2538 } 2539 2539 2540 2540 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2541 - struct netdev_queue *txq) 2541 + struct netdev_queue *txq, void *accel_priv) 2542 2542 { 2543 2543 const struct net_device_ops *ops = dev->netdev_ops; 2544 2544 int rc = NETDEV_TX_OK; ··· 2604 2604 dev_queue_xmit_nit(skb, dev); 2605 2605 2606 2606 skb_len = skb->len; 2607 - rc = ops->ndo_start_xmit(skb, dev); 2607 + if (accel_priv) 2608 + rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv); 2609 + else 2610 + rc = ops->ndo_start_xmit(skb, dev); 2611 + 2608 2612 trace_net_dev_xmit(skb, rc, dev, skb_len); 2609 - if (rc == NETDEV_TX_OK) 2613 + if (rc == NETDEV_TX_OK && txq) 2610 2614 txq_trans_update(txq); 2611 2615 return rc; 2612 2616 } ··· 2626 2622 dev_queue_xmit_nit(nskb, dev); 2627 2623 2628 2624 skb_len = nskb->len; 2629 - rc = ops->ndo_start_xmit(nskb, dev); 2625 + if (accel_priv) 2626 + rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv); 2627 + else 2628 + rc = ops->ndo_start_xmit(nskb, dev); 2630 2629 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2631 2630 if (unlikely(rc != NETDEV_TX_OK)) { 2632 2631 if (rc & ~NETDEV_TX_MASK) ··· 2654 2647 out: 2655 2648 return rc; 2656 2649 } 2650 + EXPORT_SYMBOL_GPL(dev_hard_start_xmit); 2657 2651 2658 2652 static void qdisc_pkt_len_init(struct sk_buff *skb) 2659 2653 { ··· 2862 2854 2863 2855 if (!netif_xmit_stopped(txq)) { 2864 2856 __this_cpu_inc(xmit_recursion); 2865 - rc = dev_hard_start_xmit(skb, dev, txq); 2857 + rc = dev_hard_start_xmit(skb, dev, txq, NULL); 2866 2858 __this_cpu_dec(xmit_recursion); 2867 2859 if (dev_xmit_complete(rc)) { 2868 2860 HARD_TX_UNLOCK(dev, txq);
+1
net/core/ethtool.c
··· 96 96 [NETIF_F_LOOPBACK_BIT] = "loopback", 97 97 [NETIF_F_RXFCS_BIT] = "rx-fcs", 98 98 [NETIF_F_RXALL_BIT] = "rx-all", 99 + [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", 99 100 }; 100 101 101 102 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
+1 -1
net/sched/sch_generic.c
··· 126 126 127 127 HARD_TX_LOCK(dev, txq, smp_processor_id()); 128 128 if (!netif_xmit_frozen_or_stopped(txq)) 129 - ret = dev_hard_start_xmit(skb, dev, txq); 129 + ret = dev_hard_start_xmit(skb, dev, txq, NULL); 130 130 131 131 HARD_TX_UNLOCK(dev, txq); 132 132