Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix route leak in xfrm_bundle_create().

2) In mac80211, validate user rate mask before configuring it. From
Johannes Berg.

3) Properly enforce memory limits in fair queueing code, from Toke
Hoiland-Jorgensen.

4) Fix lockdep splat in inet_csk_route_req(), from Eric Dumazet.

5) Fix TSO header allocation and management in mvpp2 driver, from Yan
Markman.

6) Don't take socket lock in BH handler in strparser code, from Tom
Herbert.

7) Don't show sockets from other namespaces in AF_UNIX code, from
Andrei Vagin.

8) Fix double free in error path of tap_open(), from Girish Moodalbail.

9) Fix TX map failure path in igb and ixgbe, from Jean-Philippe Brucker
and Alexander Duyck.

10) Fix DCB mode programming in stmmac driver, from Jose Abreu.

11) Fix err_count handling in various tunnels (ipip, ip6_gre). From Xin
Long.

12) Properly align SKB head before building SKB in tuntap, from Jason
Wang.

13) Avoid matching qdiscs with a zero handle during lookups, from Cong
Wang.

14) Fix various endianness bugs in sctp, from Xin Long.

15) Fix tc filter callback races and add selftests which trigger the
problem, from Cong Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (73 commits)
selftests: Introduce a new test case to tc testsuite
selftests: Introduce a new script to generate tc batch file
net_sched: fix call_rcu() race on act_sample module removal
net_sched: add rtnl assertion to tcf_exts_destroy()
net_sched: use tcf_queue_work() in tcindex filter
net_sched: use tcf_queue_work() in rsvp filter
net_sched: use tcf_queue_work() in route filter
net_sched: use tcf_queue_work() in u32 filter
net_sched: use tcf_queue_work() in matchall filter
net_sched: use tcf_queue_work() in fw filter
net_sched: use tcf_queue_work() in flower filter
net_sched: use tcf_queue_work() in flow filter
net_sched: use tcf_queue_work() in cgroup filter
net_sched: use tcf_queue_work() in bpf filter
net_sched: use tcf_queue_work() in basic filter
net_sched: introduce a workqueue for RCU callbacks of tc filter
sctp: fix some type cast warnings introduced since very beginning
sctp: fix a type cast warnings that causes a_rwnd gets the wrong value
sctp: fix some type cast warnings introduced by transport rhashtable
sctp: fix some type cast warnings introduced by stream reconf
...

+1 -2
drivers/net/can/sun4i_can.c
···
 
 	/* enter the selected mode */
 	mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
-	if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
 		mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 		mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
···
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
 				       CAN_CTRLMODE_LISTENONLY |
 				       CAN_CTRLMODE_LOOPBACK |
-				       CAN_CTRLMODE_PRESUME_ACK |
 				       CAN_CTRLMODE_3_SAMPLES;
 	priv->base = addr;
 	priv->clk = clk;
+8 -1
drivers/net/can/usb/kvaser_usb.c
···
 #define CMD_RESET_ERROR_COUNTER		49
 #define CMD_TX_ACKNOWLEDGE		50
 #define CMD_CAN_ERROR_EVENT		51
+#define CMD_FLUSH_QUEUE_REPLY		68
 
 #define CMD_LEAF_USB_THROTTLE		77
 #define CMD_LEAF_LOG_MESSAGE		106
···
 			goto warn;
 		break;
 
+	case CMD_FLUSH_QUEUE_REPLY:
+		if (dev->family != KVASER_LEAF)
+			goto warn;
+		break;
+
 	default:
 warn:		dev_warn(dev->udev->dev.parent,
 			 "Unhandled message (%d)\n", msg->id);
···
 	if (err)
 		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
 
-	if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+	if (err)
 		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
 
 	err = kvaser_usb_stop_chip(priv);
+4 -5
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
···
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
-	char *p = NULL;
 	const struct e1000_stats *stat = e1000_gstrings_stats;
 
 	e1000_update_stats(adapter);
-	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
+		char *p;
+
 		switch (stat->type) {
 		case NETDEV_STATS:
 			p = (char *)netdev + stat->stat_offset;
···
 		default:
 			WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
 				  stat->type, i);
-			break;
+			continue;
 		}
 
 		if (stat->sizeof_stat == sizeof(u64))
 			data[i] = *(u64 *)p;
 		else
 			data[i] = *(u32 *)p;
-
-		stat++;
 	}
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
+9 -2
drivers/net/ethernet/intel/e1000/e1000_main.c
···
 	struct net_device *netdev = adapter->netdev;
 	u32 rctl, tctl;
 
-	netif_carrier_off(netdev);
-
 	/* disable receives in the hardware */
 	rctl = er32(RCTL);
 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
···
 	/* flush both disables and wait for them to finish */
 	E1000_WRITE_FLUSH();
 	msleep(10);
+
+	/* Set the carrier off after transmits have been disabled in the
+	 * hardware, to avoid race conditions with e1000_watchdog() (which
+	 * may be running concurrently to us, checking for the carrier
+	 * bit to decide whether it should enable transmits again). Such
+	 * a race condition would result into transmission being disabled
+	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
+	 */
+	netif_carrier_off(netdev);
 
 	napi_disable(&adapter->napi);
 
+2 -1
drivers/net/ethernet/intel/i40e/i40e_txrx.c
···
 
 		if (unlikely(i40e_rx_is_programming_status(qword))) {
 			i40e_clean_programming_status(rx_ring, rx_desc, qword);
+			cleaned_count++;
 			continue;
 		}
 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
···
 			goto enable_int;
 	}
 
-	if (ITR_IS_DYNAMIC(tx_itr_setting)) {
+	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
 		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
 		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
 	}
+1 -1
drivers/net/ethernet/intel/igb/igb_main.c
···
 			       DMA_TO_DEVICE);
 		dma_unmap_len_set(tx_buffer, len, 0);
 
-		if (i--)
+		if (i-- == 0)
 			i += tx_ring->count;
 		tx_buffer = &tx_ring->tx_buffer_info[i];
 	}
+6 -12
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 	return 0;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
-	tx_buffer = &tx_ring->tx_buffer_info[i];
 
 	/* clear dma mappings for failed tx_buffer_info map */
-	while (tx_buffer != first) {
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_page(tx_ring->dev,
 				       dma_unmap_addr(tx_buffer, dma),
 				       dma_unmap_len(tx_buffer, len),
 				       DMA_TO_DEVICE);
 		dma_unmap_len_set(tx_buffer, len, 0);
-
-		if (i--)
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
 			i += tx_ring->count;
-		tx_buffer = &tx_ring->tx_buffer_info[i];
+		i--;
 	}
-
-	if (dma_unmap_len(tx_buffer, len))
-		dma_unmap_single(tx_ring->dev,
-				 dma_unmap_addr(tx_buffer, dma),
-				 dma_unmap_len(tx_buffer, len),
-				 DMA_TO_DEVICE);
-	dma_unmap_len_set(tx_buffer, len, 0);
 
 	dev_kfree_skb_any(first->skb);
 	first->skb = NULL;
+22 -13
drivers/net/ethernet/marvell/mvpp2.c
··· 1167 1167 u32 port_map; 1168 1168 }; 1169 1169 1170 + #define IS_TSO_HEADER(txq_pcpu, addr) \ 1171 + ((addr) >= (txq_pcpu)->tso_headers_dma && \ 1172 + (addr) < (txq_pcpu)->tso_headers_dma + \ 1173 + (txq_pcpu)->size * TSO_HEADER_SIZE) 1174 + 1170 1175 /* Queue modes */ 1171 1176 #define MVPP2_QDIST_SINGLE_MODE 0 1172 1177 #define MVPP2_QDIST_MULTI_MODE 1 ··· 1539 1534 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); 1540 1535 u16 tcam_data; 1541 1536 1542 - tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off]; 1537 + tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; 1543 1538 if (tcam_data != data) 1544 1539 return false; 1545 1540 return true; ··· 2614 2609 /* place holders only - no ports */ 2615 2610 mvpp2_prs_mac_drop_all_set(priv, 0, false); 2616 2611 mvpp2_prs_mac_promisc_set(priv, 0, false); 2617 - mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 2618 - mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 2612 + mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false); 2613 + mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false); 2619 2614 } 2620 2615 2621 2616 /* Set default entries for various types of dsa packets */ ··· 3396 3391 struct mvpp2_prs_entry *pe; 3397 3392 int tid; 3398 3393 3399 - pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3394 + pe = kzalloc(sizeof(*pe), GFP_ATOMIC); 3400 3395 if (!pe) 3401 3396 return NULL; 3402 3397 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); ··· 3458 3453 if (tid < 0) 3459 3454 return tid; 3460 3455 3461 - pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3456 + pe = kzalloc(sizeof(*pe), GFP_ATOMIC); 3462 3457 if (!pe) 3463 3458 return -ENOMEM; 3464 3459 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); ··· 5326 5321 struct mvpp2_txq_pcpu_buf *tx_buf = 5327 5322 txq_pcpu->buffs + txq_pcpu->txq_get_index; 5328 5323 5329 - dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 5330 - tx_buf->size, DMA_TO_DEVICE); 5324 + if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) 5325 + dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 5326 + tx_buf->size, DMA_TO_DEVICE); 5331 5327 if (tx_buf->skb) 5332 5328 dev_kfree_skb_any(tx_buf->skb); 5333 5329 ··· 5615 5609 5616 5610 txq_pcpu->tso_headers = 5617 5611 dma_alloc_coherent(port->dev->dev.parent, 5618 - MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE, 5612 + txq_pcpu->size * TSO_HEADER_SIZE, 5619 5613 &txq_pcpu->tso_headers_dma, 5620 5614 GFP_KERNEL); 5621 5615 if (!txq_pcpu->tso_headers) ··· 5629 5623 kfree(txq_pcpu->buffs); 5630 5624 5631 5625 dma_free_coherent(port->dev->dev.parent, 5632 - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5626 + txq_pcpu->size * TSO_HEADER_SIZE, 5633 5627 txq_pcpu->tso_headers, 5634 5628 txq_pcpu->tso_headers_dma); 5635 5629 } ··· 5653 5647 kfree(txq_pcpu->buffs); 5654 5648 5655 5649 dma_free_coherent(port->dev->dev.parent, 5656 - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5650 + txq_pcpu->size * TSO_HEADER_SIZE, 5657 5651 txq_pcpu->tso_headers, 5658 5652 txq_pcpu->tso_headers_dma); 5659 5653 } ··· 6218 6212 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 6219 6213 struct mvpp2_tx_desc *desc) 6220 6214 { 6215 + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); 6216 + 6221 6217 dma_addr_t buf_dma_addr = 6222 6218 mvpp2_txdesc_dma_addr_get(port, desc); 6223 6219 size_t buf_sz = 6224 6220 mvpp2_txdesc_size_get(port, desc); 6225 - dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 6226 - buf_sz, DMA_TO_DEVICE); 6221 + if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) 6222 + dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 
6223 + buf_sz, DMA_TO_DEVICE); 6227 6224 mvpp2_txq_desc_put(txq); 6228 6225 } 6229 6226 ··· 6499 6490 } 6500 6491 6501 6492 /* Finalize TX processing */ 6502 - if (txq_pcpu->count >= txq->done_pkts_coal) 6493 + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 6503 6494 mvpp2_txq_done(port, txq, txq_pcpu); 6504 6495 6505 6496 /* Set the timer in case not all frags were processed */
+41 -29
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 77 77 list_add_tail(&delayed_event->list, &priv->waiting_events_list); 78 78 } 79 79 80 - static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, 81 - struct mlx5_core_dev *dev, 82 - struct mlx5_priv *priv) 80 + static void delayed_event_release(struct mlx5_device_context *dev_ctx, 81 + struct mlx5_priv *priv) 83 82 { 83 + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); 84 84 struct mlx5_delayed_event *de; 85 85 struct mlx5_delayed_event *n; 86 + struct list_head temp; 86 87 87 - /* stop delaying events */ 88 + INIT_LIST_HEAD(&temp); 89 + 90 + spin_lock_irq(&priv->ctx_lock); 91 + 88 92 priv->is_accum_events = false; 89 - 90 - /* fire all accumulated events before new event comes */ 91 - list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { 93 + list_splice_init(&priv->waiting_events_list, &temp); 94 + if (!dev_ctx->context) 95 + goto out; 96 + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) 92 97 dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); 98 + 99 + out: 100 + spin_unlock_irq(&priv->ctx_lock); 101 + 102 + list_for_each_entry_safe(de, n, &temp, list) { 93 103 list_del(&de->list); 94 104 kfree(de); 95 105 } 96 106 } 97 107 98 - static void cleanup_delayed_evets(struct mlx5_priv *priv) 108 + /* accumulating events that can come after mlx5_ib calls to 109 + * ib_register_device, till adding that interface to the events list. 110 + */ 111 + static void delayed_event_start(struct mlx5_priv *priv) 99 112 { 100 - struct mlx5_delayed_event *de; 101 - struct mlx5_delayed_event *n; 102 - 103 113 spin_lock_irq(&priv->ctx_lock); 104 - priv->is_accum_events = false; 105 - list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { 106 - list_del(&de->list); 107 - kfree(de); 108 - } 114 + priv->is_accum_events = true; 109 115 spin_unlock_irq(&priv->ctx_lock); 110 116 } 111 117 ··· 128 122 return; 129 123 130 124 dev_ctx->intf = intf; 131 - /* accumulating events that can come after mlx5_ib calls to 132 - * ib_register_device, till adding that interface to the events list. 
133 - */ 134 125 135 - priv->is_accum_events = true; 126 + delayed_event_start(priv); 136 127 137 128 dev_ctx->context = intf->add(dev); 138 129 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); ··· 139 136 if (dev_ctx->context) { 140 137 spin_lock_irq(&priv->ctx_lock); 141 138 list_add_tail(&dev_ctx->list, &priv->ctx_list); 142 - 143 - fire_delayed_event_locked(dev_ctx, dev, priv); 144 139 145 140 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 146 141 if (dev_ctx->intf->pfault) { ··· 151 150 } 152 151 #endif 153 152 spin_unlock_irq(&priv->ctx_lock); 154 - } else { 155 - kfree(dev_ctx); 156 - /* delete all accumulated events */ 157 - cleanup_delayed_evets(priv); 158 153 } 154 + 155 + delayed_event_release(dev_ctx, priv); 156 + 157 + if (!dev_ctx->context) 158 + kfree(dev_ctx); 159 159 } 160 160 161 161 static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, ··· 207 205 if (!dev_ctx) 208 206 return; 209 207 208 + delayed_event_start(priv); 210 209 if (intf->attach) { 211 210 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) 212 - return; 211 + goto out; 213 212 intf->attach(dev, dev_ctx->context); 214 213 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 215 214 } else { 216 215 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) 217 - return; 216 + goto out; 218 217 dev_ctx->context = intf->add(dev); 219 218 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 220 219 } 220 + 221 + out: 222 + delayed_event_release(dev_ctx, priv); 221 223 } 222 224 223 225 void mlx5_attach_device(struct mlx5_core_dev *dev) ··· 420 414 if (priv->is_accum_events) 421 415 add_delayed_event(priv, dev, event, param); 422 416 417 + /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is 418 + * still in priv->ctx_list. In this case, only notify the dev_ctx if its 419 + * ADDED or ATTACHED bit are set. 420 + */ 423 421 list_for_each_entry(dev_ctx, &priv->ctx_list, list) 424 - if (dev_ctx->intf->event) 422 + if (dev_ctx->intf->event && 423 + (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) || 424 + test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))) 425 425 dev_ctx->intf->event(dev, dev_ctx->context, event, param); 426 426 427 427 spin_unlock_irqrestore(&priv->ctx_lock, flags);
+84 -31
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 41 41 #define MLX5E_CEE_STATE_UP 1 42 42 #define MLX5E_CEE_STATE_DOWN 0 43 43 44 + enum { 45 + MLX5E_VENDOR_TC_GROUP_NUM = 7, 46 + MLX5E_LOWEST_PRIO_GROUP = 0, 47 + }; 48 + 44 49 /* If dcbx mode is non-host set the dcbx mode to host. 45 50 */ 46 51 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, ··· 90 85 { 91 86 struct mlx5e_priv *priv = netdev_priv(netdev); 92 87 struct mlx5_core_dev *mdev = priv->mdev; 88 + u8 tc_group[IEEE_8021QAZ_MAX_TCS]; 89 + bool is_tc_group_6_exist = false; 90 + bool is_zero_bw_ets_tc = false; 93 91 int err = 0; 94 92 int i; 95 93 ··· 104 96 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); 105 97 if (err) 106 98 return err; 107 - } 108 99 109 - for (i = 0; i < ets->ets_cap; i++) { 100 + err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]); 101 + if (err) 102 + return err; 103 + 110 104 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); 111 105 if (err) 112 106 return err; 113 - if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) 114 - priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; 107 + 108 + if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC && 109 + tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1)) 110 + is_zero_bw_ets_tc = true; 111 + 112 + if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1)) 113 + is_tc_group_6_exist = true; 115 114 } 116 115 116 + /* Report 0% ets tc if exits*/ 117 + if (is_zero_bw_ets_tc) { 118 + for (i = 0; i < ets->ets_cap; i++) 119 + if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP) 120 + ets->tc_tx_bw[i] = 0; 121 + } 122 + 123 + /* Update tc_tsa based on fw setting*/ 124 + for (i = 0; i < ets->ets_cap; i++) { 125 + if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) 126 + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; 127 + else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM && 128 + !is_tc_group_6_exist) 129 + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; 130 + } 117 131 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); 118 132 119 133 return err; 120 134 } 121 135 122 - enum { 123 - MLX5E_VENDOR_TC_GROUP_NUM = 7, 124 - MLX5E_ETS_TC_GROUP_NUM = 0, 125 - }; 126 - 127 136 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) 128 137 { 129 138 bool any_tc_mapped_to_ets = false; 139 + bool ets_zero_bw = false; 130 140 int strict_group; 131 141 int i; 132 142 133 - for (i = 0; i <= max_tc; i++) 134 - if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 143 + for (i = 0; i <= max_tc; i++) { 144 + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { 135 145 any_tc_mapped_to_ets = true; 146 + if (!ets->tc_tx_bw[i]) 147 + ets_zero_bw = true; 148 + } 149 + } 136 150 137 - strict_group = any_tc_mapped_to_ets ? 
1 : 0; 151 + /* strict group has higher priority than ets group */ 152 + strict_group = MLX5E_LOWEST_PRIO_GROUP; 153 + if (any_tc_mapped_to_ets) 154 + strict_group++; 155 + if (ets_zero_bw) 156 + strict_group++; 138 157 139 158 for (i = 0; i <= max_tc; i++) { 140 159 switch (ets->tc_tsa[i]) { ··· 172 137 tc_group[i] = strict_group++; 173 138 break; 174 139 case IEEE_8021QAZ_TSA_ETS: 175 - tc_group[i] = MLX5E_ETS_TC_GROUP_NUM; 140 + tc_group[i] = MLX5E_LOWEST_PRIO_GROUP; 141 + if (ets->tc_tx_bw[i] && ets_zero_bw) 142 + tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1; 176 143 break; 177 144 } 178 145 } ··· 183 146 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, 184 147 u8 *tc_group, int max_tc) 185 148 { 149 + int bw_for_ets_zero_bw_tc = 0; 150 + int last_ets_zero_bw_tc = -1; 151 + int num_ets_zero_bw = 0; 186 152 int i; 153 + 154 + for (i = 0; i <= max_tc; i++) { 155 + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS && 156 + !ets->tc_tx_bw[i]) { 157 + num_ets_zero_bw++; 158 + last_ets_zero_bw_tc = i; 159 + } 160 + } 161 + 162 + if (num_ets_zero_bw) 163 + bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw; 187 164 188 165 for (i = 0; i <= max_tc; i++) { 189 166 switch (ets->tc_tsa[i]) { ··· 208 157 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 209 158 break; 210 159 case IEEE_8021QAZ_TSA_ETS: 211 - tc_tx_bw[i] = ets->tc_tx_bw[i]; 160 + tc_tx_bw[i] = ets->tc_tx_bw[i] ? 161 + ets->tc_tx_bw[i] : 162 + bw_for_ets_zero_bw_tc; 212 163 break; 213 164 } 214 165 } 166 + 167 + /* Make sure the total bw for ets zero bw group is 100% */ 168 + if (last_ets_zero_bw_tc != -1) 169 + tc_tx_bw[last_ets_zero_bw_tc] += 170 + MLX5E_MAX_BW_ALLOC % num_ets_zero_bw; 215 171 } 216 172 173 + /* If there are ETS BW 0, 174 + * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%. 175 + * Set group #0 to all the ETS BW 0 tcs and 176 + * equally splits the 100% BW between them 177 + * Report both group #0 and #1 as ETS type. 178 + * All the tcs in group #0 will be reported with 0% BW. 
179 + */ 217 180 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) 218 181 { 219 182 struct mlx5_core_dev *mdev = priv->mdev; ··· 253 188 return err; 254 189 255 190 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); 256 - 257 191 return err; 258 192 } 259 193 ··· 273 209 } 274 210 275 211 /* Validate Bandwidth Sum */ 276 - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 277 - if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { 278 - if (!ets->tc_tx_bw[i]) { 279 - netdev_err(netdev, 280 - "Failed to validate ETS: BW 0 is illegal\n"); 281 - return -EINVAL; 282 - } 283 - 212 + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 213 + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 284 214 bw_sum += ets->tc_tx_bw[i]; 285 - } 286 - } 287 215 288 216 if (bw_sum != 0 && bw_sum != 100) { 289 217 netdev_err(netdev, ··· 589 533 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, 590 534 int pgid, u8 *bw_pct) 591 535 { 592 - struct mlx5e_priv *priv = netdev_priv(netdev); 593 - struct mlx5_core_dev *mdev = priv->mdev; 536 + struct ieee_ets ets; 594 537 595 538 if (pgid >= CEE_DCBX_MAX_PGS) { 596 539 netdev_err(netdev, ··· 597 542 return; 598 543 } 599 544 600 - if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct)) 601 - *bw_pct = 0; 545 + mlx5e_dcbnl_ieee_getets(netdev, &ets); 546 + *bw_pct = ets.tc_tx_bw[pgid]; 602 547 } 603 548 604 549 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev, ··· 793 738 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; 794 739 ets.prio_tc[i] = i; 795 740 } 796 - 797 - memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa)); 798 741 799 742 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ 800 743 ets.prio_tc[0] = 1;
+54 -35
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 78 78 }; 79 79 80 80 struct mlx5e_tc_flow_parse_attr { 81 + struct ip_tunnel_info tun_info; 81 82 struct mlx5_flow_spec spec; 82 83 int num_mod_hdr_actions; 83 84 void *mod_hdr_actions; 85 + int mirred_ifindex; 84 86 }; 85 87 86 88 enum { ··· 324 322 static void mlx5e_detach_encap(struct mlx5e_priv *priv, 325 323 struct mlx5e_tc_flow *flow); 326 324 325 + static int mlx5e_attach_encap(struct mlx5e_priv *priv, 326 + struct ip_tunnel_info *tun_info, 327 + struct net_device *mirred_dev, 328 + struct net_device **encap_dev, 329 + struct mlx5e_tc_flow *flow); 330 + 327 331 static struct mlx5_flow_handle * 328 332 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 329 333 struct mlx5e_tc_flow_parse_attr *parse_attr, ··· 337 329 { 338 330 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 339 331 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 340 - struct mlx5_flow_handle *rule; 332 + struct net_device *out_dev, *encap_dev = NULL; 333 + struct mlx5_flow_handle *rule = NULL; 334 + struct mlx5e_rep_priv *rpriv; 335 + struct mlx5e_priv *out_priv; 341 336 int err; 337 + 338 + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { 339 + out_dev = __dev_get_by_index(dev_net(priv->netdev), 340 + attr->parse_attr->mirred_ifindex); 341 + err = mlx5e_attach_encap(priv, &parse_attr->tun_info, 342 + out_dev, &encap_dev, flow); 343 + if (err) { 344 + rule = ERR_PTR(err); 345 + if (err != -EAGAIN) 346 + goto err_attach_encap; 347 + } 348 + out_priv = netdev_priv(encap_dev); 349 + rpriv = out_priv->ppriv; 350 + attr->out_rep = rpriv->rep; 351 + } 342 352 343 353 err = mlx5_eswitch_add_vlan_action(esw, attr); 344 354 if (err) { ··· 373 347 } 374 348 } 375 349 376 - rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); 377 - if (IS_ERR(rule)) 378 - goto err_add_rule; 379 - 350 + /* we get here if (1) there's no error (rule being null) or when 351 + * (2) there's an encap action and we're on -EAGAIN (no valid neigh) 352 + */ 353 + if (rule != ERR_PTR(-EAGAIN)) { 354 + rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); 355 + if (IS_ERR(rule)) 356 + goto err_add_rule; 357 + } 380 358 return rule; 381 359 382 360 err_add_rule: ··· 391 361 err_add_vlan: 392 362 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) 393 363 mlx5e_detach_encap(priv, flow); 364 + err_attach_encap: 394 365 return rule; 395 366 } 396 367 ··· 420 389 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, 421 390 struct mlx5e_encap_entry *e) 422 391 { 392 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 393 + struct mlx5_esw_flow_attr *esw_attr; 423 394 struct mlx5e_tc_flow *flow; 424 395 int err; 425 396 ··· 437 404 mlx5e_rep_queue_neigh_stats_work(priv); 438 405 439 406 list_for_each_entry(flow, &e->flows, encap) { 440 - flow->esw_attr->encap_id = e->encap_id; 441 - flow->rule = mlx5e_tc_add_fdb_flow(priv, 442 - flow->esw_attr->parse_attr, 443 - flow); 407 + esw_attr = flow->esw_attr; 408 + esw_attr->encap_id = e->encap_id; 409 + flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); 444 410 if (IS_ERR(flow->rule)) { 445 411 err = PTR_ERR(flow->rule); 446 412 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", ··· 453 421 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, 454 422 struct mlx5e_encap_entry *e) 455 423 { 424 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 456 425 struct mlx5e_tc_flow *flow; 457 - struct mlx5_fc *counter; 458 426 459 427 list_for_each_entry(flow, &e->flows, encap) { 460 428 if (flow->flags & 
MLX5E_TC_FLOW_OFFLOADED) { 461 429 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; 462 - counter = mlx5_flow_rule_counter(flow->rule); 463 - mlx5_del_flow_rules(flow->rule); 464 - mlx5_fc_destroy(priv->mdev, counter); 430 + mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr); 465 431 } 466 432 } 467 433 ··· 1972 1942 1973 1943 if (is_tcf_mirred_egress_redirect(a)) { 1974 1944 int ifindex = tcf_mirred_ifindex(a); 1975 - struct net_device *out_dev, *encap_dev = NULL; 1945 + struct net_device *out_dev; 1976 1946 struct mlx5e_priv *out_priv; 1977 1947 1978 1948 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); ··· 1985 1955 rpriv = out_priv->ppriv; 1986 1956 attr->out_rep = rpriv->rep; 1987 1957 } else if (encap) { 1988 - err = mlx5e_attach_encap(priv, info, 1989 - out_dev, &encap_dev, flow); 1990 - if (err && err != -EAGAIN) 1991 - return err; 1958 + parse_attr->mirred_ifindex = ifindex; 1959 + parse_attr->tun_info = *info; 1960 + attr->parse_attr = parse_attr; 1992 1961 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | 1993 1962 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1994 1963 MLX5_FLOW_CONTEXT_ACTION_COUNT; 1995 - out_priv = netdev_priv(encap_dev); 1996 - rpriv = out_priv->ppriv; 1997 - attr->out_rep = rpriv->rep; 1998 - attr->parse_attr = parse_attr; 1964 + /* attr->out_rep is resolved when we handle encap */ 1999 1965 } else { 2000 1966 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", 2001 1967 priv->netdev->name, out_dev->name); ··· 2073 2047 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 2074 2048 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); 2075 2049 if (err < 0) 2076 - goto err_handle_encap_flow; 2050 + goto err_free; 2077 2051 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); 2078 2052 } else { 2079 2053 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); ··· 2084 2058 2085 2059 if (IS_ERR(flow->rule)) { 2086 2060 err = PTR_ERR(flow->rule); 2087 - goto err_free; 2061 + if (err != -EAGAIN) 2062 + goto err_free; 2088 2063 } 2089 2064 2090 - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; 2065 + if (err != -EAGAIN) 2066 + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; 2067 + 2091 2068 err = rhashtable_insert_fast(&tc->ht, &flow->node, 2092 2069 tc->ht_params); 2093 2070 if (err) ··· 2103 2074 2104 2075 err_del_rule: 2105 2076 mlx5e_tc_del_flow(priv, flow); 2106 - 2107 - err_handle_encap_flow: 2108 - if (err == -EAGAIN) { 2109 - err = rhashtable_insert_fast(&tc->ht, &flow->node, 2110 - tc->ht_params); 2111 - if (err) 2112 - mlx5e_tc_del_flow(priv, flow); 2113 - else 2114 - return 0; 2115 - } 2116 2077 2117 2078 err_free: 2118 2079 kvfree(parse_attr);
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/health.c
···
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }
 
+21
drivers/net/ethernet/mellanox/mlx5/core/port.c
···
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
 
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group)
+{
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+	void *ets_tcn_conf;
+	int err;
+
+	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+	if (err)
+		return err;
+
+	ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+				    tc_configuration[tc]);
+
+	*tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+			     group);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
 {
 	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
+2
drivers/net/ethernet/netronome/nfp/flower/action.c
···
 	 */
 	if (!switchdev_port_same_parent_id(in_dev, out_dev))
 		return -EOPNOTSUPP;
+	if (!nfp_netdev_is_nfp_repr(out_dev))
+		return -EOPNOTSUPP;
 
 	output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
 	if (!output->port)
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
···
 		plat_dat->axi->axi_wr_osr_lmt--;
 	}
 
-	if (of_property_read_u32(np, "read,read-requests",
+	if (of_property_read_u32(np, "snps,read-requests",
 				 &plat_dat->axi->axi_rd_osr_lmt)) {
 		/**
 		 * Since the register has a reset value of 1, if property
+7
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
···
 	plat->rx_queues_to_use = 1;
 	plat->tx_queues_to_use = 1;
 
+	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
+	 * to always set this, otherwise Queue will be classified as AVB
+	 * (because MTL_QUEUE_AVB = 0).
+	 */
+	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+
 	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
 	if (!rx_node)
 		return;
+2 -2
drivers/net/ipvlan/ipvtap.c
···
 {
 	int err;
 
-	err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
-
+	err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
+			      THIS_MODULE);
 	if (err)
 		goto out1;
 
+2 -2
drivers/net/macvtap.c
···
 {
 	int err;
 
-	err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
-
+	err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
+			      THIS_MODULE);
 	if (err)
 		goto out1;
 
+12 -11
drivers/net/tap.c
··· 517 517 &tap_proto, 0); 518 518 if (!q) 519 519 goto err; 520 + if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) { 521 + sk_free(&q->sk); 522 + goto err; 523 + } 520 524 521 525 RCU_INIT_POINTER(q->sock.wq, &q->wq); 522 526 init_waitqueue_head(&q->wq.wait); ··· 544 540 if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) 545 541 sock_set_flag(&q->sk, SOCK_ZEROCOPY); 546 542 547 - err = -ENOMEM; 548 - if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) 549 - goto err_array; 550 - 551 543 err = tap_set_queue(tap, file, q); 552 - if (err) 553 - goto err_queue; 544 + if (err) { 545 + /* tap_sock_destruct() will take care of freeing skb_array */ 546 + goto err_put; 547 + } 554 548 555 549 dev_put(tap->dev); 556 550 557 551 rtnl_unlock(); 558 552 return err; 559 553 560 - err_queue: 561 - skb_array_cleanup(&q->skb_array); 562 - err_array: 554 + err_put: 563 555 sock_put(&q->sk); 564 556 err: 565 557 if (tap) ··· 1249 1249 return 0; 1250 1250 } 1251 1251 1252 - int tap_create_cdev(struct cdev *tap_cdev, 1253 - dev_t *tap_major, const char *device_name) 1252 + int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, 1253 + const char *device_name, struct module *module) 1254 1254 { 1255 1255 int err; 1256 1256 ··· 1259 1259 goto out1; 1260 1260 1261 1261 cdev_init(tap_cdev, &tap_fops); 1262 + tap_cdev->owner = module; 1262 1263 err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); 1263 1264 if (err) 1264 1265 goto out2;
+2 -1
drivers/net/tun.c
···
 	buflen += SKB_DATA_ALIGN(len + pad);
 	rcu_read_unlock();
 
+	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
 		return ERR_PTR(-ENOMEM);
 
···
 		if (!dev)
 			return -ENOMEM;
 		err = dev_get_valid_name(net, dev, name);
-		if (err)
+		if (err < 0)
 			goto err_free_dev;
 
 		dev_net_set(dev, net);
+14
drivers/net/usb/cdc_ether.c
···
 #define HP_VENDOR_ID		0x03f0
 #define MICROSOFT_VENDOR_ID	0x045e
 #define UBLOX_VENDOR_ID		0x1546
+#define TPLINK_VENDOR_ID	0x2357
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
···
 	.driver_info = 0,
 },
 
+/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
···
 	USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
 				      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
 	.driver_info = (kernel_ulong_t)&wwan_info,
+}, {
+	/* Huawei ME906 and ME909 */
+	USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
 }, {
 	/* ZTE modules */
 	USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
+2
drivers/net/usb/r8152.c
···
 #define VENDOR_ID_LENOVO		0x17ef
 #define VENDOR_ID_LINKSYS		0x13b1
 #define VENDOR_ID_NVIDIA		0x0955
+#define VENDOR_ID_TPLINK		0x2357
 
 #define MCU_TYPE_PLA			0x0100
 #define MCU_TYPE_USB			0x0000
···
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
 	{}
+2 -2
include/linux/if_tap.h
···
 int tap_get_minor(dev_t major, struct tap_dev *tap);
 void tap_free_minor(dev_t major, struct tap_dev *tap);
 int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
-		    dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+		    const char *device_name, struct module *module);
 void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
 
 #endif /*_LINUX_IF_TAP_H_*/
+2
include/linux/mlx5/port.h
···
 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
 			    u8 prio, u8 *tc);
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
 				u8 tc, u8 *bw_pct);
+17 -17
include/linux/sctp.h
···
 	__be32 tsn;
 	__be16 stream;
 	__be16 ssn;
-	__be32 ppid;
+	__u32 ppid;
 	__u8 payload[0];
 };
 
···
 
 struct sctp_strreset_outreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u32 response_seq;
-	__u32 send_reset_at_tsn;
-	__u16 list_of_streams[0];
+	__be32 request_seq;
+	__be32 response_seq;
+	__be32 send_reset_at_tsn;
+	__be16 list_of_streams[0];
 };
 
 struct sctp_strreset_inreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u16 list_of_streams[0];
+	__be32 request_seq;
+	__be16 list_of_streams[0];
 };
 
 struct sctp_strreset_tsnreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
+	__be32 request_seq;
 };
 
 struct sctp_strreset_addstrm {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u16 number_of_streams;
-	__u16 reserved;
+	__be32 request_seq;
+	__be16 number_of_streams;
+	__be16 reserved;
 };
 
 enum {
···
 
 struct sctp_strreset_resp {
 	struct sctp_paramhdr param_hdr;
-	__u32 response_seq;
-	__u32 result;
+	__be32 response_seq;
+	__be32 result;
 };
 
 struct sctp_strreset_resptsn {
 	struct sctp_paramhdr param_hdr;
-	__u32 response_seq;
-	__u32 result;
-	__u32 senders_next_tsn;
-	__u32 receivers_next_tsn;
+	__be32 response_seq;
+	__be32 result;
+	__be32 senders_next_tsn;
+	__be32 receivers_next_tsn;
 };
 
 #endif /* __LINUX_SCTP_H__ */
+6 -3
include/net/fq_impl.h
···
 			fq_flow_get_default_t get_default_func)
 {
 	struct fq_flow *flow;
+	bool oom;
 
 	lockdep_assert_held(&fq->lock);
 
···
 	}
 
 	__skb_queue_tail(&flow->queue, skb);
-
-	if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
+	oom = (fq->memory_usage > fq->memory_limit);
+	while (fq->backlog > fq->limit || oom) {
 		flow = list_first_entry_or_null(&fq->backlogs,
 						struct fq_flow,
 						backlogchain);
···
 
 		flow->tin->overlimit++;
 		fq->overlimit++;
-		if (fq->memory_usage > fq->memory_limit)
+		if (oom) {
 			fq->overmemory++;
+			oom = (fq->memory_usage > fq->memory_limit);
+		}
 	}
 }
 
+6
include/net/inet_sock.h
···
 	return sk->sk_bound_dev_if;
 }
 
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+	return rcu_dereference_check(ireq->ireq_opt,
+				     refcount_read(&ireq->req.rsk_refcnt) > 0);
+}
+
 struct inet_cork {
 	unsigned int		flags;
 	__be32			addr;
+3
include/net/pkt_cls.h
···
 #define __NET_PKT_CLS_H
 
 #include <linux/pkt_cls.h>
+#include <linux/workqueue.h>
 #include <net/sch_generic.h>
 #include <net/act_api.h>
 
···
 
 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+
+bool tcf_queue_work(struct work_struct *work);
 
 #ifdef CONFIG_NET_CLS
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
+2
include/net/sch_generic.h
···
 #include <linux/dynamic_queue_limits.h>
 #include <linux/list.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
 
···
 
 struct tcf_block {
 	struct list_head chain_list;
+	struct work_struct work;
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+1 -1
include/net/sctp/sm.h
···
 					      struct sctp_fwdtsn_skip *skiplist);
 struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
 struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
-					__u16 stream_num, __u16 *stream_list,
+					__u16 stream_num, __be16 *stream_list,
 					bool out, bool in);
 struct sctp_chunk *sctp_make_strreset_tsnreq(
 					const struct sctp_association *asoc);
+1 -1
include/net/sctp/ulpevent.h
···
 
 struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
 	const struct sctp_association *asoc, __u16 flags,
-	__u16 stream_num, __u16 *stream_list, gfp_t gfp);
+	__u16 stream_num, __be16 *stream_list, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
 	const struct sctp_association *asoc, __u16 flags,
+1 -2
include/net/strparser.h
···
 	u32 unrecov_intr : 1;
 
 	struct sk_buff **skb_nextp;
-	struct timer_list msg_timer;
 	struct sk_buff *skb_head;
 	unsigned int need_bytes;
-	struct delayed_work delayed_work;
+	struct delayed_work msg_timer_work;
 	struct work_struct work;
 	struct strp_stats stats;
 	struct strp_callbacks cb;
+1
include/net/tcp.h
···
 			__u32 key;
 			__u32 flags;
 			struct bpf_map *map;
+			void *data_end;
 		} bpf;
 	};
 };
+3 -3
include/uapi/linux/bpf.h
···
  *     @map: pointer to sockmap
  *     @key: key to lookup sock in map
  *     @flags: reserved for future use
- *     Return: SK_REDIRECT
+ *     Return: SK_PASS
  *
  * int bpf_sock_map_update(skops, map, key, flags)
  *     @skops: pointer to bpf_sock_ops
···
 };
 
 enum sk_action {
-	SK_ABORTED = 0,
-	SK_DROP,
+	SK_DROP = 0,
+	SK_PASS,
 	SK_REDIRECT,
 };
 
+1 -1
include/uapi/linux/sctp.h
···
 	__u16 sre_type;
 	__u16 sre_flags;
 	__u32 sre_length;
-	__u16 sre_error;
+	__be16 sre_error;
 	sctp_assoc_t sre_assoc_id;
 	__u8 sre_data[0];
 };
+12 -3
kernel/bpf/sockmap.c
··· 93 93 return rcu_dereference_sk_user_data(sk); 94 94 } 95 95 96 + /* compute the linear packet data range [data, data_end) for skb when 97 + * sk_skb type programs are in use. 98 + */ 99 + static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) 100 + { 101 + TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); 102 + } 103 + 96 104 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) 97 105 { 98 106 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); ··· 116 108 */ 117 109 TCP_SKB_CB(skb)->bpf.map = NULL; 118 110 skb->sk = psock->sock; 119 - bpf_compute_data_end(skb); 111 + bpf_compute_data_end_sk_skb(skb); 120 112 preempt_disable(); 121 113 rc = (*prog->bpf_func)(skb, prog->insnsi); 122 114 preempt_enable(); 123 115 skb->sk = NULL; 124 116 125 - return rc; 117 + return rc == SK_PASS ? 118 + (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP; 126 119 } 127 120 128 121 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) ··· 377 368 * any socket yet. 378 369 */ 379 370 skb->sk = psock->sock; 380 - bpf_compute_data_end(skb); 371 + bpf_compute_data_end_sk_skb(skb); 381 372 rc = (*prog->bpf_func)(skb, prog->insnsi); 382 373 skb->sk = NULL; 383 374 rcu_read_unlock();
+29 -3
net/core/filter.c
··· 1844 1844 { 1845 1845 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 1846 1846 1847 + /* If user passes invalid input drop the packet. */ 1847 1848 if (unlikely(flags)) 1848 - return SK_ABORTED; 1849 + return SK_DROP; 1849 1850 1850 1851 tcb->bpf.key = key; 1851 1852 tcb->bpf.flags = flags; 1852 1853 tcb->bpf.map = map; 1853 1854 1854 - return SK_REDIRECT; 1855 + return SK_PASS; 1855 1856 } 1856 1857 1857 1858 struct sock *do_sk_redirect_map(struct sk_buff *skb) ··· 4244 4243 return insn - insn_buf; 4245 4244 } 4246 4245 4246 + static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, 4247 + const struct bpf_insn *si, 4248 + struct bpf_insn *insn_buf, 4249 + struct bpf_prog *prog, u32 *target_size) 4250 + { 4251 + struct bpf_insn *insn = insn_buf; 4252 + int off; 4253 + 4254 + switch (si->off) { 4255 + case offsetof(struct __sk_buff, data_end): 4256 + off = si->off; 4257 + off -= offsetof(struct __sk_buff, data_end); 4258 + off += offsetof(struct sk_buff, cb); 4259 + off += offsetof(struct tcp_skb_cb, bpf.data_end); 4260 + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 4261 + si->src_reg, off); 4262 + break; 4263 + default: 4264 + return bpf_convert_ctx_access(type, si, insn_buf, prog, 4265 + target_size); 4266 + } 4267 + 4268 + return insn - insn_buf; 4269 + } 4270 + 4247 4271 const struct bpf_verifier_ops sk_filter_prog_ops = { 4248 4272 .get_func_proto = sk_filter_func_proto, 4249 4273 .is_valid_access = sk_filter_is_valid_access, ··· 4327 4301 const struct bpf_verifier_ops sk_skb_prog_ops = { 4328 4302 .get_func_proto = sk_skb_func_proto, 4329 4303 .is_valid_access = sk_skb_is_valid_access, 4330 - .convert_ctx_access = bpf_convert_ctx_access, 4304 + .convert_ctx_access = sk_skb_convert_ctx_access, 4331 4305 .gen_prologue = sk_skb_prologue, 4332 4306 }; 4333 4307
+1 -1
net/dccp/ipv4.c
···
 							      ireq->ir_rmt_addr);
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    rcu_dereference(ireq->ireq_opt));
+					    ireq_opt_deref(ireq));
 		err = net_xmit_eval(err);
 	}
 
+4 -3
net/dsa/dsa2.c
···
 		if (!ethernet)
 			return -EINVAL;
 		ethernet_dev = of_find_net_device_by_node(ethernet);
+		if (!ethernet_dev)
+			return -EPROBE_DEFER;
 	} else {
 		ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
+		if (!ethernet_dev)
+			return -EPROBE_DEFER;
 		dev_put(ethernet_dev);
 	}
-
-	if (!ethernet_dev)
-		return -EPROBE_DEFER;
 
 	if (!dst->cpu_dp) {
 		dst->cpu_dp = port;
+2 -1
net/ipv4/inet_connection_sock.c
···
 	struct ip_options_rcu *opt;
 	struct rtable *rt;
 
-	opt = rcu_dereference(ireq->ireq_opt);
+	opt = ireq_opt_deref(ireq);
+
 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+42 -17
net/ipv4/ipip.c
··· 128 128 129 129 static int ipip_err(struct sk_buff *skb, u32 info) 130 130 { 131 - 132 - /* All the routers (except for Linux) return only 133 - 8 bytes of packet payload. It means, that precise relaying of 134 - ICMP in the real Internet is absolutely infeasible. 135 - */ 131 + /* All the routers (except for Linux) return only 132 + * 8 bytes of packet payload. It means, that precise relaying of 133 + * ICMP in the real Internet is absolutely infeasible. 134 + */ 136 135 struct net *net = dev_net(skb->dev); 137 136 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); 138 137 const struct iphdr *iph = (const struct iphdr *)skb->data; 139 - struct ip_tunnel *t; 140 - int err; 141 138 const int type = icmp_hdr(skb)->type; 142 139 const int code = icmp_hdr(skb)->code; 140 + struct ip_tunnel *t; 141 + int err = 0; 143 142 144 - err = -ENOENT; 143 + switch (type) { 144 + case ICMP_DEST_UNREACH: 145 + switch (code) { 146 + case ICMP_SR_FAILED: 147 + /* Impossible event. */ 148 + goto out; 149 + default: 150 + /* All others are translated to HOST_UNREACH. 151 + * rfc2003 contains "deep thoughts" about NET_UNREACH, 152 + * I believe they are just ether pollution. --ANK 153 + */ 154 + break; 155 + } 156 + break; 157 + 158 + case ICMP_TIME_EXCEEDED: 159 + if (code != ICMP_EXC_TTL) 160 + goto out; 161 + break; 162 + 163 + case ICMP_REDIRECT: 164 + break; 165 + 166 + default: 167 + goto out; 168 + } 169 + 145 170 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 146 171 iph->daddr, iph->saddr, 0); 147 - if (!t) 172 + if (!t) { 173 + err = -ENOENT; 148 174 goto out; 175 + } 149 176 150 177 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 151 - ipv4_update_pmtu(skb, dev_net(skb->dev), info, 152 - t->parms.link, 0, iph->protocol, 0); 153 - err = 0; 178 + ipv4_update_pmtu(skb, net, info, t->parms.link, 0, 179 + iph->protocol, 0); 154 180 goto out; 155 181 } 156 182 157 183 if (type == ICMP_REDIRECT) { 158 - ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 159 - iph->protocol, 0); 160 - err = 0; 184 + ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0); 161 185 goto out; 162 186 } 163 187 164 - if (t->parms.iph.daddr == 0) 188 + if (t->parms.iph.daddr == 0) { 189 + err = -ENOENT; 165 190 goto out; 191 + } 166 192 167 - err = 0; 168 193 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 169 194 goto out; 170 195
+1 -1
net/ipv4/tcp_ipv4.c
···
 
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    rcu_dereference(ireq->ireq_opt));
+					    ireq_opt_deref(ireq));
 		err = net_xmit_eval(err);
 	}
 
+7 -3
net/ipv4/tcp_output.c
···
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tp->lost_out > tp->retrans_out &&
-	    tp->snd_cwnd > tcp_packets_in_flight(tp))
+	    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+		tcp_mstamp_refresh(tp);
 		tcp_xmit_retransmit_queue(sk);
+	}
 
 	tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
 		       0, GFP_ATOMIC);
···
 
 	sent_pkts = 0;
 
+	tcp_mstamp_refresh(tp);
 	if (!push_one) {
 		/* Do MTU probing. */
 		result = tcp_mtu_probe(sk);
···
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
-	tcp_mstamp_refresh(tp);
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
 
···
 		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
 			     -ENOBUFS;
-		if (!err)
+		if (!err) {
 			skb->skb_mstamp = tp->tcp_mstamp;
+			tcp_rate_skb_sent(sk, skb);
+		}
 	} else {
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
+14 -6
net/ipv6/ip6_gre.c
··· 408 408 case ICMPV6_DEST_UNREACH: 409 409 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 410 410 t->parms.name); 411 - break; 411 + if (code != ICMPV6_PORT_UNREACH) 412 + break; 413 + return; 412 414 case ICMPV6_TIME_EXCEED: 413 415 if (code == ICMPV6_EXC_HOPLIMIT) { 414 416 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 415 417 t->parms.name); 418 + break; 416 419 } 417 - break; 420 + return; 418 421 case ICMPV6_PARAMPROB: 419 422 teli = 0; 420 423 if (code == ICMPV6_HDR_FIELD) ··· 433 430 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 434 431 t->parms.name); 435 432 } 436 - break; 433 + return; 437 434 case ICMPV6_PKT_TOOBIG: 438 435 mtu = be32_to_cpu(info) - offset - t->tun_hlen; 439 436 if (t->dev->type == ARPHRD_ETHER) ··· 441 438 if (mtu < IPV6_MIN_MTU) 442 439 mtu = IPV6_MIN_MTU; 443 440 t->dev->mtu = mtu; 444 - break; 441 + return; 445 442 } 446 443 447 444 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) ··· 503 500 __u32 *pmtu, __be16 proto) 504 501 { 505 502 struct ip6_tnl *tunnel = netdev_priv(dev); 506 - __be16 protocol = (dev->type == ARPHRD_ETHER) ? 507 - htons(ETH_P_TEB) : proto; 503 + struct dst_entry *dst = skb_dst(skb); 504 + __be16 protocol; 508 505 509 506 if (dev->type == ARPHRD_ETHER) 510 507 IPCB(skb)->flags = 0; ··· 518 515 tunnel->o_seqno++; 519 516 520 517 /* Push GRE header. */ 518 + protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; 521 519 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, 522 520 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); 521 + 522 + /* TooBig packet may have updated dst->dev's mtu */ 523 + if (dst && dst_mtu(dst) > dst->dev->mtu) 524 + dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu); 523 525 524 526 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 525 527 NEXTHDR_GRE);
+6 -6
net/mac80211/cfg.c
···
 	if (!ieee80211_sdata_running(sdata))
 		return -ENETDOWN;
 
-	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
-		ret = drv_set_bitrate_mask(local, sdata, mask);
-		if (ret)
-			return ret;
-	}
-
 	/*
 	 * If active validate the setting and reject it if it doesn't leave
 	 * at least one basic rate usable, since we really have to be able
···
 
 		if (!(mask->control[band].legacy & basic_rates))
 			return -EINVAL;
+	}
+
+	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+		ret = drv_set_bitrate_mask(local, sdata, mask);
+		if (ret)
+			return ret;
 	}
 
 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
+35 -2
net/mac80211/key.c
··· 19 19 #include <linux/slab.h> 20 20 #include <linux/export.h> 21 21 #include <net/mac80211.h> 22 + #include <crypto/algapi.h> 22 23 #include <asm/unaligned.h> 23 24 #include "ieee80211_i.h" 24 25 #include "driver-ops.h" ··· 610 609 ieee80211_key_free_common(key); 611 610 } 612 611 612 + static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, 613 + struct ieee80211_key *old, 614 + struct ieee80211_key *new) 615 + { 616 + u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; 617 + u8 *tk_old, *tk_new; 618 + 619 + if (!old || new->conf.keylen != old->conf.keylen) 620 + return false; 621 + 622 + tk_old = old->conf.key; 623 + tk_new = new->conf.key; 624 + 625 + /* 626 + * In station mode, don't compare the TX MIC key, as it's never used 627 + * and offloaded rekeying may not care to send it to the host. This 628 + * is the case in iwlwifi, for example. 629 + */ 630 + if (sdata->vif.type == NL80211_IFTYPE_STATION && 631 + new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && 632 + new->conf.keylen == WLAN_KEY_LEN_TKIP && 633 + !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 634 + memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); 635 + memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); 636 + memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); 637 + memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); 638 + tk_old = tkip_old; 639 + tk_new = tkip_new; 640 + } 641 + 642 + return !crypto_memneq(tk_old, tk_new, new->conf.keylen); 643 + } 644 + 613 645 int ieee80211_key_link(struct ieee80211_key *key, 614 646 struct ieee80211_sub_if_data *sdata, 615 647 struct sta_info *sta) ··· 668 634 * Silently accept key re-installation without really installing the 669 635 * new version of the key to avoid nonce reuse or replay issues. 670 636 */ 671 - if (old_key && key->conf.keylen == old_key->conf.keylen && 672 - !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) { 637 + if (ieee80211_key_identical(sdata, old_key, key)) { 673 638 ieee80211_key_free_unused(key); 674 639 ret = 0; 675 640 goto out;
+8 -8
net/rds/ib_send.c
···
 			}
 		}
 
-		rds_ib_set_wr_signal_state(ic, send, 0);
+		rds_ib_set_wr_signal_state(ic, send, false);
 
 		/*
 		 * Always signal the last one if we're stopping due to flow control.
 		 */
-		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
-			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
+			rds_ib_set_wr_signal_state(ic, send, true);
+			send->s_wr.send_flags |= IB_SEND_SOLICITED;
+		}
 
 		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
 			nr_sig++;
···
 	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_op = ic->i_data_op;
 		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
-		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
-			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
-			prev->s_wr.send_flags |= IB_SEND_SIGNALED;
-			nr_sig++;
-		}
+		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
+			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
 		ic->i_data_op = NULL;
 	}
···
 		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
 		send->s_atomic_wr.swap_mask = 0;
 	}
+	send->s_wr.send_flags = 0;
 	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
 	send->s_atomic_wr.wr.num_sge = 1;
 	send->s_atomic_wr.wr.next = NULL;
+1
net/sched/act_sample.c
··· 264 264 265 265 static void __exit sample_cleanup_module(void) 266 266 { 267 + rcu_barrier(); 267 268 tcf_unregister_action(&act_sample_ops, &sample_net_ops); 268 269 } 269 270
+52 -17
net/sched/cls_api.c
··· 77 77 } 78 78 EXPORT_SYMBOL(register_tcf_proto_ops); 79 79 80 + static struct workqueue_struct *tc_filter_wq; 81 + 80 82 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) 81 83 { 82 84 struct tcf_proto_ops *t; ··· 88 86 * tcf_proto_ops's destroy() handler. 89 87 */ 90 88 rcu_barrier(); 89 + flush_workqueue(tc_filter_wq); 91 90 92 91 write_lock(&cls_mod_lock); 93 92 list_for_each_entry(t, &tcf_proto_base, head) { ··· 102 99 return rc; 103 100 } 104 101 EXPORT_SYMBOL(unregister_tcf_proto_ops); 102 + 103 + bool tcf_queue_work(struct work_struct *work) 104 + { 105 + return queue_work(tc_filter_wq, work); 106 + } 107 + EXPORT_SYMBOL(tcf_queue_work); 105 108 106 109 /* Select new prio value from the range, managed by kernel. */ 107 110 ··· 275 266 } 276 267 EXPORT_SYMBOL(tcf_block_get); 277 268 278 - void tcf_block_put(struct tcf_block *block) 269 + static void tcf_block_put_final(struct work_struct *work) 279 270 { 271 + struct tcf_block *block = container_of(work, struct tcf_block, work); 280 272 struct tcf_chain *chain, *tmp; 281 273 282 - if (!block) 283 - return; 274 + /* At this point, all the chains should have refcnt == 1. */ 275 + rtnl_lock(); 276 + list_for_each_entry_safe(chain, tmp, &block->chain_list, list) 277 + tcf_chain_put(chain); 278 + rtnl_unlock(); 279 + kfree(block); 280 + } 284 281 285 - /* XXX: Standalone actions are not allowed to jump to any chain, and 286 - * bound actions should be all removed after flushing. However, 287 - * filters are destroyed in RCU callbacks, we have to hold the chains 288 - * first, otherwise we would always race with RCU callbacks on this list 289 - * without proper locking. 290 - */ 282 + /* XXX: Standalone actions are not allowed to jump to any chain, and bound 283 + * actions should be all removed after flushing. However, filters are destroyed 284 + * in RCU callbacks, we have to hold the chains first, otherwise we would 285 + * always race with RCU callbacks on this list without proper locking. 286 + */ 287 + static void tcf_block_put_deferred(struct work_struct *work) 288 + { 289 + struct tcf_block *block = container_of(work, struct tcf_block, work); 290 + struct tcf_chain *chain; 291 291 292 - /* Wait for existing RCU callbacks to cool down. */ 293 - rcu_barrier(); 294 - 292 + rtnl_lock(); 295 293 /* Hold a refcnt for all chains, except 0, in case they are gone. */ 296 294 list_for_each_entry(chain, &block->chain_list, list) 297 295 if (chain->index) ··· 308 292 list_for_each_entry(chain, &block->chain_list, list) 309 293 tcf_chain_flush(chain); 310 294 311 - /* Wait for RCU callbacks to release the reference count. */ 295 + INIT_WORK(&block->work, tcf_block_put_final); 296 + /* Wait for RCU callbacks to release the reference count and make 297 + * sure their works have been queued before this. 298 + */ 312 299 rcu_barrier(); 300 + tcf_queue_work(&block->work); 301 + rtnl_unlock(); 302 + } 313 303 314 - /* At this point, all the chains should have refcnt == 1. */ 315 - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) 316 - tcf_chain_put(chain); 317 - kfree(block); 304 + void tcf_block_put(struct tcf_block *block) 305 + { 306 + if (!block) 307 + return; 308 + 309 + INIT_WORK(&block->work, tcf_block_put_deferred); 310 + /* Wait for existing RCU callbacks to cool down, make sure their works 311 + * have been queued before this. We can not flush pending works here 312 + * because we are holding the RTNL lock. 
313 + */ 314 + rcu_barrier(); 315 + tcf_queue_work(&block->work); 318 316 } 319 317 EXPORT_SYMBOL(tcf_block_put); 320 318 ··· 909 879 #ifdef CONFIG_NET_CLS_ACT 910 880 LIST_HEAD(actions); 911 881 882 + ASSERT_RTNL(); 912 883 tcf_exts_to_list(exts, &actions); 913 884 tcf_action_destroy(&actions, TCA_ACT_UNBIND); 914 885 kfree(exts->actions); ··· 1061 1030 1062 1031 static int __init tc_filter_init(void) 1063 1032 { 1033 + tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 1034 + if (!tc_filter_wq) 1035 + return -ENOMEM; 1036 + 1064 1037 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); 1065 1038 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); 1066 1039 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
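All of the cls_* filter conversions that follow share one shape: the rcu_head in the filter struct gains a union'd work_struct, the RCU callback only queues a work item on the ordered tc_filter_wq created above, and the real teardown runs later in process context under rtnl_lock(), which tcf_exts_destroy() now asserts (the ASSERT_RTNL() in the hunk above). A condensed sketch with placeholder foo_* names, not code from any one of the patches:

    struct foo_filter {
            struct tcf_exts exts;
            union {                 /* same storage, two destruction stages */
                    struct work_struct work;
                    struct rcu_head rcu;
            };
    };

    static void foo_delete_filter_work(struct work_struct *work)
    {
            struct foo_filter *f = container_of(work, struct foo_filter, work);

            rtnl_lock();                    /* tcf_exts_destroy() requires RTNL */
            tcf_exts_destroy(&f->exts);
            kfree(f);
            rtnl_unlock();
    }

    static void foo_delete_filter(struct rcu_head *head) /* call_rcu() callback */
    {
            struct foo_filter *f = container_of(head, struct foo_filter, rcu);

            INIT_WORK(&f->work, foo_delete_filter_work);
            tcf_queue_work(&f->work);       /* defer to tc_filter_wq */
    }

The rcu_barrier() calls in the cls_api.c hunks only guarantee that these RCU callbacks have run, i.e. that the work items are queued; the queued works themselves are flushed with flush_workqueue(tc_filter_wq) in unregister_tcf_proto_ops(), since tcf_block_put() cannot flush while holding the RTNL lock.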
+18 -4
net/sched/cls_basic.c
··· 34 34 struct tcf_result res; 35 35 struct tcf_proto *tp; 36 36 struct list_head link; 37 - struct rcu_head rcu; 37 + union { 38 + struct work_struct work; 39 + struct rcu_head rcu; 40 + }; 38 41 }; 39 42 40 43 static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, ··· 85 82 return 0; 86 83 } 87 84 85 + static void basic_delete_filter_work(struct work_struct *work) 86 + { 87 + struct basic_filter *f = container_of(work, struct basic_filter, work); 88 + 89 + rtnl_lock(); 90 + tcf_exts_destroy(&f->exts); 91 + tcf_em_tree_destroy(&f->ematches); 92 + rtnl_unlock(); 93 + 94 + kfree(f); 95 + } 96 + 88 97 static void basic_delete_filter(struct rcu_head *head) 89 98 { 90 99 struct basic_filter *f = container_of(head, struct basic_filter, rcu); 91 100 92 - tcf_exts_destroy(&f->exts); 93 - tcf_em_tree_destroy(&f->ematches); 94 - kfree(f); 101 + INIT_WORK(&f->work, basic_delete_filter_work); 102 + tcf_queue_work(&f->work); 95 103 } 96 104 97 105 static void basic_destroy(struct tcf_proto *tp)
+17 -2
net/sched/cls_bpf.c
··· 49 49 struct sock_filter *bpf_ops; 50 50 const char *bpf_name; 51 51 struct tcf_proto *tp; 52 - struct rcu_head rcu; 52 + union { 53 + struct work_struct work; 54 + struct rcu_head rcu; 55 + }; 53 56 }; 54 57 55 58 static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { ··· 260 257 kfree(prog); 261 258 } 262 259 260 + static void cls_bpf_delete_prog_work(struct work_struct *work) 261 + { 262 + struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work); 263 + 264 + rtnl_lock(); 265 + __cls_bpf_delete_prog(prog); 266 + rtnl_unlock(); 267 + } 268 + 263 269 static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu) 264 270 { 265 - __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu)); 271 + struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu); 272 + 273 + INIT_WORK(&prog->work, cls_bpf_delete_prog_work); 274 + tcf_queue_work(&prog->work); 266 275 } 267 276 268 277 static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
+18 -4
net/sched/cls_cgroup.c
··· 23 23 struct tcf_exts exts; 24 24 struct tcf_ematch_tree ematches; 25 25 struct tcf_proto *tp; 26 - struct rcu_head rcu; 26 + union { 27 + struct work_struct work; 28 + struct rcu_head rcu; 29 + }; 27 30 }; 28 31 29 32 static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, ··· 60 57 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 61 58 }; 62 59 60 + static void cls_cgroup_destroy_work(struct work_struct *work) 61 + { 62 + struct cls_cgroup_head *head = container_of(work, 63 + struct cls_cgroup_head, 64 + work); 65 + rtnl_lock(); 66 + tcf_exts_destroy(&head->exts); 67 + tcf_em_tree_destroy(&head->ematches); 68 + kfree(head); 69 + rtnl_unlock(); 70 + } 71 + 63 72 static void cls_cgroup_destroy_rcu(struct rcu_head *root) 64 73 { 65 74 struct cls_cgroup_head *head = container_of(root, 66 75 struct cls_cgroup_head, 67 76 rcu); 68 77 69 - tcf_exts_destroy(&head->exts); 70 - tcf_em_tree_destroy(&head->ematches); 71 - kfree(head); 78 + INIT_WORK(&head->work, cls_cgroup_destroy_work); 79 + tcf_queue_work(&head->work); 72 80 } 73 81 74 82 static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
+16 -3
net/sched/cls_flow.c
··· 57 57 u32 divisor; 58 58 u32 baseclass; 59 59 u32 hashrnd; 60 - struct rcu_head rcu; 60 + union { 61 + struct work_struct work; 62 + struct rcu_head rcu; 63 + }; 61 64 }; 62 65 63 66 static inline u32 addr_fold(void *addr) ··· 372 369 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 373 370 }; 374 371 375 - static void flow_destroy_filter(struct rcu_head *head) 372 + static void flow_destroy_filter_work(struct work_struct *work) 376 373 { 377 - struct flow_filter *f = container_of(head, struct flow_filter, rcu); 374 + struct flow_filter *f = container_of(work, struct flow_filter, work); 378 375 376 + rtnl_lock(); 379 377 del_timer_sync(&f->perturb_timer); 380 378 tcf_exts_destroy(&f->exts); 381 379 tcf_em_tree_destroy(&f->ematches); 382 380 kfree(f); 381 + rtnl_unlock(); 382 + } 383 + 384 + static void flow_destroy_filter(struct rcu_head *head) 385 + { 386 + struct flow_filter *f = container_of(head, struct flow_filter, rcu); 387 + 388 + INIT_WORK(&f->work, flow_destroy_filter_work); 389 + tcf_queue_work(&f->work); 383 390 } 384 391 385 392 static int flow_change(struct net *net, struct sk_buff *in_skb,
+16 -3
net/sched/cls_flower.c
··· 87 87 struct list_head list; 88 88 u32 handle; 89 89 u32 flags; 90 - struct rcu_head rcu; 90 + union { 91 + struct work_struct work; 92 + struct rcu_head rcu; 93 + }; 91 94 struct net_device *hw_dev; 92 95 }; 93 96 ··· 218 215 return 0; 219 216 } 220 217 218 + static void fl_destroy_filter_work(struct work_struct *work) 219 + { 220 + struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work); 221 + 222 + rtnl_lock(); 223 + tcf_exts_destroy(&f->exts); 224 + kfree(f); 225 + rtnl_unlock(); 226 + } 227 + 221 228 static void fl_destroy_filter(struct rcu_head *head) 222 229 { 223 230 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu); 224 231 225 - tcf_exts_destroy(&f->exts); 226 - kfree(f); 232 + INIT_WORK(&f->work, fl_destroy_filter_work); 233 + tcf_queue_work(&f->work); 227 234 } 228 235 229 236 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
+16 -3
net/sched/cls_fw.c
··· 46 46 #endif /* CONFIG_NET_CLS_IND */ 47 47 struct tcf_exts exts; 48 48 struct tcf_proto *tp; 49 - struct rcu_head rcu; 49 + union { 50 + struct work_struct work; 51 + struct rcu_head rcu; 52 + }; 50 53 }; 51 54 52 55 static u32 fw_hash(u32 handle) ··· 122 119 return 0; 123 120 } 124 121 122 + static void fw_delete_filter_work(struct work_struct *work) 123 + { 124 + struct fw_filter *f = container_of(work, struct fw_filter, work); 125 + 126 + rtnl_lock(); 127 + tcf_exts_destroy(&f->exts); 128 + kfree(f); 129 + rtnl_unlock(); 130 + } 131 + 125 132 static void fw_delete_filter(struct rcu_head *head) 126 133 { 127 134 struct fw_filter *f = container_of(head, struct fw_filter, rcu); 128 135 129 - tcf_exts_destroy(&f->exts); 130 - kfree(f); 136 + INIT_WORK(&f->work, fw_delete_filter_work); 137 + tcf_queue_work(&f->work); 131 138 } 132 139 133 140 static void fw_destroy(struct tcf_proto *tp)
+16 -3
net/sched/cls_matchall.c
··· 21 21 struct tcf_result res; 22 22 u32 handle; 23 23 u32 flags; 24 - struct rcu_head rcu; 24 + union { 25 + struct work_struct work; 26 + struct rcu_head rcu; 27 + }; 25 28 }; 26 29 27 30 static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, ··· 44 41 return 0; 45 42 } 46 43 44 + static void mall_destroy_work(struct work_struct *work) 45 + { 46 + struct cls_mall_head *head = container_of(work, struct cls_mall_head, 47 + work); 48 + rtnl_lock(); 49 + tcf_exts_destroy(&head->exts); 50 + kfree(head); 51 + rtnl_unlock(); 52 + } 53 + 47 54 static void mall_destroy_rcu(struct rcu_head *rcu) 48 55 { 49 56 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head, 50 57 rcu); 51 58 52 - tcf_exts_destroy(&head->exts); 53 - kfree(head); 59 + INIT_WORK(&head->work, mall_destroy_work); 60 + tcf_queue_work(&head->work); 54 61 } 55 62 56 63 static int mall_replace_hw_filter(struct tcf_proto *tp,
+16 -3
net/sched/cls_route.c
··· 57 57 u32 handle; 58 58 struct route4_bucket *bkt; 59 59 struct tcf_proto *tp; 60 - struct rcu_head rcu; 60 + union { 61 + struct work_struct work; 62 + struct rcu_head rcu; 63 + }; 61 64 }; 62 65 63 66 #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) ··· 257 254 return 0; 258 255 } 259 256 257 + static void route4_delete_filter_work(struct work_struct *work) 258 + { 259 + struct route4_filter *f = container_of(work, struct route4_filter, work); 260 + 261 + rtnl_lock(); 262 + tcf_exts_destroy(&f->exts); 263 + kfree(f); 264 + rtnl_unlock(); 265 + } 266 + 260 267 static void route4_delete_filter(struct rcu_head *head) 261 268 { 262 269 struct route4_filter *f = container_of(head, struct route4_filter, rcu); 263 270 264 - tcf_exts_destroy(&f->exts); 265 - kfree(f); 271 + INIT_WORK(&f->work, route4_delete_filter_work); 272 + tcf_queue_work(&f->work); 266 273 } 267 274 268 275 static void route4_destroy(struct tcf_proto *tp)
+16 -3
net/sched/cls_rsvp.h
··· 97 97 98 98 u32 handle; 99 99 struct rsvp_session *sess; 100 - struct rcu_head rcu; 100 + union { 101 + struct work_struct work; 102 + struct rcu_head rcu; 103 + }; 101 104 }; 102 105 103 106 static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) ··· 285 282 return -ENOBUFS; 286 283 } 287 284 285 + static void rsvp_delete_filter_work(struct work_struct *work) 286 + { 287 + struct rsvp_filter *f = container_of(work, struct rsvp_filter, work); 288 + 289 + rtnl_lock(); 290 + tcf_exts_destroy(&f->exts); 291 + kfree(f); 292 + rtnl_unlock(); 293 + } 294 + 288 295 static void rsvp_delete_filter_rcu(struct rcu_head *head) 289 296 { 290 297 struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu); 291 298 292 - tcf_exts_destroy(&f->exts); 293 - kfree(f); 299 + INIT_WORK(&f->work, rsvp_delete_filter_work); 300 + tcf_queue_work(&f->work); 294 301 } 295 302 296 303 static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
+33 -5
net/sched/cls_tcindex.c
··· 27 27 struct tcindex_filter_result { 28 28 struct tcf_exts exts; 29 29 struct tcf_result res; 30 - struct rcu_head rcu; 30 + union { 31 + struct work_struct work; 32 + struct rcu_head rcu; 33 + }; 31 34 }; 32 35 33 36 struct tcindex_filter { 34 37 u16 key; 35 38 struct tcindex_filter_result result; 36 39 struct tcindex_filter __rcu *next; 37 - struct rcu_head rcu; 40 + union { 41 + struct work_struct work; 42 + struct rcu_head rcu; 43 + }; 38 44 }; 39 45 40 46 ··· 139 133 return 0; 140 134 } 141 135 136 + static void tcindex_destroy_rexts_work(struct work_struct *work) 137 + { 138 + struct tcindex_filter_result *r; 139 + 140 + r = container_of(work, struct tcindex_filter_result, work); 141 + rtnl_lock(); 142 + tcf_exts_destroy(&r->exts); 143 + rtnl_unlock(); 144 + } 145 + 142 146 static void tcindex_destroy_rexts(struct rcu_head *head) 143 147 { 144 148 struct tcindex_filter_result *r; 145 149 146 150 r = container_of(head, struct tcindex_filter_result, rcu); 147 - tcf_exts_destroy(&r->exts); 151 + INIT_WORK(&r->work, tcindex_destroy_rexts_work); 152 + tcf_queue_work(&r->work); 153 + } 154 + 155 + static void tcindex_destroy_fexts_work(struct work_struct *work) 156 + { 157 + struct tcindex_filter *f = container_of(work, struct tcindex_filter, 158 + work); 159 + 160 + rtnl_lock(); 161 + tcf_exts_destroy(&f->result.exts); 162 + kfree(f); 163 + rtnl_unlock(); 148 164 } 149 165 150 166 static void tcindex_destroy_fexts(struct rcu_head *head) ··· 174 146 struct tcindex_filter *f = container_of(head, struct tcindex_filter, 175 147 rcu); 176 148 177 - tcf_exts_destroy(&f->result.exts); 178 - kfree(f); 149 + INIT_WORK(&f->work, tcindex_destroy_fexts_work); 150 + tcf_queue_work(&f->work); 179 151 } 180 152 181 153 static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
+26 -3
net/sched/cls_u32.c
··· 68 68 u32 __percpu *pcpu_success; 69 69 #endif 70 70 struct tcf_proto *tp; 71 - struct rcu_head rcu; 71 + union { 72 + struct work_struct work; 73 + struct rcu_head rcu; 74 + }; 72 75 /* The 'sel' field MUST be the last field in structure to allow for 73 76 * tc_u32_keys allocated at end of structure. 74 77 */ ··· 421 418 * this the u32_delete_key_rcu variant does not free the percpu 422 419 * statistics. 423 420 */ 421 + static void u32_delete_key_work(struct work_struct *work) 422 + { 423 + struct tc_u_knode *key = container_of(work, struct tc_u_knode, work); 424 + 425 + rtnl_lock(); 426 + u32_destroy_key(key->tp, key, false); 427 + rtnl_unlock(); 428 + } 429 + 424 430 static void u32_delete_key_rcu(struct rcu_head *rcu) 425 431 { 426 432 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); 427 433 428 - u32_destroy_key(key->tp, key, false); 434 + INIT_WORK(&key->work, u32_delete_key_work); 435 + tcf_queue_work(&key->work); 429 436 } 430 437 431 438 /* u32_delete_key_freepf_rcu is the rcu callback variant ··· 445 432 * for the variant that should be used with keys return from 446 433 * u32_init_knode() 447 434 */ 435 + static void u32_delete_key_freepf_work(struct work_struct *work) 436 + { 437 + struct tc_u_knode *key = container_of(work, struct tc_u_knode, work); 438 + 439 + rtnl_lock(); 440 + u32_destroy_key(key->tp, key, true); 441 + rtnl_unlock(); 442 + } 443 + 448 444 static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) 449 445 { 450 446 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); 451 447 452 - u32_destroy_key(key->tp, key, true); 448 + INIT_WORK(&key->work, u32_delete_key_freepf_work); 449 + tcf_queue_work(&key->work); 453 450 } 454 451 455 452 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
+2
net/sched/sch_api.c
··· 301 301 { 302 302 struct Qdisc *q; 303 303 304 + if (!handle) 305 + return NULL; 304 306 q = qdisc_match_from_root(dev->qdisc, handle); 305 307 if (q) 306 308 goto out;
+11 -11
net/sctp/input.c
··· 794 794 struct sctp_hash_cmp_arg { 795 795 const union sctp_addr *paddr; 796 796 const struct net *net; 797 - u16 lport; 797 + __be16 lport; 798 798 }; 799 799 800 800 static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, ··· 820 820 return err; 821 821 } 822 822 823 - static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed) 823 + static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed) 824 824 { 825 825 const struct sctp_transport *t = data; 826 826 const union sctp_addr *paddr = &t->ipaddr; 827 827 const struct net *net = sock_net(t->asoc->base.sk); 828 - u16 lport = htons(t->asoc->base.bind_addr.port); 829 - u32 addr; 828 + __be16 lport = htons(t->asoc->base.bind_addr.port); 829 + __u32 addr; 830 830 831 831 if (paddr->sa.sa_family == AF_INET6) 832 832 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 833 833 else 834 - addr = paddr->v4.sin_addr.s_addr; 834 + addr = (__force __u32)paddr->v4.sin_addr.s_addr; 835 835 836 - return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | 836 + return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 | 837 837 (__force __u32)lport, net_hash_mix(net), seed); 838 838 } 839 839 840 - static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed) 840 + static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed) 841 841 { 842 842 const struct sctp_hash_cmp_arg *x = data; 843 843 const union sctp_addr *paddr = x->paddr; 844 844 const struct net *net = x->net; 845 - u16 lport = x->lport; 846 - u32 addr; 845 + __be16 lport = x->lport; 846 + __u32 addr; 847 847 848 848 if (paddr->sa.sa_family == AF_INET6) 849 849 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 850 850 else 851 - addr = paddr->v4.sin_addr.s_addr; 851 + addr = (__force __u32)paddr->v4.sin_addr.s_addr; 852 852 853 - return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | 853 + return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 | 854 854 (__force __u32)lport, net_hash_mix(net), seed); 855 855 } 856 856
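The changes in this file are endianness annotations rather than behaviour changes: __be16/__be32 are sparse "__bitwise" types, so treating them as host-order integers is warned about under `make C=1` unless the raw bit pattern is reused on purpose via __force. A rough kernel-style illustration of the distinction (assumed context, not taken from the patch):

    /* lport is big-endian on the wire; feeding its raw bits to jhash is
     * fine, but sparse wants that made explicit with a __force cast.
     */
    static __u32 hash_lport_sketch(const struct net *net, __be16 lport, __u32 seed)
    {
            return jhash_2words((__force __u32)lport, net_hash_mix(net), seed);
    }

Without the __force cast, sparse reports something like "restricted __be16 degrades to integer"; the annotations change nothing in the generated code.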
+5 -3
net/sctp/ipv6.c
··· 738 738 /* Was this packet marked by Explicit Congestion Notification? */ 739 739 static int sctp_v6_is_ce(const struct sk_buff *skb) 740 740 { 741 - return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); 741 + return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20); 742 742 } 743 743 744 744 /* Dump the v6 addr to the seq file. */ ··· 882 882 net = sock_net(&opt->inet.sk); 883 883 rcu_read_lock(); 884 884 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); 885 - if (!dev || 886 - !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) { 885 + if (!dev || !(opt->inet.freebind || 886 + net->ipv6.sysctl.ip_nonlocal_bind || 887 + ipv6_chk_addr(net, &addr->v6.sin6_addr, 888 + dev, 0))) { 887 889 rcu_read_unlock(); 888 890 return 0; 889 891 }
+5 -4
net/sctp/sm_make_chunk.c
··· 2854 2854 addr_param_len = af->to_addr_param(addr, &addr_param); 2855 2855 param.param_hdr.type = flags; 2856 2856 param.param_hdr.length = htons(paramlen + addr_param_len); 2857 - param.crr_id = i; 2857 + param.crr_id = htonl(i); 2858 2858 2859 2859 sctp_addto_chunk(retval, paramlen, &param); 2860 2860 sctp_addto_chunk(retval, addr_param_len, &addr_param); ··· 2867 2867 addr_param_len = af->to_addr_param(addr, &addr_param); 2868 2868 param.param_hdr.type = SCTP_PARAM_DEL_IP; 2869 2869 param.param_hdr.length = htons(paramlen + addr_param_len); 2870 - param.crr_id = i; 2870 + param.crr_id = htonl(i); 2871 2871 2872 2872 sctp_addto_chunk(retval, paramlen, &param); 2873 2873 sctp_addto_chunk(retval, addr_param_len, &addr_param); ··· 3591 3591 */ 3592 3592 struct sctp_chunk *sctp_make_strreset_req( 3593 3593 const struct sctp_association *asoc, 3594 - __u16 stream_num, __u16 *stream_list, 3594 + __u16 stream_num, __be16 *stream_list, 3595 3595 bool out, bool in) 3596 3596 { 3597 3597 struct sctp_strreset_outreq outreq; ··· 3788 3788 { 3789 3789 struct sctp_reconf_chunk *hdr; 3790 3790 union sctp_params param; 3791 - __u16 last = 0, cnt = 0; 3791 + __be16 last = 0; 3792 + __u16 cnt = 0; 3792 3793 3793 3794 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; 3794 3795 sctp_walk_params(param, hdr, params) {
+4 -4
net/sctp/sm_sideeffect.c
··· 1607 1607 break; 1608 1608 1609 1609 case SCTP_CMD_INIT_FAILED: 1610 - sctp_cmd_init_failed(commands, asoc, cmd->obj.err); 1610 + sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); 1611 1611 break; 1612 1612 1613 1613 case SCTP_CMD_ASSOC_FAILED: 1614 1614 sctp_cmd_assoc_failed(commands, asoc, event_type, 1615 - subtype, chunk, cmd->obj.err); 1615 + subtype, chunk, cmd->obj.u32); 1616 1616 break; 1617 1617 1618 1618 case SCTP_CMD_INIT_COUNTER_INC: ··· 1680 1680 case SCTP_CMD_PROCESS_CTSN: 1681 1681 /* Dummy up a SACK for processing. */ 1682 1682 sackh.cum_tsn_ack = cmd->obj.be32; 1683 - sackh.a_rwnd = asoc->peer.rwnd + 1684 - asoc->outqueue.outstanding_bytes; 1683 + sackh.a_rwnd = htonl(asoc->peer.rwnd + 1684 + asoc->outqueue.outstanding_bytes); 1685 1685 sackh.num_gap_ack_blocks = 0; 1686 1686 sackh.num_dup_tsns = 0; 1687 1687 chunk->subh.sack_hdr = &sackh;
+32
net/sctp/socket.c
··· 170 170 sk_mem_charge(sk, chunk->skb->truesize); 171 171 } 172 172 173 + static void sctp_clear_owner_w(struct sctp_chunk *chunk) 174 + { 175 + skb_orphan(chunk->skb); 176 + } 177 + 178 + static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, 179 + void (*cb)(struct sctp_chunk *)) 180 + 181 + { 182 + struct sctp_outq *q = &asoc->outqueue; 183 + struct sctp_transport *t; 184 + struct sctp_chunk *chunk; 185 + 186 + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) 187 + list_for_each_entry(chunk, &t->transmitted, transmitted_list) 188 + cb(chunk); 189 + 190 + list_for_each_entry(chunk, &q->retransmit, list) 191 + cb(chunk); 192 + 193 + list_for_each_entry(chunk, &q->sacked, list) 194 + cb(chunk); 195 + 196 + list_for_each_entry(chunk, &q->abandoned, list) 197 + cb(chunk); 198 + 199 + list_for_each_entry(chunk, &q->out_chunk_list, list) 200 + cb(chunk); 201 + } 202 + 173 203 /* Verify that this is a valid address. */ 174 204 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 175 205 int len) ··· 8242 8212 * paths won't try to lock it and then oldsk. 8243 8213 */ 8244 8214 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 8215 + sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); 8245 8216 sctp_assoc_migrate(assoc, newsk); 8217 + sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); 8246 8218 8247 8219 /* If the association on the newsk is already closed before accept() 8248 8220 * is called, set RCV_SHUTDOWN flag.
+18 -10
net/sctp/stream.c
··· 118 118 __u16 i, str_nums, *str_list; 119 119 struct sctp_chunk *chunk; 120 120 int retval = -EINVAL; 121 + __be16 *nstr_list; 121 122 bool out, in; 122 123 123 124 if (!asoc->peer.reconf_capable || ··· 149 148 if (str_list[i] >= stream->incnt) 150 149 goto out; 151 150 152 - for (i = 0; i < str_nums; i++) 153 - str_list[i] = htons(str_list[i]); 151 + nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL); 152 + if (!nstr_list) { 153 + retval = -ENOMEM; 154 + goto out; 155 + } 154 156 155 - chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); 156 - 157 157 for (i = 0; i < str_nums; i++) 158 - str_list[i] = ntohs(str_list[i]); 158 + nstr_list[i] = htons(str_list[i]); 159 + 160 + chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in); 161 + 162 + kfree(nstr_list); 159 163 160 164 if (!chunk) { 161 165 retval = -ENOMEM; ··· 311 305 } 312 306 313 307 static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param( 314 - struct sctp_association *asoc, __u32 resp_seq, 308 + struct sctp_association *asoc, __be32 resp_seq, 315 309 __be16 type) 316 310 { 317 311 struct sctp_chunk *chunk = asoc->strreset_chunk; ··· 351 345 { 352 346 struct sctp_strreset_outreq *outreq = param.v; 353 347 struct sctp_stream *stream = &asoc->stream; 354 - __u16 i, nums, flags = 0, *str_p = NULL; 355 348 __u32 result = SCTP_STRRESET_DENIED; 349 + __u16 i, nums, flags = 0; 350 + __be16 *str_p = NULL; 356 351 __u32 request_seq; 357 352 358 353 request_seq = ntohl(outreq->request_seq); ··· 446 439 struct sctp_stream *stream = &asoc->stream; 447 440 __u32 result = SCTP_STRRESET_DENIED; 448 441 struct sctp_chunk *chunk = NULL; 449 - __u16 i, nums, *str_p; 450 442 __u32 request_seq; 443 + __u16 i, nums; 444 + __be16 *str_p; 451 445 452 446 request_seq = ntohl(inreq->request_seq); 453 447 if (TSN_lt(asoc->strreset_inseq, request_seq) || ··· 777 769 778 770 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) { 779 771 struct sctp_strreset_outreq *outreq; 780 - __u16 *str_p; 772 + __be16 *str_p; 781 773 782 774 outreq = (struct sctp_strreset_outreq *)req; 783 775 str_p = outreq->list_of_streams; ··· 802 794 nums, str_p, GFP_ATOMIC); 803 795 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) { 804 796 struct sctp_strreset_inreq *inreq; 805 - __u16 *str_p; 797 + __be16 *str_p; 806 798 807 799 /* if the result is performed, it's impossible for inreq */ 808 800 if (result == SCTP_STRRESET_PERFORMED)
+1 -1
net/sctp/ulpevent.c
··· 847 847 848 848 struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( 849 849 const struct sctp_association *asoc, __u16 flags, __u16 stream_num, 850 - __u16 *stream_list, gfp_t gfp) 850 + __be16 *stream_list, gfp_t gfp) 851 851 { 852 852 struct sctp_stream_reset_event *sreset; 853 853 struct sctp_ulpevent *event;
+8 -9
net/strparser/strparser.c
··· 49 49 { 50 50 /* Unrecoverable error in receive */ 51 51 52 - del_timer(&strp->msg_timer); 52 + cancel_delayed_work(&strp->msg_timer_work); 53 53 54 54 if (strp->stopped) 55 55 return; ··· 68 68 static void strp_start_timer(struct strparser *strp, long timeo) 69 69 { 70 70 if (timeo) 71 - mod_timer(&strp->msg_timer, timeo); 71 + mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); 72 72 } 73 73 74 74 /* Lower lock held */ ··· 319 319 eaten += (cand_len - extra); 320 320 321 321 /* Hurray, we have a new message! */ 322 - del_timer(&strp->msg_timer); 322 + cancel_delayed_work(&strp->msg_timer_work); 323 323 strp->skb_head = NULL; 324 324 STRP_STATS_INCR(strp->stats.msgs); 325 325 ··· 450 450 do_strp_work(container_of(w, struct strparser, work)); 451 451 } 452 452 453 - static void strp_msg_timeout(unsigned long arg) 453 + static void strp_msg_timeout(struct work_struct *w) 454 454 { 455 - struct strparser *strp = (struct strparser *)arg; 455 + struct strparser *strp = container_of(w, struct strparser, 456 + msg_timer_work.work); 456 457 457 458 /* Message assembly timed out */ 458 459 STRP_STATS_INCR(strp->stats.msg_timeouts); ··· 506 505 strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; 507 506 strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp; 508 507 509 - setup_timer(&strp->msg_timer, strp_msg_timeout, 510 - (unsigned long)strp); 511 - 508 + INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout); 512 509 INIT_WORK(&strp->work, strp_work); 513 510 514 511 return 0; ··· 531 532 { 532 533 WARN_ON(!strp->stopped); 533 534 534 - del_timer_sync(&strp->msg_timer); 535 + cancel_delayed_work_sync(&strp->msg_timer_work); 535 536 cancel_work_sync(&strp->work); 536 537 537 538 if (strp->skb_head) {
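Here the message-assembly timeout moves from a struct timer_list to a struct delayed_work queued on strp_wq, so the timeout handler now runs in process context, where lock_sock() is permissible, rather than in a softirq timer callback. A generic sketch of the timer-to-delayed_work conversion with placeholder foo_* names (assumes only the standard workqueue API, not strparser specifics):

    struct foo {
            struct delayed_work timeout_work;       /* was: struct timer_list */
    };

    static void foo_timeout(struct work_struct *w)
    {
            struct foo *f = container_of(w, struct foo, timeout_work.work);

            /* process context: may sleep, may take the socket lock */
    }

    static void foo_init(struct foo *f)
    {
            INIT_DELAYED_WORK(&f->timeout_work, foo_timeout);  /* was setup_timer() */
    }

    static void foo_arm(struct foo *f, unsigned long delay)
    {
            mod_delayed_work(system_wq, &f->timeout_work, delay);  /* was mod_timer() */
    }

Cancellation maps the same way, as the hunks above show: del_timer() becomes cancel_delayed_work() and del_timer_sync() becomes cancel_delayed_work_sync().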
+2
net/unix/diag.c
··· 257 257 err = -ENOENT; 258 258 if (sk == NULL) 259 259 goto out_nosk; 260 + if (!net_eq(sock_net(sk), net)) 261 + goto out; 260 262 261 263 err = sock_diag_check_cookie(sk, req->udiag_cookie); 262 264 if (err)
+41 -9
net/wireless/sme.c
··· 522 522 return -EOPNOTSUPP; 523 523 524 524 if (wdev->current_bss) { 525 - if (!prev_bssid) 526 - return -EALREADY; 527 - if (prev_bssid && 528 - !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid)) 529 - return -ENOTCONN; 530 525 cfg80211_unhold_bss(wdev->current_bss); 531 526 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); 532 527 wdev->current_bss = NULL; ··· 1058 1063 1059 1064 ASSERT_WDEV_LOCK(wdev); 1060 1065 1061 - if (WARN_ON(wdev->connect_keys)) { 1062 - kzfree(wdev->connect_keys); 1063 - wdev->connect_keys = NULL; 1066 + /* 1067 + * If we have an ssid_len, we're trying to connect or are 1068 + * already connected, so reject a new SSID unless it's the 1069 + * same (which is the case for re-association.) 1070 + */ 1071 + if (wdev->ssid_len && 1072 + (wdev->ssid_len != connect->ssid_len || 1073 + memcmp(wdev->ssid, connect->ssid, wdev->ssid_len))) 1074 + return -EALREADY; 1075 + 1076 + /* 1077 + * If connected, reject (re-)association unless prev_bssid 1078 + * matches the current BSSID. 1079 + */ 1080 + if (wdev->current_bss) { 1081 + if (!prev_bssid) 1082 + return -EALREADY; 1083 + if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid)) 1084 + return -ENOTCONN; 1064 1085 } 1086 + 1087 + /* 1088 + * Reject if we're in the process of connecting with WEP, 1089 + * this case isn't very interesting and trying to handle 1090 + * it would make the code much more complex. 1091 + */ 1092 + if (wdev->connect_keys) 1093 + return -EINPROGRESS; 1065 1094 1066 1095 cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, 1067 1096 rdev->wiphy.ht_capa_mod_mask); ··· 1137 1118 1138 1119 if (err) { 1139 1120 wdev->connect_keys = NULL; 1140 - wdev->ssid_len = 0; 1121 + /* 1122 + * This could be reassoc getting refused, don't clear 1123 + * ssid_len in that case. 1124 + */ 1125 + if (!wdev->current_bss) 1126 + wdev->ssid_len = 0; 1141 1127 return err; 1142 1128 } 1143 1129 ··· 1168 1144 cfg80211_mlme_down(rdev, dev); 1169 1145 else if (wdev->ssid_len) 1170 1146 err = rdev_disconnect(rdev, dev, reason); 1147 + 1148 + /* 1149 + * Clear ssid_len unless we actually were fully connected, 1150 + * in which case cfg80211_disconnected() will take care of 1151 + * this later. 1152 + */ 1153 + if (!wdev->current_bss) 1154 + wdev->ssid_len = 0; 1171 1155 1172 1156 return err; 1173 1157 }
+8 -8
net/xfrm/xfrm_policy.c
··· 1573 1573 goto put_states; 1574 1574 } 1575 1575 1576 + if (!dst_prev) 1577 + dst0 = dst1; 1578 + else 1579 + /* Ref count is taken during xfrm_alloc_dst() 1580 + * No need to do dst_clone() on dst1 1581 + */ 1582 + dst_prev->child = dst1; 1583 + 1576 1584 if (xfrm[i]->sel.family == AF_UNSPEC) { 1577 1585 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1578 1586 xfrm_af2proto(family)); ··· 1591 1583 } 1592 1584 } else 1593 1585 inner_mode = xfrm[i]->inner_mode; 1594 - 1595 - if (!dst_prev) 1596 - dst0 = dst1; 1597 - else 1598 - /* Ref count is taken during xfrm_alloc_dst() 1599 - * No need to do dst_clone() on dst1 1600 - */ 1601 - dst_prev->child = dst1; 1602 1586 1603 1587 xdst->route = dst; 1604 1588 dst_copy_metrics(dst1, dst);
+15 -10
net/xfrm/xfrm_user.c
··· 1693 1693 1694 1694 static int xfrm_dump_policy_done(struct netlink_callback *cb) 1695 1695 { 1696 - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1696 + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; 1697 1697 struct net *net = sock_net(cb->skb->sk); 1698 1698 1699 1699 xfrm_policy_walk_done(walk, net); 1700 1700 return 0; 1701 1701 } 1702 1702 1703 + static int xfrm_dump_policy_start(struct netlink_callback *cb) 1704 + { 1705 + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; 1706 + 1707 + BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)); 1708 + 1709 + xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); 1710 + return 0; 1711 + } 1712 + 1703 1713 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1704 1714 { 1705 1715 struct net *net = sock_net(skb->sk); 1706 - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1716 + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; 1707 1717 struct xfrm_dump_info info; 1708 - 1709 - BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > 1710 - sizeof(cb->args) - sizeof(cb->args[0])); 1711 1718 1712 1719 info.in_skb = cb->skb; 1713 1720 info.out_skb = skb; 1714 1721 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1715 1722 info.nlmsg_flags = NLM_F_MULTI; 1716 - 1717 - if (!cb->args[0]) { 1718 - cb->args[0] = 1; 1719 - xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); 1720 - } 1721 1723 1722 1724 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); 1723 1725 ··· 2476 2474 2477 2475 static const struct xfrm_link { 2478 2476 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 2477 + int (*start)(struct netlink_callback *); 2479 2478 int (*dump)(struct sk_buff *, struct netlink_callback *); 2480 2479 int (*done)(struct netlink_callback *); 2481 2480 const struct nla_policy *nla_pol; ··· 2490 2487 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 2491 2488 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 2492 2489 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 2490 + .start = xfrm_dump_policy_start, 2493 2491 .dump = xfrm_dump_policy, 2494 2492 .done = xfrm_dump_policy_done }, 2495 2493 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, ··· 2543 2539 2544 2540 { 2545 2541 struct netlink_dump_control c = { 2542 + .start = link->start, 2546 2543 .dump = link->dump, 2547 2544 .done = link->done, 2548 2545 };
+2 -2
tools/include/uapi/linux/bpf.h
··· 787 787 }; 788 788 789 789 enum sk_action { 790 - SK_ABORTED = 0, 791 - SK_DROP, 790 + SK_DROP = 0, 791 + SK_PASS, 792 792 SK_REDIRECT, 793 793 }; 794 794
+21
tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
··· 17 17 "teardown": [ 18 18 "$TC qdisc del dev $DEV1 ingress" 19 19 ] 20 + }, 21 + { 22 + "id": "d052", 23 + "name": "Add 1M filters with the same action", 24 + "category": [ 25 + "filter", 26 + "flower" 27 + ], 28 + "setup": [ 29 + "$TC qdisc add dev $DEV2 ingress", 30 + "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000" 31 + ], 32 + "cmdUnderTest": "$TC -b $BATCH_FILE", 33 + "expExitCode": "0", 34 + "verifyCmd": "$TC actions list action gact", 35 + "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000", 36 + "matchCount": "1", 37 + "teardown": [ 38 + "$TC qdisc del dev $DEV2 ingress", 39 + "/bin/rm $BATCH_FILE" 40 + ] 20 41 } 21 42 ]
+16 -4
tools/testing/selftests/tc-testing/tdc.py
··· 88 88 exit(1) 89 89 90 90 91 - def test_runner(filtered_tests): 91 + def test_runner(filtered_tests, args): 92 92 """ 93 93 Driver function for the unit tests. 94 94 ··· 105 105 for tidx in testlist: 106 106 result = True 107 107 tresult = "" 108 + if "flower" in tidx["category"] and args.device == None: 109 + continue 108 110 print("Test " + tidx["id"] + ": " + tidx["name"]) 109 111 prepare_env(tidx["setup"]) 110 112 (p, procout) = exec_cmd(tidx["cmdUnderTest"]) ··· 153 151 cmd = 'ip link set $DEV0 up' 154 152 exec_cmd(cmd, False) 155 153 cmd = 'ip -s $NS link set $DEV1 up' 154 + exec_cmd(cmd, False) 155 + cmd = 'ip link set $DEV2 netns $NS' 156 + exec_cmd(cmd, False) 157 + cmd = 'ip -s $NS link set $DEV2 up' 156 158 exec_cmd(cmd, False) 157 159 158 160 ··· 217 211 help='Execute the single test case with specified ID') 218 212 parser.add_argument('-i', '--id', action='store_true', dest='gen_id', 219 213 help='Generate ID numbers for new test cases') 220 - return parser 214 + parser.add_argument('-d', '--device', 215 + help='Execute the test case in flower category') 221 216 return parser 222 217 223 218 ··· 232 225 233 226 if args.path != None: 234 227 NAMES['TC'] = args.path 228 + if args.device != None: 229 + NAMES['DEV2'] = args.device 235 230 if not os.path.isfile(NAMES['TC']): 236 231 print("The specified tc path " + NAMES['TC'] + " does not exist.") 237 232 exit(1) ··· 390 381 if (len(alltests) == 0): 391 382 print("Cannot find a test case with ID matching " + target_id) 392 383 exit(1) 393 - catresults = test_runner(alltests) 384 + catresults = test_runner(alltests, args) 394 385 print("All test results: " + "\n\n" + catresults) 395 386 elif (len(target_category) > 0): 387 + if (target_category == "flower") and args.device == None: 388 + print("Please specify a NIC device (-d) to run category flower") 389 + exit(1) 396 390 if (target_category not in ucat): 397 391 print("Specified category is not present in this file.") 398 392 exit(1) 399 393 else: 400 - catresults = test_runner(testcases[target_category]) 394 + catresults = test_runner(testcases[target_category], args) 401 395 print("Category " + target_category + "\n\n" + catresults) 402 396 403 397 ns_destroy()
+62
tools/testing/selftests/tc-testing/tdc_batch.py
··· 1 + #!/usr/bin/python3 2 + 3 + """ 4 + tdc_batch.py - a script to generate TC batch file 5 + 6 + Copyright (C) 2017 Chris Mi <chrism@mellanox.com> 7 + """ 8 + 9 + import argparse 10 + 11 + parser = argparse.ArgumentParser(description='TC batch file generator') 12 + parser.add_argument("device", help="device name") 13 + parser.add_argument("file", help="batch file name") 14 + parser.add_argument("-n", "--number", type=int, 15 + help="how many lines in batch file") 16 + parser.add_argument("-o", "--skip_sw", 17 + help="skip_sw (offload), by default skip_hw", 18 + action="store_true") 19 + parser.add_argument("-s", "--share_action", 20 + help="all filters share the same action", 21 + action="store_true") 22 + parser.add_argument("-p", "--prio", 23 + help="all filters have different prio", 24 + action="store_true") 25 + args = parser.parse_args() 26 + 27 + device = args.device 28 + file = open(args.file, 'w') 29 + 30 + number = 1 31 + if args.number: 32 + number = args.number 33 + 34 + skip = "skip_hw" 35 + if args.skip_sw: 36 + skip = "skip_sw" 37 + 38 + share_action = "" 39 + if args.share_action: 40 + share_action = "index 1" 41 + 42 + prio = "prio 1" 43 + if args.prio: 44 + prio = "" 45 + if number > 0x4000: 46 + number = 0x4000 47 + 48 + index = 0 49 + for i in range(0x100): 50 + for j in range(0x100): 51 + for k in range(0x100): 52 + mac = ("%02x:%02x:%02x" % (i, j, k)) 53 + src_mac = "e4:11:00:" + mac 54 + dst_mac = "e4:12:00:" + mac 55 + cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s " 56 + "src_mac %s dst_mac %s action drop %s" % 57 + (device, prio, skip, src_mac, dst_mac, share_action)) 58 + file.write("%s\n" % cmd) 59 + index += 1 60 + if index >= number: 61 + file.close() 62 + exit(0)
+2
tools/testing/selftests/tc-testing/tdc_config.py
··· 12 12 # Name of veth devices to be created for the namespace 13 13 'DEV0': 'v0p0', 14 14 'DEV1': 'v0p1', 15 + 'DEV2': '', 16 + 'BATCH_FILE': './batch.txt', 15 17 # Name of the namespace to use 16 18 'NS': 'tcut' 17 19 }