Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix a use-after-free in vlan, from Cong Wang.

2) Handle NAPI poll with a zero budget properly in mlx5 driver, from
Saeed Mahameed.

3) If DMA mapping fails in mlx5 driver, NULL out page, from Inbar
Karmy.

4) Handle overrun in RX FIFO of sun4i CAN driver, from Gerhard
Bertelsmann.

5) Add missing returns after the mdb and vlan prepare phases in the DSA
layer, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
vlan: fix a use-after-free in vlan_device_event()
net: dsa: return after vlan prepare phase
net: dsa: return after mdb prepare phase
can: ifi: Fix transmitter delay calculation
tcp: fix tcp_fastretrans_alert warning
tcp: gso: avoid refcount_t warning from tcp_gso_segment()
can: peak: Add support for new PCIe/M2 CAN FD interfaces
can: sun4i: handle overrun in RX FIFO
can: c_can: don't indicate triple sampling support for D_CAN
net/mlx5e: Increase Striding RQ minimum size limit to 4 multi-packet WQEs
net/mlx5e: Set page to null in case dma mapping fails
net/mlx5e: Fix napi poll with zero budget
net/mlx5: Cancel health poll before sending panic teardown command
net/mlx5: Loop over temp list to release delay events
rds: ib: Fix NULL pointer dereference in debug code

Overall diffstat: +68 -34
drivers/net/can/c_can/c_can_pci.c (-1)

@@ -178,7 +178,6 @@
                 break;
         case BOSCH_D_CAN:
                 priv->regs = reg_map_d_can;
-                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                 break;
         default:
                 ret = -EINVAL;

drivers/net/can/c_can/c_can_platform.c (-1)

@@ -320,7 +320,6 @@
                 break;
         case BOSCH_D_CAN:
                 priv->regs = reg_map_d_can;
-                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
                 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
                 priv->read_reg32 = d_can_plat_read_reg32;

drivers/net/can/ifi_canfd/ifi_canfd.c (+3 -3)

@@ -670,9 +670,9 @@
                priv->base + IFI_CANFD_FTIME);
 
         /* Configure transmitter delay */
-        tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
-        writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
-               priv->base + IFI_CANFD_TDELAY);
+        tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+        tdc &= IFI_CANFD_TDELAY_MASK;
+        writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
 }
 
 static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,

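For context on the corrected formula: the transmitter delay is counted in CAN
clock periods from the start of the bit, and brp * (prop_seg + phase_seg1)
lands it on the data-phase sample point. A standalone arithmetic sketch; the
struct and the timing values below are invented for illustration, not taken
from the driver:

    #include <stdio.h>

    /* Illustrative stand-in for the relevant can_bittiming fields */
    struct bittiming { unsigned brp, prop_seg, phase_seg1; };

    int main(void)
    {
            /* hypothetical data-phase bit timing */
            struct bittiming dbt = { .brp = 2, .prop_seg = 5, .phase_seg1 = 4 };

            /* delay to the sample point, in CAN clock periods:
             * brp * (prop_seg + phase_seg1), as in the fixed driver code */
            unsigned tdc = dbt.brp * (dbt.prop_seg + dbt.phase_seg1);

            printf("tdc = %u clock periods\n", tdc);   /* 2 * (5 + 4) = 18 */
            return 0;
    }

The old code multiplied by phase_seg1 + 1 only and masked before adding the
enable bits, so the compensation point ignored the propagation segment.
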
drivers/net/can/peak_canfd/peak_pciefd_main.c (+12 -2)

@@ -29,14 +29,19 @@
 #include "peak_canfd_user.h"
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
 MODULE_LICENSE("GPL v2");
 
 #define PCIEFD_DRV_NAME         "peak_pciefd"
 
 #define PEAK_PCI_VENDOR_ID      0x001c  /* The PCI device and vendor IDs */
 #define PEAK_PCIEFD_ID          0x0013  /* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID         0x0014  /* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID       0x0017  /* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID      0x0018  /* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID      0x0019  /* for PCIe slot OEM cards */
+#define PCAN_M2_ID              0x001a  /* for M2 slot cards */
 
 /* PEAK PCIe board access description */
 #define PCIEFD_BAR0_SIZE        (64 * 1024)

@@ -203,6 +208,11 @@
 /* supported device ids. */
 static const struct pci_device_id peak_pciefd_tbl[] = {
         {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+        {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+        {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+        {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+        {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+        {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
         {0,}
 };
 

drivers/net/can/sun4i_can.c (+10 -2)

@@ -539,6 +539,13 @@
         }
         stats->rx_over_errors++;
         stats->rx_errors++;
+
+        /* reset the CAN IP by entering reset mode
+         * ignoring timeout error
+         */
+        set_reset_mode(dev);
+        set_normal_mode(dev);
+
         /* clear bit */
         sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
 }

@@ -653,8 +660,9 @@
                 netif_wake_queue(dev);
                 can_led_event(dev, CAN_LED_EVENT_TX);
         }
-        if (isrc & SUN4I_INT_RBUF_VLD) {
-                /* receive interrupt */
+        if ((isrc & SUN4I_INT_RBUF_VLD) &&
+            !(isrc & SUN4I_INT_DATA_OR)) {
+                /* receive interrupt - don't read if overrun occurred */
                 while (status & SUN4I_STA_RBUF_RDY) {
                         /* RX buffer is not empty */
                         sun4i_can_rx(dev);

drivers/net/ethernet/mellanox/mlx5/core/dev.c (+1 -1)

@@ -93,7 +93,7 @@
         list_splice_init(&priv->waiting_events_list, &temp);
         if (!dev_ctx->context)
                 goto out;
-        list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+        list_for_each_entry_safe(de, n, &temp, list)
                 dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
 
 out:

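list_splice_init() moves every entry onto the destination list and leaves the
source list empty, so the old loop iterated over nothing and the queued events
were never delivered. A standalone sketch of the pattern, using a hypothetical
hand-rolled singly-linked list in place of the kernel's list_head:

    #include <stdio.h>

    struct node { struct node *next; int event; };

    /* splice_init: hand the whole list to the caller and re-init the
     * source, loosely mirroring what list_splice_init() does */
    static struct node *splice_init(struct node **src)
    {
            struct node *head = *src;
            *src = NULL;            /* source list is now empty */
            return head;
    }

    int main(void)
    {
            struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
            struct node *waiting = &a;

            struct node *temp = splice_init(&waiting);

            /* BUG pattern: walking the (now empty) source delivers nothing */
            for (struct node *n = waiting; n; n = n->next)
                    printf("never printed: %d\n", n->event);

            /* FIX pattern: walk the spliced-off temp list instead */
            for (struct node *n = temp; n; n = n->next)
                    printf("deliver event %d\n", n->event);
            return 0;
    }
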
drivers/net/ethernet/mellanox/mlx5/core/en.h (+1 -1)

@@ -67,7 +67,7 @@
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
 

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c (+5 -7)

@@ -215,22 +215,20 @@
 static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
                                           struct mlx5e_dma_info *dma_info)
 {
-        struct page *page;
-
         if (mlx5e_rx_cache_get(rq, dma_info))
                 return 0;
 
-        page = dev_alloc_pages(rq->buff.page_order);
-        if (unlikely(!page))
+        dma_info->page = dev_alloc_pages(rq->buff.page_order);
+        if (unlikely(!dma_info->page))
                 return -ENOMEM;
 
-        dma_info->addr = dma_map_page(rq->pdev, page, 0,
+        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
                                       RQ_PAGE_SIZE(rq), rq->buff.map_dir);
         if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-                put_page(page);
+                put_page(dma_info->page);
+                dma_info->page = NULL;
                 return -ENOMEM;
         }
-        dma_info->page = page;
 
         return 0;
 }

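Clearing dma_info->page on the error path matters because the pointer would
otherwise dangle after put_page(), and a later release path that trusts it
would free the page again. A minimal userspace sketch of the guard pattern;
the names below, and malloc/free standing in for the page and DMA APIs, are
illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    struct dma_info { void *page; };

    /* Models the fixed allocation path: on mapping failure, release the
     * page AND clear the stale pointer so cleanup can't double-free it. */
    static int page_alloc_mapped(struct dma_info *info, int mapping_fails)
    {
            info->page = malloc(4096);
            if (!info->page)
                    return -1;

            if (mapping_fails) {            /* dma_map_page() failed */
                    free(info->page);       /* put_page() in the driver */
                    info->page = NULL;      /* the fix: no dangling pointer */
                    return -1;
            }
            return 0;
    }

    /* Models a later release path that trusts the pointer */
    static void page_release(struct dma_info *info)
    {
            if (info->page)
                    free(info->page);  /* would double-free without the NULL */
            info->page = NULL;
    }

    int main(void)
    {
            struct dma_info info;
            if (page_alloc_mapped(&info, 1))
                    fprintf(stderr, "mapping failed, page safely dropped\n");
            page_release(&info);            /* no-op thanks to the NULL */
            return 0;
    }
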
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c (+6 -4)

@@ -49,7 +49,7 @@
         struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                                napi);
         bool busy = false;
-        int work_done;
+        int work_done = 0;
         int i;
 
         for (i = 0; i < c->num_tc; i++)

@@ -58,14 +58,16 @@
         if (c->xdp)
                 busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
 
-        work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
-        busy |= work_done == budget;
+        if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+                work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+                busy |= work_done == budget;
+        }
 
         busy |= c->rq.post_wqes(&c->rq);
 
         if (busy) {
                 if (likely(mlx5e_channel_no_affinity_change(c)))
                         return budget;
-                if (work_done == budget)
+                if (budget && work_done == budget)
                         work_done--;
         }

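The NAPI contract behind this fix: netpoll can invoke the poll callback with
budget == 0, meaning "do TX completion work only, touch no RX ring, and report
zero". A toy model of the fixed control flow; every function here is a stub
standing in for the driver's CQ polling, not the real API:

    #include <stdio.h>

    static int poll_tx_cq(void)       { return 1; }  /* stub: did TX work */
    static int poll_rx_cq(int budget) { return budget < 4 ? budget : 4; }

    /* Models the shape of mlx5e_napi_poll() after the fix */
    static int model_napi_poll(int budget)
    {
            int work_done = 0;      /* must stay 0 when budget == 0 */
            int busy = 0;

            busy |= poll_tx_cq();

            if (budget) {           /* budget==0: don't poll rx rings */
                    work_done = poll_rx_cq(budget);
                    busy |= (work_done == budget);
            }

            if (busy) {
                    if (budget && work_done == budget)
                            work_done--;   /* leave room so NAPI repolls */
            }
            return work_done;  /* returning >0 for budget==0 is the bug */
    }

    int main(void)
    {
            printf("budget 0 -> %d\n", model_napi_poll(0));   /* 0 */
            printf("budget 8 -> %d\n", model_napi_poll(8));   /* 4 */
            return 0;
    }

Without the initializer and the budget checks, a zero-budget call read an
uninitialized work_done and could report RX work it was never allowed to do.
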
drivers/net/ethernet/mellanox/mlx5/core/main.c (+7)

@@ -1482,9 +1482,16 @@
                 return -EAGAIN;
         }
 
+        /* Panic tear down fw command will stop the PCI bus communication
+         * with the HCA, so the health polll is no longer needed.
+         */
+        mlx5_drain_health_wq(dev);
+        mlx5_stop_health_poll(dev);
+
         ret = mlx5_cmd_force_teardown_hca(dev);
         if (ret) {
                 mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+                mlx5_start_health_poll(dev);
                 return ret;
         }
 

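The ordering matters here: the teardown command kills PCI communication with
the HCA, so the health poller has to be quiesced first, and it has to come
back if the firmware refuses the fast unload. A stub-level sketch of that
error-path symmetry; all function names below are invented stand-ins:

    #include <stdio.h>

    static void stop_health_poll(void)    { puts("health poll stopped"); }
    static void start_health_poll(void)   { puts("health poll restarted"); }
    static int  force_teardown(int fw_ok) { return fw_ok ? 0 : -1; } /* stub */

    /* Models the fixed fast-unload path: quiesce the poller before the
     * destructive command, and restore it on the failure path only. */
    static int fast_unload(int fw_ok)
    {
            stop_health_poll();

            if (force_teardown(fw_ok)) {
                    start_health_poll(); /* teardown refused: keep polling */
                    return -1;
            }
            return 0;  /* PCI communication is gone; poller stays off */
    }

    int main(void)
    {
            fast_unload(0);  /* failure path: poller comes back */
            fast_unload(1);  /* success path: poller stays stopped */
            return 0;
    }
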
net/8021q/vlan.c (+3 -3)

@@ -376,6 +376,9 @@
                          dev->name);
                 vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
         }
+        if (event == NETDEV_DOWN &&
+            (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+                vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
         vlan_info = rtnl_dereference(dev->vlan_info);
         if (!vlan_info)

@@ -422,9 +425,6 @@
         case NETDEV_DOWN: {
                 struct net_device *tmp;
                 LIST_HEAD(close_list);
-
-                if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
-                        vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
                 /* Put all VLANs for this dev in the down state too. */
                 vlan_group_for_each_dev(grp, i, vlandev) {

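The use-after-free came from calling a helper that can free dev->vlan_info
after the pointer had already been fetched; moving vlan_vid_del() ahead of the
rtnl_dereference() means the subsequent NULL check observes the result of the
deletion. A minimal userspace model of that reordering; the variable and
helper names are invented, and malloc/free stand in for the refcounted
vlan_info lifetime:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *vlan_info;                 /* models dev->vlan_info */

    /* Models vlan_vid_del(): dropping the last VID frees vlan_info */
    static void vid_del(void)
    {
            free(vlan_info);
            vlan_info = NULL;
    }

    int main(void)
    {
            vlan_info = malloc(16);
            strcpy(vlan_info, "vlan state");

            /* BUGGY order: fetch the pointer, run the helper that may
             * free it, then keep using the stale pointer:
             *
             *     char *info = vlan_info;
             *     vid_del();
             *     puts(info);              // use-after-free
             *
             * FIXED order, as in the patch: run the helper first, then
             * fetch and NULL-check the pointer. */
            vid_del();

            char *info = vlan_info;
            if (!info) {
                    puts("vlan_info already gone, nothing to do");
                    return 0;
            }
            puts(info);
            return 0;
    }
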
net/dsa/switch.c (+4)

@@ -133,6 +133,8 @@
                         if (err)
                                 return err;
                 }
+
+                return 0;
         }
 
         for_each_set_bit(port, group, ds->num_ports)

@@ -180,6 +182,8 @@
                         if (err)
                                 return err;
                 }
+
+                return 0;
         }
 
         for_each_set_bit(port, members, ds->num_ports)

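This code follows switchdev's two-phase transaction model: a prepare pass that
may fail and a commit pass that must not. The bug was that the prepare branch
fell through into the commit loop. A standalone sketch of the fixed shape,
with invented port counts and stub callbacks in place of the ds->ops hooks:

    #include <stdio.h>

    enum phase { PH_PREPARE, PH_COMMIT };

    static int  port_prepare(int port) { return port == 2 ? -1 : 0; } /* stub */
    static void port_commit(int port)  { printf("commit port %d\n", port); }

    /* Models the fixed dsa_switch_*_add() shape */
    static int switch_obj_add(enum phase ph, int nports)
    {
            int port, err;

            if (ph == PH_PREPARE) {
                    for (port = 0; port < nports; port++) {
                            err = port_prepare(port);
                            if (err)
                                    return err;
                    }

                    return 0;   /* the fix: don't fall through to commit */
            }

            for (port = 0; port < nports; port++)
                    port_commit(port);

            return 0;
    }

    int main(void)
    {
            if (switch_obj_add(PH_PREPARE, 2) == 0)  /* ports 0,1 pass */
                    switch_obj_add(PH_COMMIT, 2);    /* only now apply */
            return 0;
    }
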
net/ipv4/tcp_input.c (+1 -2)

@@ -2615,7 +2615,6 @@
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
         unsigned int mss = tcp_current_mss(sk);
-        u32 prior_lost = tp->lost_out;
 
         tcp_for_write_queue(skb, sk) {
                 if (skb == tcp_send_head(sk))

@@ -2632,7 +2631,7 @@
 
         tcp_clear_retrans_hints_partial(tp);
 
-        if (prior_lost == tp->lost_out)
+        if (!tp->lost_out)
                 return;
 
         if (tcp_is_reno(tp))

net/ipv4/tcp_offload.c (+10 -2)

@@ -149,11 +149,19 @@
          * is freed by GSO engine
          */
         if (copy_destructor) {
+                int delta;
+
                 swap(gso_skb->sk, skb->sk);
                 swap(gso_skb->destructor, skb->destructor);
                 sum_truesize += skb->truesize;
-                refcount_add(sum_truesize - gso_skb->truesize,
-                             &skb->sk->sk_wmem_alloc);
+                delta = sum_truesize - gso_skb->truesize;
+                /* In some pathological cases, delta can be negative.
+                 * We need to either use refcount_add() or refcount_sub_and_test()
+                 */
+                if (likely(delta >= 0))
+                        refcount_add(delta, &skb->sk->sk_wmem_alloc);
+                else
+                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
         }
 
         delta = htonl(oldlen + (skb_tail_pointer(skb) -

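Passing a negative delta to refcount_add() trips the refcount_t saturation
warning, because the counter is unsigned and the API polices its transitions;
when the truesize delta can be negative, the adjustment has to be split into
an add branch and a sub branch. A userspace sketch of the same pattern with
C11 atomics; the counter name and values here are illustrative:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Models adjusting sk_wmem_alloc by a possibly-negative delta */
    static void wmem_adjust(atomic_uint *wmem, int delta)
    {
            if (delta >= 0) {
                    atomic_fetch_add(wmem, (unsigned int)delta);
            } else {
                    unsigned int sub = (unsigned int)-delta;
                    unsigned int old = atomic_fetch_sub(wmem, sub);

                    /* mirrors WARN_ON_ONCE(refcount_sub_and_test(...)):
                     * dropping the counter to zero here would be a bug */
                    assert(old > sub);
            }
    }

    int main(void)
    {
            atomic_uint wmem = 1000;

            wmem_adjust(&wmem, 96);     /* common case: grow */
            wmem_adjust(&wmem, -32);    /* pathological case: shrink */
            printf("wmem_alloc = %u\n", atomic_load(&wmem));  /* 1064 */
            return 0;
    }
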
net/rds/ib_recv.c (+5 -5)

@@ -410,14 +410,14 @@
                         break;
                 }
 
-                /* XXX when can this fail? */
-                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
-                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
+                rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
                          recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
                          (long) ib_sg_dma_address(
                                 ic->i_cm_id->device,
-                                &recv->r_frag->f_sg),
-                         ret);
+                                &recv->r_frag->f_sg));
+
+                /* XXX when can this fail? */
+                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                 if (ret) {
                         rds_ib_conn_error(conn, "recv post on "
                                 "%pI4 returned %d, disconnecting and "