Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix a use-after-free in vlan, from Cong Wang.

2) Handle NAPI poll with a zero budget properly in mlx5 driver, from
Saeed Mahameed.

3) If DMA mapping fails in mlx5 driver, NULL out page, from Inbar
Karmy.

4) Handle overrun in RX FIFO of sun4i CAN driver, from Gerhard
Bertelsmann.

5) Add missing returns after the mdb and vlan prepare phases in the DSA
layer, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
vlan: fix a use-after-free in vlan_device_event()
net: dsa: return after vlan prepare phase
net: dsa: return after mdb prepare phase
can: ifi: Fix transmitter delay calculation
tcp: fix tcp_fastretrans_alert warning
tcp: gso: avoid refcount_t warning from tcp_gso_segment()
can: peak: Add support for new PCIe/M2 CAN FD interfaces
can: sun4i: handle overrun in RX FIFO
can: c_can: don't indicate triple sampling support for D_CAN
net/mlx5e: Increase Striding RQ minimum size limit to 4 multi-packet WQEs
net/mlx5e: Set page to null in case dma mapping fails
net/mlx5e: Fix napi poll with zero budget
net/mlx5: Cancel health poll before sending panic teardown command
net/mlx5: Loop over temp list to release delay events
rds: ib: Fix NULL pointer dereference in debug code

Changed files (+68 -34):

drivers/net/can/c_can/c_can_pci.c (-1)
@@ -178,7 +178,6 @@
 		break;
 	case BOSCH_D_CAN:
 		priv->regs = reg_map_d_can;
-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 		break;
 	default:
 		ret = -EINVAL;

drivers/net/can/c_can/c_can_platform.c (-1)
@@ -320,7 +320,6 @@
 		break;
 	case BOSCH_D_CAN:
 		priv->regs = reg_map_d_can;
-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
 		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
 		priv->read_reg32 = d_can_plat_read_reg32;

drivers/net/can/ifi_canfd/ifi_canfd.c (+3 -3)
@@ -670,9 +670,9 @@
 	       priv->base + IFI_CANFD_FTIME);
 
 	/* Configure transmitter delay */
-	tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
-	writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
-	       priv->base + IFI_CANFD_TDELAY);
+	tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+	tdc &= IFI_CANFD_TDELAY_MASK;
+	writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
 }
 
 static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
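
The corrected formula scales the distance from the start of the data bit to
its sample point (prop_seg + phase_seg1, in time quanta, from the standard
struct can_bittiming fields) by the bitrate prescaler, giving the transmitter
delay compensation offset in CAN clock periods. A worked example with invented
timing values, not taken from any real configuration:

	/* Illustrative values only: with brp = 2, prop_seg = 5 and
	 * phase_seg1 = 4, the compensation offset is 2 * (5 + 4) = 18
	 * CAN clock periods; it is then masked with IFI_CANFD_TDELAY_MASK
	 * and written together with IFI_CANFD_TDELAY_EN.
	 */
	u32 brp = 2, prop_seg = 5, phase_seg1 = 4;
	u32 tdc = brp * (prop_seg + phase_seg1);	/* == 18 */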

drivers/net/can/peak_canfd/peak_pciefd_main.c (+12 -2)
@@ -29,14 +29,19 @@
 #include "peak_canfd_user.h"
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
 MODULE_LICENSE("GPL v2");
 
 #define PCIEFD_DRV_NAME "peak_pciefd"
 
 #define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
 #define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */
+#define PCAN_M2_ID 0x001a /* for M2 slot cards */
 
 /* PEAK PCIe board access description */
 #define PCIEFD_BAR0_SIZE (64 * 1024)
@@ -203,5 +208,10 @@
 /* supported device ids. */
 static const struct pci_device_id peak_pciefd_tbl[] = {
 	{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
 	{0,}
 };

drivers/net/can/sun4i_can.c (+10 -2)
@@ -539,6 +539,13 @@
 		}
 		stats->rx_over_errors++;
 		stats->rx_errors++;
+
+		/* reset the CAN IP by entering reset mode
+		 * ignoring timeout error
+		 */
+		set_reset_mode(dev);
+		set_normal_mode(dev);
+
 		/* clear bit */
 		sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
 	}
@@ -653,8 +660,9 @@
 		netif_wake_queue(dev);
 		can_led_event(dev, CAN_LED_EVENT_TX);
 	}
-	if (isrc & SUN4I_INT_RBUF_VLD) {
-		/* receive interrupt */
+	if ((isrc & SUN4I_INT_RBUF_VLD) &&
+	    !(isrc & SUN4I_INT_DATA_OR)) {
+		/* receive interrupt - don't read if overrun occurred */
 		while (status & SUN4I_STA_RBUF_RDY) {
 			/* RX buffer is not empty */
 			sun4i_can_rx(dev);

drivers/net/ethernet/mellanox/mlx5/core/dev.c (+1 -1)
@@ -93,7 +93,7 @@
 	list_splice_init(&priv->waiting_events_list, &temp);
 	if (!dev_ctx->context)
 		goto out;
-	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+	list_for_each_entry_safe(de, n, &temp, list)
 		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
 
 out:

drivers/net/ethernet/mellanox/mlx5/core/en.h (+1 -1)
@@ -67,7 +67,7 @@
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c (+5 -7)
@@ -215,22 +215,20 @@
 static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 					  struct mlx5e_dma_info *dma_info)
 {
-	struct page *page;
-
 	if (mlx5e_rx_cache_get(rq, dma_info))
 		return 0;
 
-	page = dev_alloc_pages(rq->buff.page_order);
-	if (unlikely(!page))
+	dma_info->page = dev_alloc_pages(rq->buff.page_order);
+	if (unlikely(!dma_info->page))
 		return -ENOMEM;
 
-	dma_info->addr = dma_map_page(rq->pdev, page, 0,
+	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
 				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-		put_page(page);
+		put_page(dma_info->page);
+		dma_info->page = NULL;
 		return -ENOMEM;
 	}
-	dma_info->page = page;
 
 	return 0;
 }
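
The en_rx.c change follows a general rule for alloc-and-map helpers: on a
mapping failure, clear the caller-visible pointer, because later unwind code
that walks the ring and releases every non-NULL page would otherwise
put_page() a page this function already freed. A minimal sketch of the
pattern; the example_* names are hypothetical stand-ins for the mlx5e ones:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical slot type, standing in for mlx5e_dma_info. */
	struct example_slot {
		struct page *page;
		dma_addr_t addr;
	};

	static int example_page_alloc_mapped(struct device *dev,
					     struct example_slot *slot,
					     unsigned int order,
					     enum dma_data_direction dir)
	{
		slot->page = dev_alloc_pages(order);
		if (unlikely(!slot->page))
			return -ENOMEM;

		slot->addr = dma_map_page(dev, slot->page, 0,
					  PAGE_SIZE << order, dir);
		if (unlikely(dma_mapping_error(dev, slot->addr))) {
			put_page(slot->page);
			slot->page = NULL;	/* mark the slot empty for unwind */
			return -ENOMEM;
		}
		return 0;
	}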

drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c (+6 -4)
@@ -49,7 +49,7 @@
 	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
 					       napi);
 	bool busy = false;
-	int work_done;
+	int work_done = 0;
 	int i;
 
 	for (i = 0; i < c->num_tc; i++)
@@ -58,14 +58,16 @@
 	if (c->xdp)
 		busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
 
-	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
-	busy |= work_done == budget;
+	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+		work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+		busy |= work_done == budget;
+	}
 
 	busy |= c->rq.post_wqes(&c->rq);
 
 	if (busy) {
 		if (likely(mlx5e_channel_no_affinity_change(c)))
 			return budget;
-		if (work_done == budget)
+		if (budget && work_done == budget)
 			work_done--;
 	}
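
The en_txrx.c fix enforces a NAPI contract: poll() may be invoked with
budget == 0 (netpoll does this), meaning the handler must process TX
completions only and must neither poll the RX ring nor report RX work. A
hedged sketch of a poll handler honoring that contract; all example_* names
are hypothetical, not mlx5's:

	#include <linux/netdevice.h>

	struct example_channel {
		struct napi_struct napi;
		/* ring state elided */
	};

	static void example_clean_tx(struct example_channel *ch);
	static int example_poll_rx(struct example_channel *ch, int budget);

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_channel *ch =
			container_of(napi, struct example_channel, napi);
		int work_done = 0;

		example_clean_tx(ch);		/* always reap TX completions */

		if (budget) {			/* budget == 0: skip RX entirely */
			work_done = example_poll_rx(ch, budget);
			if (work_done == budget)
				return budget;	/* RX not drained: stay scheduled */
		}

		napi_complete_done(napi, work_done);
		return work_done;
	}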

drivers/net/ethernet/mellanox/mlx5/core/main.c (+7)
@@ -1482,9 +1482,16 @@
 		return -EAGAIN;
 	}
 
+	/* Panic tear down fw command will stop the PCI bus communication
+	 * with the HCA, so the health poll is no longer needed.
+	 */
+	mlx5_drain_health_wq(dev);
+	mlx5_stop_health_poll(dev);
+
 	ret = mlx5_cmd_force_teardown_hca(dev);
 	if (ret) {
 		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+		mlx5_start_health_poll(dev);
 		return ret;
 	}
 

net/8021q/vlan.c (+3 -3)
@@ -376,6 +376,9 @@
 			dev->name);
 		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 	}
+	if (event == NETDEV_DOWN &&
+	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 	vlan_info = rtnl_dereference(dev->vlan_info);
 	if (!vlan_info)
@@ -422,9 +425,6 @@
 	case NETDEV_DOWN: {
 		struct net_device *tmp;
 		LIST_HEAD(close_list);
-
-		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
-			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 		/* Put all VLANs for this dev in the down state too. */
 		vlan_group_for_each_dev(grp, i, vlandev) {

net/dsa/switch.c (+4)
@@ -133,6 +133,8 @@
 			if (err)
 				return err;
 		}
+
+		return 0;
 	}
 
 	for_each_set_bit(port, group, ds->num_ports)
@@ -180,6 +182,8 @@
 			if (err)
 				return err;
 		}
+
+		return 0;
 	}
 
 	for_each_set_bit(port, members, ds->num_ports)
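
Both DSA fixes come from switchdev's two-phase commit model: the same
function runs once with the transaction in the prepare phase (validate
everything, then return) and once in the commit phase (apply). Without the
return, control fell out of the prepare branch straight into the commit loop.
A condensed sketch of the pattern, assuming hypothetical example_* helpers:

	#include <net/switchdev.h>

	struct example_switch {
		int num_ports;
	};

	static int example_port_check(struct example_switch *sw, int port,
				      const struct switchdev_obj *obj);
	static void example_port_apply(struct example_switch *sw, int port,
				       const struct switchdev_obj *obj);

	static int example_obj_add(struct example_switch *sw,
				   const struct switchdev_obj *obj,
				   struct switchdev_trans *trans)
	{
		int port, err;

		if (switchdev_trans_ph_prepare(trans)) {
			for (port = 0; port < sw->num_ports; port++) {
				err = example_port_check(sw, port, obj);
				if (err)
					return err;
			}
			return 0;	/* the fix: don't fall through to commit */
		}

		for (port = 0; port < sw->num_ports; port++)
			example_port_apply(sw, port, obj);

		return 0;
	}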

net/ipv4/tcp_input.c (+1 -2)
@@ -2615,7 +2615,6 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	unsigned int mss = tcp_current_mss(sk);
-	u32 prior_lost = tp->lost_out;
 
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -2632,7 +2631,7 @@
 
 	tcp_clear_retrans_hints_partial(tp);
 
-	if (prior_lost == tp->lost_out)
+	if (!tp->lost_out)
 		return;
 
 	if (tcp_is_reno(tp))

net/ipv4/tcp_offload.c (+10 -2)
@@ -149,11 +149,19 @@
 	 * is freed by GSO engine
 	 */
 	if (copy_destructor) {
+		int delta;
+
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
 		sum_truesize += skb->truesize;
-		refcount_add(sum_truesize - gso_skb->truesize,
-			     &skb->sk->sk_wmem_alloc);
+		delta = sum_truesize - gso_skb->truesize;
+		/* In some pathological cases, delta can be negative.
+		 * We need to either use refcount_add() or refcount_sub_and_test()
+		 */
+		if (likely(delta >= 0))
+			refcount_add(delta, &skb->sk->sk_wmem_alloc);
+		else
+			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
 	}
 
 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
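
The tcp_offload.c change exists because refcount_t rejects suspicious
operations: a negative int passed to refcount_add() is interpreted as a huge
unsigned addend and trips the refcount_t sanity checks, so a signed delta has
to be split by sign. The fix distilled into a standalone helper
(account_delta is a hypothetical name, not a kernel API):

	#include <linux/bug.h>
	#include <linux/refcount.h>

	static void account_delta(refcount_t *ref, int delta)
	{
		if (likely(delta >= 0))
			refcount_add(delta, ref);
		else
			/* dropping to zero here would itself be a bug */
			WARN_ON_ONCE(refcount_sub_and_test(-delta, ref));
	}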

net/rds/ib_recv.c (+5 -5)
@@ -410,14 +410,14 @@
 			break;
 		}
 
-		/* XXX when can this fail? */
-		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
-		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
+		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
 			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
 			 (long) ib_sg_dma_address(
 				ic->i_cm_id->device,
-				&recv->r_frag->f_sg),
-			 ret);
+				&recv->r_frag->f_sg));
+
+		/* XXX when can this fail? */
+		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
 		if (ret) {
 			rds_ib_conn_error(conn, "recv post on "
 			       "%pI4 returned %d, disconnecting and "