Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Missing list head init in bluetooth hidp session creation, from Tedd
Ho-Jeong An.

2) Don't leak SKB in bridge netfilter error paths, from Florian
Westphal.

3) ipv6 netdevice private leak in netfilter bridging, fixed by Julien
Grall.

4) Fix regression in IP over hamradio bpq encapsulation, from Ralf
Baechle.

5) Fix race between rhashtable resize events and table walks, from Phil
Sutter.

6) Missing validation of IFLA_VF_INFO netlink attributes, fix from
Daniel Borkmann.

7) Missing security layer socket state initialization in tipc code,
from Stephen Smalley.

8) Fix shared IRQ handling in boomerang 3c59x interrupt handler, from
Denys Vlasenko.

9) Missing minor_idr destroy on module unload on macvtap driver, from
Johannes Thumshirn.

10) Various pktgen kernel thread races, from Oleg Nesterov.

11) Fix races that can cause packets to be processed in the backlog even
after a device attached to that SKB has been fully unregistered.
From Julian Anastasov.

12) bcmgenet driver doesn't account packet drops vs. errors properly,
fix from Petri Gynther.

13) Array index validation and off by one fix in DSA layer, from Florian
Fainelli.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (66 commits)
can: replace timestamp as unique skb attribute
ARM: dts: dra7x-evm: Prevent glitch on DCAN1 pinmux
can: c_can: Fix default pinmux glitch at init
can: rcar_can: unify error messages
can: rcar_can: print request_irq() error code
can: rcar_can: fix typo in error message
can: rcar_can: print signed IRQ #
can: rcar_can: fix IRQ check
net: dsa: Fix off-by-one in switch address parsing
net: dsa: Test array index before use
net: switchdev: don't abort unsupported operations
net: bcmgenet: fix accounting of packet drops vs errors
cdc_ncm: update specs URL
Doc: z8530book: Fix typo in API-z8530-sync-txdma-open.html
net: inet_diag: always export IPV6_V6ONLY sockopt for listening sockets
bridge: mdb: allow the user to delete mdb entry if there's a querier
net: call rcu_read_lock early in process_backlog
net: do not process device backlog during unregistration
bridge: fix potential crash in __netdev_pick_tx()
net: axienet: Fix devm_ioremap_resource return value check
...

+703 -395
+1
MAINTAINERS
··· 7019 7019 F: net/*/netfilter.c 7020 7020 F: net/*/netfilter/ 7021 7021 F: net/netfilter/ 7022 + F: net/bridge/br_netfilter*.c 7022 7023 7023 7024 NETLABEL 7024 7025 M: Paul Moore <paul@paul-moore.com>
+3 -2
arch/arm/boot/dts/dra7-evm.dts
··· 686 686 687 687 &dcan1 { 688 688 status = "ok"; 689 - pinctrl-names = "default", "sleep"; 690 - pinctrl-0 = <&dcan1_pins_default>; 689 + pinctrl-names = "default", "sleep", "active"; 690 + pinctrl-0 = <&dcan1_pins_sleep>; 691 691 pinctrl-1 = <&dcan1_pins_sleep>; 692 + pinctrl-2 = <&dcan1_pins_default>; 692 693 };
+3 -2
arch/arm/boot/dts/dra72-evm.dts
··· 587 587 588 588 &dcan1 { 589 589 status = "ok"; 590 - pinctrl-names = "default", "sleep"; 591 - pinctrl-0 = <&dcan1_pins_default>; 590 + pinctrl-names = "default", "sleep", "active"; 591 + pinctrl-0 = <&dcan1_pins_sleep>; 592 592 pinctrl-1 = <&dcan1_pins_sleep>; 593 + pinctrl-2 = <&dcan1_pins_default>; 593 594 }; 594 595 595 596 &qspi {
+34 -17
drivers/net/bonding/bond_main.c
··· 689 689 690 690 } 691 691 692 - static bool bond_should_change_active(struct bonding *bond) 692 + static struct slave *bond_choose_primary_or_current(struct bonding *bond) 693 693 { 694 694 struct slave *prim = rtnl_dereference(bond->primary_slave); 695 695 struct slave *curr = rtnl_dereference(bond->curr_active_slave); 696 696 697 - if (!prim || !curr || curr->link != BOND_LINK_UP) 698 - return true; 697 + if (!prim || prim->link != BOND_LINK_UP) { 698 + if (!curr || curr->link != BOND_LINK_UP) 699 + return NULL; 700 + return curr; 701 + } 702 + 699 703 if (bond->force_primary) { 700 704 bond->force_primary = false; 701 - return true; 705 + return prim; 702 706 } 703 - if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER && 704 - (prim->speed < curr->speed || 705 - (prim->speed == curr->speed && prim->duplex <= curr->duplex))) 706 - return false; 707 - if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE) 708 - return false; 709 - return true; 707 + 708 + if (!curr || curr->link != BOND_LINK_UP) 709 + return prim; 710 + 711 + /* At this point, prim and curr are both up */ 712 + switch (bond->params.primary_reselect) { 713 + case BOND_PRI_RESELECT_ALWAYS: 714 + return prim; 715 + case BOND_PRI_RESELECT_BETTER: 716 + if (prim->speed < curr->speed) 717 + return curr; 718 + if (prim->speed == curr->speed && prim->duplex <= curr->duplex) 719 + return curr; 720 + return prim; 721 + case BOND_PRI_RESELECT_FAILURE: 722 + return curr; 723 + default: 724 + netdev_err(bond->dev, "impossible primary_reselect %d\n", 725 + bond->params.primary_reselect); 726 + return curr; 727 + } 710 728 } 711 729 712 730 /** 713 - * find_best_interface - select the best available slave to be the active one 731 + * bond_find_best_slave - select the best available slave to be the active one 714 732 * @bond: our bonding struct 715 733 */ 716 734 static struct slave *bond_find_best_slave(struct bonding *bond) 717 735 { 718 - struct slave *slave, *bestslave = NULL, *primary; 
736 + struct slave *slave, *bestslave = NULL; 719 737 struct list_head *iter; 720 738 int mintime = bond->params.updelay; 721 739 722 - primary = rtnl_dereference(bond->primary_slave); 723 - if (primary && primary->link == BOND_LINK_UP && 724 - bond_should_change_active(bond)) 725 - return primary; 740 + slave = bond_choose_primary_or_current(bond); 741 + if (slave) 742 + return slave; 726 743 727 744 bond_for_each_slave(bond, slave, iter) { 728 745 if (slave->link == BOND_LINK_UP)
+8 -2
drivers/net/can/c_can/c_can.c
··· 592 592 { 593 593 struct c_can_priv *priv = netdev_priv(dev); 594 594 int err; 595 + struct pinctrl *p; 595 596 596 597 /* basic c_can configuration */ 597 598 err = c_can_chip_config(dev); ··· 605 604 606 605 priv->can.state = CAN_STATE_ERROR_ACTIVE; 607 606 608 - /* activate pins */ 609 - pinctrl_pm_select_default_state(dev->dev.parent); 607 + /* Attempt to use "active" if available else use "default" */ 608 + p = pinctrl_get_select(priv->device, "active"); 609 + if (!IS_ERR(p)) 610 + pinctrl_put(p); 611 + else 612 + pinctrl_pm_select_default_state(priv->device); 613 + 610 614 return 0; 611 615 } 612 616
+2 -5
drivers/net/can/dev.c
··· 440 440 struct can_frame *cf = (struct can_frame *)skb->data; 441 441 u8 dlc = cf->can_dlc; 442 442 443 - if (!(skb->tstamp.tv64)) 444 - __net_timestamp(skb); 445 - 446 443 netif_rx(priv->echo_skb[idx]); 447 444 priv->echo_skb[idx] = NULL; 448 445 ··· 575 578 if (unlikely(!skb)) 576 579 return NULL; 577 580 578 - __net_timestamp(skb); 579 581 skb->protocol = htons(ETH_P_CAN); 580 582 skb->pkt_type = PACKET_BROADCAST; 581 583 skb->ip_summed = CHECKSUM_UNNECESSARY; ··· 585 589 586 590 can_skb_reserve(skb); 587 591 can_skb_prv(skb)->ifindex = dev->ifindex; 592 + can_skb_prv(skb)->skbcnt = 0; 588 593 589 594 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); 590 595 memset(*cf, 0, sizeof(struct can_frame)); ··· 604 607 if (unlikely(!skb)) 605 608 return NULL; 606 609 607 - __net_timestamp(skb); 608 610 skb->protocol = htons(ETH_P_CANFD); 609 611 skb->pkt_type = PACKET_BROADCAST; 610 612 skb->ip_summed = CHECKSUM_UNNECESSARY; ··· 614 618 615 619 can_skb_reserve(skb); 616 620 can_skb_prv(skb)->ifindex = dev->ifindex; 621 + can_skb_prv(skb)->skbcnt = 0; 617 622 618 623 *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame)); 619 624 memset(*cfd, 0, sizeof(struct canfd_frame));
+10 -6
drivers/net/can/rcar_can.c
··· 508 508 509 509 err = clk_prepare_enable(priv->clk); 510 510 if (err) { 511 - netdev_err(ndev, "failed to enable periperal clock, error %d\n", 511 + netdev_err(ndev, 512 + "failed to enable peripheral clock, error %d\n", 512 513 err); 513 514 goto out; 514 515 } ··· 527 526 napi_enable(&priv->napi); 528 527 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); 529 528 if (err) { 530 - netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq); 529 + netdev_err(ndev, "request_irq(%d) failed, error %d\n", 530 + ndev->irq, err); 531 531 goto out_close; 532 532 } 533 533 can_led_event(ndev, CAN_LED_EVENT_OPEN); ··· 760 758 } 761 759 762 760 irq = platform_get_irq(pdev, 0); 763 - if (!irq) { 761 + if (irq < 0) { 764 762 dev_err(&pdev->dev, "No IRQ resource\n"); 763 + err = irq; 765 764 goto fail; 766 765 } 767 766 ··· 785 782 priv->clk = devm_clk_get(&pdev->dev, "clkp1"); 786 783 if (IS_ERR(priv->clk)) { 787 784 err = PTR_ERR(priv->clk); 788 - dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err); 785 + dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", 786 + err); 789 787 goto fail_clk; 790 788 } 791 789 ··· 798 794 priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); 799 795 if (IS_ERR(priv->can_clk)) { 800 796 err = PTR_ERR(priv->can_clk); 801 - dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err); 797 + dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err); 802 798 goto fail_clk; 803 799 } 804 800 ··· 827 823 828 824 devm_can_led_init(ndev); 829 825 830 - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", 826 + dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", 831 827 priv->regs, ndev->irq); 832 828 833 829 return 0;
+1 -1
drivers/net/can/slcan.c
··· 207 207 if (!skb) 208 208 return; 209 209 210 - __net_timestamp(skb); 211 210 skb->dev = sl->dev; 212 211 skb->protocol = htons(ETH_P_CAN); 213 212 skb->pkt_type = PACKET_BROADCAST; ··· 214 215 215 216 can_skb_reserve(skb); 216 217 can_skb_prv(skb)->ifindex = sl->dev->ifindex; 218 + can_skb_prv(skb)->skbcnt = 0; 217 219 218 220 memcpy(skb_put(skb, sizeof(struct can_frame)), 219 221 &cf, sizeof(struct can_frame));
-3
drivers/net/can/vcan.c
··· 78 78 skb->dev = dev; 79 79 skb->ip_summed = CHECKSUM_UNNECESSARY; 80 80 81 - if (!(skb->tstamp.tv64)) 82 - __net_timestamp(skb); 83 - 84 81 netif_rx_ni(skb); 85 82 } 86 83
+3 -1
drivers/net/ethernet/3com/3c59x.c
··· 2382 2382 void __iomem *ioaddr; 2383 2383 int status; 2384 2384 int work_done = max_interrupt_work; 2385 + int handled = 0; 2385 2386 2386 2387 ioaddr = vp->ioaddr; 2387 2388 ··· 2401 2400 2402 2401 if ((status & IntLatch) == 0) 2403 2402 goto handler_exit; /* No interrupt: shared IRQs can cause this */ 2403 + handled = 1; 2404 2404 2405 2405 if (status == 0xffff) { /* h/w no longer present (hotplug)? */ 2406 2406 if (vortex_debug > 1) ··· 2503 2501 handler_exit: 2504 2502 vp->handling_irq = 0; 2505 2503 spin_unlock(&vp->lock); 2506 - return IRQ_HANDLED; 2504 + return IRQ_RETVAL(handled); 2507 2505 } 2508 2506 2509 2507 static int vortex_rx(struct net_device *dev)
+2 -1
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
··· 303 303 get_page(pa->pages); 304 304 bd->pa = *pa; 305 305 306 - bd->dma = pa->pages_dma + pa->pages_offset; 306 + bd->dma_base = pa->pages_dma; 307 + bd->dma_off = pa->pages_offset; 307 308 bd->dma_len = len; 308 309 309 310 pa->pages_offset += len;
+7 -4
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 1110 1110 unsigned int rx_usecs = pdata->rx_usecs; 1111 1111 unsigned int rx_frames = pdata->rx_frames; 1112 1112 unsigned int inte; 1113 + dma_addr_t hdr_dma, buf_dma; 1113 1114 1114 1115 if (!rx_usecs && !rx_frames) { 1115 1116 /* No coalescing, interrupt for every descriptor */ ··· 1130 1129 * Set buffer 2 (hi) address to buffer dma address (hi) and 1131 1130 * set control bits OWN and INTE 1132 1131 */ 1133 - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); 1134 - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); 1135 - rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); 1136 - rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); 1132 + hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off; 1133 + buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off; 1134 + rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); 1135 + rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); 1136 + rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); 1137 + rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); 1137 1138 1138 1139 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); 1139 1140
+11 -6
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 1765 1765 /* Start with the header buffer which may contain just the header 1766 1766 * or the header plus data 1767 1767 */ 1768 - dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma, 1769 - rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); 1768 + dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, 1769 + rdata->rx.hdr.dma_off, 1770 + rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); 1770 1771 1771 1772 packet = page_address(rdata->rx.hdr.pa.pages) + 1772 1773 rdata->rx.hdr.pa.pages_offset; ··· 1779 1778 len -= copy_len; 1780 1779 if (len) { 1781 1780 /* Add the remaining data as a frag */ 1782 - dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma, 1783 - rdata->rx.buf.dma_len, DMA_FROM_DEVICE); 1781 + dma_sync_single_range_for_cpu(pdata->dev, 1782 + rdata->rx.buf.dma_base, 1783 + rdata->rx.buf.dma_off, 1784 + rdata->rx.buf.dma_len, 1785 + DMA_FROM_DEVICE); 1784 1786 1785 1787 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1786 1788 rdata->rx.buf.pa.pages, ··· 1949 1945 if (!skb) 1950 1946 error = 1; 1951 1947 } else if (rdesc_len) { 1952 - dma_sync_single_for_cpu(pdata->dev, 1953 - rdata->rx.buf.dma, 1948 + dma_sync_single_range_for_cpu(pdata->dev, 1949 + rdata->rx.buf.dma_base, 1950 + rdata->rx.buf.dma_off, 1954 1951 rdata->rx.buf.dma_len, 1955 1952 DMA_FROM_DEVICE); 1956 1953
+2 -1
drivers/net/ethernet/amd/xgbe/xgbe.h
··· 337 337 struct xgbe_page_alloc pa; 338 338 struct xgbe_page_alloc pa_unmap; 339 339 340 - dma_addr_t dma; 340 + dma_addr_t dma_base; 341 + unsigned long dma_off; 341 342 unsigned int dma_len; 342 343 }; 343 344
+1 -1
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1793 1793 macaddr = of_get_mac_address(dn); 1794 1794 if (!macaddr || !is_valid_ether_addr(macaddr)) { 1795 1795 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); 1796 - random_ether_addr(dev->dev_addr); 1796 + eth_hw_addr_random(dev); 1797 1797 } else { 1798 1798 ether_addr_copy(dev->dev_addr, macaddr); 1799 1799 }
-4
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1230 1230 new_skb = skb_realloc_headroom(skb, sizeof(*status)); 1231 1231 dev_kfree_skb(skb); 1232 1232 if (!new_skb) { 1233 - dev->stats.tx_errors++; 1234 1233 dev->stats.tx_dropped++; 1235 1234 return NULL; 1236 1235 } ··· 1464 1465 1465 1466 if (unlikely(!skb)) { 1466 1467 dev->stats.rx_dropped++; 1467 - dev->stats.rx_errors++; 1468 1468 goto next; 1469 1469 } 1470 1470 ··· 1491 1493 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1492 1494 netif_err(priv, rx_status, dev, 1493 1495 "dropping fragmented packet!\n"); 1494 - dev->stats.rx_dropped++; 1495 1496 dev->stats.rx_errors++; 1496 1497 dev_kfree_skb_any(skb); 1497 1498 goto next; ··· 1512 1515 dev->stats.rx_frame_errors++; 1513 1516 if (dma_flag & DMA_RX_LG) 1514 1517 dev->stats.rx_length_errors++; 1515 - dev->stats.rx_dropped++; 1516 1518 dev->stats.rx_errors++; 1517 1519 dev_kfree_skb_any(skb); 1518 1520 goto next;
+13 -12
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
··· 952 952 * eventually have to put a format interpreter in here ... 953 953 */ 954 954 seq_printf(seq, "%10d %15llu %8s %8s ", 955 - e->seqno, e->timestamp, 955 + be32_to_cpu(e->seqno), 956 + be64_to_cpu(e->timestamp), 956 957 (e->level < ARRAY_SIZE(devlog_level_strings) 957 958 ? devlog_level_strings[e->level] 958 959 : "UNKNOWN"), 959 960 (e->facility < ARRAY_SIZE(devlog_facility_strings) 960 961 ? devlog_facility_strings[e->facility] 961 962 : "UNKNOWN")); 962 - seq_printf(seq, e->fmt, e->params[0], e->params[1], 963 - e->params[2], e->params[3], e->params[4], 964 - e->params[5], e->params[6], e->params[7]); 963 + seq_printf(seq, e->fmt, 964 + be32_to_cpu(e->params[0]), 965 + be32_to_cpu(e->params[1]), 966 + be32_to_cpu(e->params[2]), 967 + be32_to_cpu(e->params[3]), 968 + be32_to_cpu(e->params[4]), 969 + be32_to_cpu(e->params[5]), 970 + be32_to_cpu(e->params[6]), 971 + be32_to_cpu(e->params[7])); 965 972 } 966 973 return 0; 967 974 } ··· 1050 1043 return ret; 1051 1044 } 1052 1045 1053 - /* Translate log multi-byte integral elements into host native format 1054 - * and determine where the first entry in the log is. 1046 + /* Find the earliest (lowest Sequence Number) log entry in the 1047 + * circular Device Log. 1055 1048 */ 1056 1049 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { 1057 1050 struct fw_devlog_e *e = &dinfo->log[index]; 1058 - int i; 1059 1051 __u32 seqno; 1060 1052 1061 1053 if (e->timestamp == 0) 1062 1054 continue; 1063 1055 1064 - e->timestamp = (__force __be64)be64_to_cpu(e->timestamp); 1065 1056 seqno = be32_to_cpu(e->seqno); 1066 - for (i = 0; i < 8; i++) 1067 - e->params[i] = 1068 - (__force __be32)be32_to_cpu(e->params[i]); 1069 - 1070 1057 if (seqno < fseqno) { 1071 1058 fseqno = seqno; 1072 1059 dinfo->first = index;
+2 -2
drivers/net/ethernet/cisco/enic/enic_main.c
··· 1170 1170 wq_work_done, 1171 1171 0 /* dont unmask intr */, 1172 1172 0 /* dont reset intr timer */); 1173 - return rq_work_done; 1173 + return budget; 1174 1174 } 1175 1175 1176 1176 if (budget > 0) ··· 1191 1191 0 /* don't reset intr timer */); 1192 1192 1193 1193 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1194 + enic_poll_unlock_napi(&enic->rq[cq_rq], napi); 1194 1195 1195 1196 /* Buffer allocation failed. Stay in polling 1196 1197 * mode so we can try to fill the ring again. ··· 1209 1208 napi_complete(napi); 1210 1209 vnic_intr_unmask(&enic->intr[intr]); 1211 1210 } 1212 - enic_poll_unlock_napi(&enic->rq[cq_rq], napi); 1213 1211 1214 1212 return rq_work_done; 1215 1213 }
+75 -13
drivers/net/ethernet/freescale/fec_main.c
··· 24 24 #include <linux/module.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/string.h> 27 + #include <linux/pm_runtime.h> 27 28 #include <linux/ptrace.h> 28 29 #include <linux/errno.h> 29 30 #include <linux/ioport.h> ··· 78 77 #define FEC_ENET_RAEM_V 0x8 79 78 #define FEC_ENET_RAFL_V 0x8 80 79 #define FEC_ENET_OPD_V 0xFFF0 80 + #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 81 81 82 82 static struct platform_device_id fec_devtype[] = { 83 83 { ··· 1769 1767 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1770 1768 { 1771 1769 struct fec_enet_private *fep = bus->priv; 1770 + struct device *dev = &fep->pdev->dev; 1772 1771 unsigned long time_left; 1772 + int ret = 0; 1773 + 1774 + ret = pm_runtime_get_sync(dev); 1775 + if (IS_ERR_VALUE(ret)) 1776 + return ret; 1773 1777 1774 1778 fep->mii_timeout = 0; 1775 1779 init_completion(&fep->mdio_done); ··· 1791 1783 if (time_left == 0) { 1792 1784 fep->mii_timeout = 1; 1793 1785 netdev_err(fep->netdev, "MDIO read timeout\n"); 1794 - return -ETIMEDOUT; 1786 + ret = -ETIMEDOUT; 1787 + goto out; 1795 1788 } 1796 1789 1797 - /* return value */ 1798 - return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1790 + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1791 + 1792 + out: 1793 + pm_runtime_mark_last_busy(dev); 1794 + pm_runtime_put_autosuspend(dev); 1795 + 1796 + return ret; 1799 1797 } 1800 1798 1801 1799 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1802 1800 u16 value) 1803 1801 { 1804 1802 struct fec_enet_private *fep = bus->priv; 1803 + struct device *dev = &fep->pdev->dev; 1805 1804 unsigned long time_left; 1805 + int ret = 0; 1806 + 1807 + ret = pm_runtime_get_sync(dev); 1808 + if (IS_ERR_VALUE(ret)) 1809 + return ret; 1806 1810 1807 1811 fep->mii_timeout = 0; 1808 1812 init_completion(&fep->mdio_done); ··· 1831 1811 if (time_left == 0) { 1832 1812 fep->mii_timeout = 1; 1833 1813 netdev_err(fep->netdev, "MDIO write timeout\n"); 1834 - return 
-ETIMEDOUT; 1814 + ret = -ETIMEDOUT; 1835 1815 } 1836 1816 1837 - return 0; 1817 + pm_runtime_mark_last_busy(dev); 1818 + pm_runtime_put_autosuspend(dev); 1819 + 1820 + return ret; 1838 1821 } 1839 1822 1840 1823 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ··· 1849 1826 ret = clk_prepare_enable(fep->clk_ahb); 1850 1827 if (ret) 1851 1828 return ret; 1852 - ret = clk_prepare_enable(fep->clk_ipg); 1853 - if (ret) 1854 - goto failed_clk_ipg; 1855 1829 if (fep->clk_enet_out) { 1856 1830 ret = clk_prepare_enable(fep->clk_enet_out); 1857 1831 if (ret) ··· 1872 1852 } 1873 1853 } else { 1874 1854 clk_disable_unprepare(fep->clk_ahb); 1875 - clk_disable_unprepare(fep->clk_ipg); 1876 1855 if (fep->clk_enet_out) 1877 1856 clk_disable_unprepare(fep->clk_enet_out); 1878 1857 if (fep->clk_ptp) { ··· 1893 1874 if (fep->clk_enet_out) 1894 1875 clk_disable_unprepare(fep->clk_enet_out); 1895 1876 failed_clk_enet_out: 1896 - clk_disable_unprepare(fep->clk_ipg); 1897 - failed_clk_ipg: 1898 1877 clk_disable_unprepare(fep->clk_ahb); 1899 1878 1900 1879 return ret; ··· 2864 2847 struct fec_enet_private *fep = netdev_priv(ndev); 2865 2848 int ret; 2866 2849 2850 + ret = pm_runtime_get_sync(&fep->pdev->dev); 2851 + if (IS_ERR_VALUE(ret)) 2852 + return ret; 2853 + 2867 2854 pinctrl_pm_select_default_state(&fep->pdev->dev); 2868 2855 ret = fec_enet_clk_enable(ndev, true); 2869 2856 if (ret) 2870 - return ret; 2857 + goto clk_enable; 2871 2858 2872 2859 /* I should reset the ring buffers here, but I don't yet know 2873 2860 * a simple way to do that. 
··· 2902 2881 fec_enet_free_buffers(ndev); 2903 2882 err_enet_alloc: 2904 2883 fec_enet_clk_enable(ndev, false); 2884 + clk_enable: 2885 + pm_runtime_mark_last_busy(&fep->pdev->dev); 2886 + pm_runtime_put_autosuspend(&fep->pdev->dev); 2905 2887 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2906 2888 return ret; 2907 2889 } ··· 2927 2903 2928 2904 fec_enet_clk_enable(ndev, false); 2929 2905 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2906 + pm_runtime_mark_last_busy(&fep->pdev->dev); 2907 + pm_runtime_put_autosuspend(&fep->pdev->dev); 2908 + 2930 2909 fec_enet_free_buffers(ndev); 2931 2910 2932 2911 return 0; ··· 3415 3388 if (ret) 3416 3389 goto failed_clk; 3417 3390 3391 + ret = clk_prepare_enable(fep->clk_ipg); 3392 + if (ret) 3393 + goto failed_clk_ipg; 3394 + 3418 3395 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3419 3396 if (!IS_ERR(fep->reg_phy)) { 3420 3397 ret = regulator_enable(fep->reg_phy); ··· 3465 3434 netif_carrier_off(ndev); 3466 3435 fec_enet_clk_enable(ndev, false); 3467 3436 pinctrl_pm_select_sleep_state(&pdev->dev); 3437 + pm_runtime_set_active(&pdev->dev); 3438 + pm_runtime_enable(&pdev->dev); 3468 3439 3469 3440 ret = register_netdev(ndev); 3470 3441 if (ret) ··· 3480 3447 3481 3448 fep->rx_copybreak = COPYBREAK_DEFAULT; 3482 3449 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3450 + 3451 + pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3452 + pm_runtime_use_autosuspend(&pdev->dev); 3453 + pm_runtime_mark_last_busy(&pdev->dev); 3454 + pm_runtime_put_autosuspend(&pdev->dev); 3455 + 3483 3456 return 0; 3484 3457 3485 3458 failed_register: ··· 3496 3457 if (fep->reg_phy) 3497 3458 regulator_disable(fep->reg_phy); 3498 3459 failed_regulator: 3460 + clk_disable_unprepare(fep->clk_ipg); 3461 + failed_clk_ipg: 3499 3462 fec_enet_clk_enable(ndev, false); 3500 3463 failed_clk: 3501 3464 failed_phy: ··· 3609 3568 return ret; 3610 3569 } 3611 3570 3612 - static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, 
fec_resume); 3571 + static int __maybe_unused fec_runtime_suspend(struct device *dev) 3572 + { 3573 + struct net_device *ndev = dev_get_drvdata(dev); 3574 + struct fec_enet_private *fep = netdev_priv(ndev); 3575 + 3576 + clk_disable_unprepare(fep->clk_ipg); 3577 + 3578 + return 0; 3579 + } 3580 + 3581 + static int __maybe_unused fec_runtime_resume(struct device *dev) 3582 + { 3583 + struct net_device *ndev = dev_get_drvdata(dev); 3584 + struct fec_enet_private *fep = netdev_priv(ndev); 3585 + 3586 + return clk_prepare_enable(fep->clk_ipg); 3587 + } 3588 + 3589 + static const struct dev_pm_ops fec_pm_ops = { 3590 + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3591 + SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3592 + }; 3613 3593 3614 3594 static struct platform_driver fec_driver = { 3615 3595 .driver = {
+145 -27
drivers/net/ethernet/sfc/ef10.c
··· 101 101 return resource_size(&efx->pci_dev->resource[bar]); 102 102 } 103 103 104 + static bool efx_ef10_is_vf(struct efx_nic *efx) 105 + { 106 + return efx->type->is_vf; 107 + } 108 + 104 109 static int efx_ef10_get_pf_index(struct efx_nic *efx) 105 110 { 106 111 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); ··· 680 675 static int efx_ef10_probe_pf(struct efx_nic *efx) 681 676 { 682 677 return efx_ef10_probe(efx); 678 + } 679 + 680 + int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 681 + { 682 + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 683 + 684 + MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 685 + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 686 + NULL, 0, NULL); 687 + } 688 + 689 + int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 690 + { 691 + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 692 + 693 + MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 694 + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 695 + NULL, 0, NULL); 696 + } 697 + 698 + int efx_ef10_vport_add_mac(struct efx_nic *efx, 699 + unsigned int port_id, u8 *mac) 700 + { 701 + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 702 + 703 + MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 704 + ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 705 + 706 + return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 707 + sizeof(inbuf), NULL, 0, NULL); 708 + } 709 + 710 + int efx_ef10_vport_del_mac(struct efx_nic *efx, 711 + unsigned int port_id, u8 *mac) 712 + { 713 + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 714 + 715 + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 716 + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 717 + 718 + return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 719 + sizeof(inbuf), NULL, 
0, NULL); 683 720 } 684 721 685 722 #ifdef CONFIG_SFC_SRIOV ··· 3851 3804 WARN_ON(remove_failed); 3852 3805 } 3853 3806 3807 + static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) 3808 + { 3809 + struct efx_ef10_nic_data *nic_data = efx->nic_data; 3810 + u8 mac_old[ETH_ALEN]; 3811 + int rc, rc2; 3812 + 3813 + /* Only reconfigure a PF-created vport */ 3814 + if (is_zero_ether_addr(nic_data->vport_mac)) 3815 + return 0; 3816 + 3817 + efx_device_detach_sync(efx); 3818 + efx_net_stop(efx->net_dev); 3819 + down_write(&efx->filter_sem); 3820 + efx_ef10_filter_table_remove(efx); 3821 + up_write(&efx->filter_sem); 3822 + 3823 + rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); 3824 + if (rc) 3825 + goto restore_filters; 3826 + 3827 + ether_addr_copy(mac_old, nic_data->vport_mac); 3828 + rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, 3829 + nic_data->vport_mac); 3830 + if (rc) 3831 + goto restore_vadaptor; 3832 + 3833 + rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, 3834 + efx->net_dev->dev_addr); 3835 + if (!rc) { 3836 + ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); 3837 + } else { 3838 + rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); 3839 + if (rc2) { 3840 + /* Failed to add original MAC, so clear vport_mac */ 3841 + eth_zero_addr(nic_data->vport_mac); 3842 + goto reset_nic; 3843 + } 3844 + } 3845 + 3846 + restore_vadaptor: 3847 + rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); 3848 + if (rc2) 3849 + goto reset_nic; 3850 + restore_filters: 3851 + down_write(&efx->filter_sem); 3852 + rc2 = efx_ef10_filter_table_probe(efx); 3853 + up_write(&efx->filter_sem); 3854 + if (rc2) 3855 + goto reset_nic; 3856 + 3857 + rc2 = efx_net_open(efx->net_dev); 3858 + if (rc2) 3859 + goto reset_nic; 3860 + 3861 + netif_device_attach(efx->net_dev); 3862 + 3863 + return rc; 3864 + 3865 + reset_nic: 3866 + netif_err(efx, drv, efx->net_dev, 3867 + "Failed to restore when changing MAC address - scheduling reset\n"); 3868 + 
efx_schedule_reset(efx, RESET_TYPE_DATAPATH); 3869 + 3870 + return rc ? rc : rc2; 3871 + } 3872 + 3854 3873 static int efx_ef10_set_mac_address(struct efx_nic *efx) 3855 3874 { 3856 3875 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); ··· 3933 3820 efx->net_dev->dev_addr); 3934 3821 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 3935 3822 nic_data->vport_id); 3936 - rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 3937 - sizeof(inbuf), NULL, 0, NULL); 3823 + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 3824 + sizeof(inbuf), NULL, 0, NULL); 3938 3825 3939 3826 efx_ef10_filter_table_probe(efx); 3940 3827 up_write(&efx->filter_sem); ··· 3942 3829 efx_net_open(efx->net_dev); 3943 3830 netif_device_attach(efx->net_dev); 3944 3831 3945 - #if !defined(CONFIG_SFC_SRIOV) 3946 - if (rc == -EPERM) 3947 - netif_err(efx, drv, efx->net_dev, 3948 - "Cannot change MAC address; use sfboot to enable mac-spoofing" 3949 - " on this interface\n"); 3950 - #else 3951 - if (rc == -EPERM) { 3832 + #ifdef CONFIG_SFC_SRIOV 3833 + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { 3952 3834 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3953 3835 3954 - /* Switch to PF and change MAC address on vport */ 3955 - if (efx->pci_dev->is_virtfn && pci_dev_pf) { 3956 - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3836 + if (rc == -EPERM) { 3837 + struct efx_nic *efx_pf; 3957 3838 3958 - if (!efx_ef10_sriov_set_vf_mac(efx_pf, 3839 + /* Switch to PF and change MAC address on vport */ 3840 + efx_pf = pci_get_drvdata(pci_dev_pf); 3841 + 3842 + rc = efx_ef10_sriov_set_vf_mac(efx_pf, 3959 3843 nic_data->vf_index, 3960 - efx->net_dev->dev_addr)) 3961 - return 0; 3962 - } 3963 - netif_err(efx, drv, efx->net_dev, 3964 - "Cannot change MAC address; use sfboot to enable mac-spoofing" 3965 - " on this interface\n"); 3966 - } else if (efx->pci_dev->is_virtfn) { 3967 - /* Successfully changed by VF (with MAC spoofing), so update the 3968 - * parent PF if 
possible. 3969 - */ 3970 - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3971 - 3972 - if (pci_dev_pf) { 3844 + efx->net_dev->dev_addr); 3845 + } else if (!rc) { 3973 3846 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3974 3847 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 3975 3848 unsigned int i; 3976 3849 3850 + /* MAC address successfully changed by VF (with MAC 3851 + * spoofing) so update the parent PF if possible. 3852 + */ 3977 3853 for (i = 0; i < efx_pf->vf_count; ++i) { 3978 3854 struct ef10_vf *vf = nic_data->vf + i; 3979 3855 ··· 3973 3871 } 3974 3872 } 3975 3873 } 3976 - } 3874 + } else 3977 3875 #endif 3876 + if (rc == -EPERM) { 3877 + netif_err(efx, drv, efx->net_dev, 3878 + "Cannot change MAC address; use sfboot to enable" 3879 + " mac-spoofing on this interface\n"); 3880 + } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { 3881 + /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC 3882 + * fall-back to the method of changing the MAC address on the 3883 + * vport. This only applies to PFs because such versions of 3884 + * MCFW do not support VFs. 3885 + */ 3886 + rc = efx_ef10_vport_set_mac_address(efx); 3887 + } else { 3888 + efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, 3889 + sizeof(inbuf), NULL, 0, rc); 3890 + } 3891 + 3978 3892 return rc; 3979 3893 } 3980 3894
+11 -48
drivers/net/ethernet/sfc/ef10_sriov.c
··· 29 29 NULL, 0, NULL); 30 30 } 31 31 32 - static int efx_ef10_vport_add_mac(struct efx_nic *efx, 33 - unsigned int port_id, u8 *mac) 34 - { 35 - MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 36 - 37 - MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 38 - ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 39 - 40 - return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 41 - sizeof(inbuf), NULL, 0, NULL); 42 - } 43 - 44 - static int efx_ef10_vport_del_mac(struct efx_nic *efx, 45 - unsigned int port_id, u8 *mac) 46 - { 47 - MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 48 - 49 - MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 50 - ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 51 - 52 - return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 53 - sizeof(inbuf), NULL, 0, NULL); 54 - } 55 - 56 32 static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, 57 33 unsigned int vswitch_type) 58 34 { ··· 109 133 MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id); 110 134 111 135 return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf), 112 - NULL, 0, NULL); 113 - } 114 - 115 - static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 116 - { 117 - MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 118 - 119 - MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 120 - return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 121 - NULL, 0, NULL); 122 - } 123 - 124 - static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 125 - { 126 - MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 127 - 128 - MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 129 - return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 130 136 NULL, 0, NULL); 131 137 } 132 138 ··· 598 640 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, 599 641 
vf->vlan, &vf->vport_id); 600 642 if (rc) 601 - goto reset_nic; 643 + goto reset_nic_up_write; 602 644 603 645 restore_mac: 604 646 if (!is_zero_ether_addr(vf->mac)) { 605 647 rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); 606 648 if (rc2) { 607 649 eth_zero_addr(vf->mac); 608 - goto reset_nic; 650 + goto reset_nic_up_write; 609 651 } 610 652 } 611 653 612 654 restore_evb_port: 613 655 rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); 614 656 if (rc2) 615 - goto reset_nic; 657 + goto reset_nic_up_write; 616 658 else 617 659 vf->vport_assigned = 1; 618 660 ··· 620 662 if (vf->efx) { 621 663 rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); 622 664 if (rc2) 623 - goto reset_nic; 665 + goto reset_nic_up_write; 624 666 } 625 667 626 668 restore_filters: 627 669 if (vf->efx) { 628 670 rc2 = vf->efx->type->filter_table_probe(vf->efx); 629 671 if (rc2) 630 - goto reset_nic; 672 + goto reset_nic_up_write; 673 + 674 + up_write(&vf->efx->filter_sem); 631 675 632 676 up_write(&vf->efx->filter_sem); 633 677 ··· 641 681 } 642 682 return rc; 643 683 684 + reset_nic_up_write: 685 + if (vf->efx) 686 + up_write(&vf->efx->filter_sem); 687 + 644 688 reset_nic: 645 689 if (vf->efx) { 646 - up_write(&vf->efx->filter_sem); 647 690 netif_err(efx, drv, efx->net_dev, 648 691 "Failed to restore VF - scheduling reset.\n"); 649 692 efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+6
drivers/net/ethernet/sfc/ef10_sriov.h
··· 65 65 int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); 66 66 void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); 67 67 void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); 68 + int efx_ef10_vport_add_mac(struct efx_nic *efx, 69 + unsigned int port_id, u8 *mac); 70 + int efx_ef10_vport_del_mac(struct efx_nic *efx, 71 + unsigned int port_id, u8 *mac); 72 + int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); 73 + int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); 68 74 69 75 #endif /* EF10_SRIOV_H */
+14
drivers/net/ethernet/sfc/efx.c
··· 245 245 */ 246 246 static int efx_process_channel(struct efx_channel *channel, int budget) 247 247 { 248 + struct efx_tx_queue *tx_queue; 248 249 int spent; 249 250 250 251 if (unlikely(!channel->enabled)) 251 252 return 0; 253 + 254 + efx_for_each_channel_tx_queue(tx_queue, channel) { 255 + tx_queue->pkts_compl = 0; 256 + tx_queue->bytes_compl = 0; 257 + } 252 258 253 259 spent = efx_nic_process_eventq(channel, budget); 254 260 if (spent && efx_channel_has_rx_queue(channel)) { ··· 263 257 264 258 efx_rx_flush_packet(channel); 265 259 efx_fast_push_rx_descriptors(rx_queue, true); 260 + } 261 + 262 + /* Update BQL */ 263 + efx_for_each_channel_tx_queue(tx_queue, channel) { 264 + if (tx_queue->bytes_compl) { 265 + netdev_tx_completed_queue(tx_queue->core_txq, 266 + tx_queue->pkts_compl, tx_queue->bytes_compl); 267 + } 266 268 } 267 269 268 270 return spent;
+2
drivers/net/ethernet/sfc/net_driver.h
··· 241 241 unsigned int read_count ____cacheline_aligned_in_smp; 242 242 unsigned int old_write_count; 243 243 unsigned int merge_events; 244 + unsigned int bytes_compl; 245 + unsigned int pkts_compl; 244 246 245 247 /* Members used only on the xmit path */ 246 248 unsigned int insert_count ____cacheline_aligned_in_smp;
+2 -1
drivers/net/ethernet/sfc/tx.c
··· 617 617 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 618 618 619 619 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 620 - netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 620 + tx_queue->pkts_compl += pkts_compl; 621 + tx_queue->bytes_compl += bytes_compl; 621 622 622 623 if (pkts_compl > 1) 623 624 ++tx_queue->merge_events;
+7 -18
drivers/net/ethernet/ti/cpsw.c
··· 138 138 #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) 139 139 #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) 140 140 141 - #define cpsw_enable_irq(priv) \ 142 - do { \ 143 - u32 i; \ 144 - for (i = 0; i < priv->num_irqs; i++) \ 145 - enable_irq(priv->irqs_table[i]); \ 146 - } while (0) 147 - #define cpsw_disable_irq(priv) \ 148 - do { \ 149 - u32 i; \ 150 - for (i = 0; i < priv->num_irqs; i++) \ 151 - disable_irq_nosync(priv->irqs_table[i]); \ 152 - } while (0) 153 - 154 141 #define cpsw_slave_index(priv) \ 155 142 ((priv->data.dual_emac) ? priv->emac_port : \ 156 143 priv->data.active_slave) ··· 496 509 (func)(slave++, ##arg); \ 497 510 } while (0) 498 511 #define cpsw_get_slave_ndev(priv, __slave_no__) \ 499 - (priv->slaves[__slave_no__].ndev) 512 + ((__slave_no__ < priv->data.slaves) ? \ 513 + priv->slaves[__slave_no__].ndev : NULL) 500 514 #define cpsw_get_slave_priv(priv, __slave_no__) \ 501 - ((priv->slaves[__slave_no__].ndev) ? \ 515 + (((__slave_no__ < priv->data.slaves) && \ 516 + (priv->slaves[__slave_no__].ndev)) ? \ 502 517 netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ 503 518 504 519 #define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ ··· 770 781 771 782 cpsw_intr_disable(priv); 772 783 if (priv->irq_enabled == true) { 773 - cpsw_disable_irq(priv); 784 + disable_irq_nosync(priv->irqs_table[0]); 774 785 priv->irq_enabled = false; 775 786 } 776 787 ··· 806 817 prim_cpsw = cpsw_get_slave_priv(priv, 0); 807 818 if (prim_cpsw->irq_enabled == false) { 808 819 prim_cpsw->irq_enabled = true; 809 - cpsw_enable_irq(priv); 820 + enable_irq(priv->irqs_table[0]); 810 821 } 811 822 } 812 823 ··· 1322 1333 if (prim_cpsw->irq_enabled == false) { 1323 1334 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { 1324 1335 prim_cpsw->irq_enabled = true; 1325 - cpsw_enable_irq(prim_cpsw); 1336 + enable_irq(prim_cpsw->irqs_table[0]); 1326 1337 } 1327 1338 } 1328 1339
+4 -4
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1530 1530 /* Map device registers */ 1531 1531 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1532 1532 lp->regs = devm_ioremap_resource(&pdev->dev, ethres); 1533 - if (!lp->regs) { 1533 + if (IS_ERR(lp->regs)) { 1534 1534 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); 1535 - ret = -ENOMEM; 1535 + ret = PTR_ERR(lp->regs); 1536 1536 goto free_netdev; 1537 1537 } 1538 1538 ··· 1599 1599 goto free_netdev; 1600 1600 } 1601 1601 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); 1602 - if (!lp->dma_regs) { 1602 + if (IS_ERR(lp->dma_regs)) { 1603 1603 dev_err(&pdev->dev, "could not map DMA regs\n"); 1604 - ret = -ENOMEM; 1604 + ret = PTR_ERR(lp->dma_regs); 1605 1605 goto free_netdev; 1606 1606 } 1607 1607 lp->rx_irq = irq_of_parse_and_map(np, 1);
+1
drivers/net/hamradio/bpqether.c
··· 482 482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 483 483 484 484 dev->flags = 0; 485 + dev->features = NETIF_F_LLTX; /* Allow recursion */ 485 486 486 487 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 487 488 dev->header_ops = &ax25_header_ops;
+1
drivers/net/macvtap.c
··· 1355 1355 class_unregister(macvtap_class); 1356 1356 cdev_del(&macvtap_cdev); 1357 1357 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); 1358 + idr_destroy(&minor_idr); 1358 1359 } 1359 1360 module_exit(macvtap_exit); 1360 1361
+1 -1
drivers/net/phy/Kconfig
··· 191 191 192 192 config MDIO_BUS_MUX_MMIOREG 193 193 tristate "Support for MMIO device-controlled MDIO bus multiplexers" 194 - depends on OF_MDIO 194 + depends on OF_MDIO && HAS_IOMEM 195 195 select MDIO_BUS_MUX 196 196 help 197 197 This module provides a driver for MDIO bus multiplexers that
+8
drivers/net/usb/cdc_ether.c
··· 523 523 #define REALTEK_VENDOR_ID 0x0bda 524 524 #define SAMSUNG_VENDOR_ID 0x04e8 525 525 #define LENOVO_VENDOR_ID 0x17ef 526 + #define NVIDIA_VENDOR_ID 0x0955 526 527 527 528 static const struct usb_device_id products[] = { 528 529 /* BLACKLIST !! ··· 707 706 /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 708 707 { 709 708 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM, 709 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 710 + .driver_info = 0, 711 + }, 712 + 713 + /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 714 + { 715 + USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, 710 716 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 711 717 .driver_info = 0, 712 718 },
+1 -1
drivers/net/usb/cdc_mbim.c
··· 158 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 159 159 goto err; 160 160 161 - ret = cdc_ncm_bind_common(dev, intf, data_altsetting); 161 + ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); 162 162 if (ret) 163 163 goto err; 164 164
+56 -7
drivers/net/usb/cdc_ncm.c
··· 6 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 7 7 * 8 8 * USB Host Driver for Network Control Model (NCM) 9 - * http://www.usb.org/developers/devclass_docs/NCM10.zip 9 + * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip 10 10 * 11 11 * The NCM encoding, decoding and initialization logic 12 12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h ··· 684 684 ctx->tx_curr_skb = NULL; 685 685 } 686 686 687 + kfree(ctx->delayed_ndp16); 688 + 687 689 kfree(ctx); 688 690 } 689 691 690 - int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting) 692 + int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) 691 693 { 692 694 const struct usb_cdc_union_desc *union_desc = NULL; 693 695 struct cdc_ncm_ctx *ctx; ··· 857 855 /* finish setting up the device specific data */ 858 856 cdc_ncm_setup(dev); 859 857 858 + /* Device-specific flags */ 859 + ctx->drvflags = drvflags; 860 + 861 + /* Allocate the delayed NDP if needed. */ 862 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { 863 + ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); 864 + if (!ctx->delayed_ndp16) 865 + goto error2; 866 + dev_info(&intf->dev, "NDP will be placed at end of frame for this device."); 867 + } 868 + 860 869 /* override ethtool_ops */ 861 870 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; 862 871 ··· 967 954 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) 968 955 return -ENODEV; 969 956 970 - /* The NCM data altsetting is fixed */ 971 - ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM); 957 + /* The NCM data altsetting is fixed, so we hard-coded it. 958 + * Additionally, generic NCM devices are assumed to accept arbitrarily 959 + * placed NDP. 
960 + */ 961 + ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); 972 962 973 963 /* 974 964 * We should get an event when network connection is "connected" or ··· 1002 986 struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data; 1003 987 size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex); 1004 988 989 + /* If NDP should be moved to the end of the NCM package, we can't follow the 990 + * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and 991 + * the wNdpIndex field in the header is actually not consistent with reality. It will be later. 992 + */ 993 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 994 + if (ctx->delayed_ndp16->dwSignature == sign) 995 + return ctx->delayed_ndp16; 996 + 1005 997 /* follow the chain of NDPs, looking for a match */ 1006 998 while (ndpoffset) { 1007 999 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); ··· 1019 995 } 1020 996 1021 997 /* align new NDP */ 1022 - cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); 998 + if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) 999 + cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); 1023 1000 1024 1001 /* verify that there is room for the NDP and the datagram (reserve) */ 1025 1002 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) ··· 1033 1008 nth16->wNdpIndex = cpu_to_le16(skb->len); 1034 1009 1035 1010 /* push a new empty NDP */ 1036 - ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); 1011 + if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) 1012 + ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); 1013 + else 1014 + ndp16 = ctx->delayed_ndp16; 1015 + 1037 1016 ndp16->dwSignature = sign; 1038 1017 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); 1039 1018 return ndp16; ··· 1052 1023 struct sk_buff *skb_out; 1053 1024 u16 n = 0, index, ndplen; 1054 1025 u8 ready2send = 0; 1026 
+ u32 delayed_ndp_size; 1027 + 1028 + /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated 1029 + * accordingly. Otherwise, we should check here. 1030 + */ 1031 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 1032 + delayed_ndp_size = ctx->max_ndp_size; 1033 + else 1034 + delayed_ndp_size = 0; 1055 1035 1056 1036 /* if there is a remaining skb, it gets priority */ 1057 1037 if (skb != NULL) { ··· 1115 1077 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); 1116 1078 1117 1079 /* check if we had enough room left for both NDP and frame */ 1118 - if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { 1080 + if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) { 1119 1081 if (n == 0) { 1120 1082 /* won't fit, MTU problem? */ 1121 1083 dev_kfree_skb_any(skb); ··· 1186 1148 ctx->tx_reason_max_datagram++; /* count reason for transmitting */ 1187 1149 /* frame goes out */ 1188 1150 /* variables will be reset at next call */ 1151 + } 1152 + 1153 + /* If requested, put NDP at end of frame. */ 1154 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { 1155 + nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; 1156 + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max); 1157 + nth16->wNdpIndex = cpu_to_le16(skb_out->len); 1158 + memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size); 1159 + 1160 + /* Zero out delayed NDP - signature checking will naturally fail. */ 1161 + ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size); 1189 1162 } 1190 1163 1191 1164 /* If collected data size is less or equal ctx->min_tx_pkt
+5 -2
drivers/net/usb/huawei_cdc_ncm.c
··· 73 73 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 74 74 int ret = -ENODEV; 75 75 struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data; 76 + int drvflags = 0; 76 77 77 78 /* altsetting should always be 1 for NCM devices - so we hard-coded 78 - * it here 79 + * it here. Some huawei devices will need the NDP part of the NCM package to 80 + * be at the end of the frame. 79 81 */ 80 - ret = cdc_ncm_bind_common(usbnet_dev, intf, 1); 82 + drvflags |= CDC_NCM_FLAG_NDP_TO_END; 83 + ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); 81 84 if (ret) 82 85 goto err; 83 86
+2
drivers/net/usb/r8152.c
··· 494 494 #define VENDOR_ID_REALTEK 0x0bda 495 495 #define VENDOR_ID_SAMSUNG 0x04e8 496 496 #define VENDOR_ID_LENOVO 0x17ef 497 + #define VENDOR_ID_NVIDIA 0x0955 497 498 498 499 #define MCU_TYPE_PLA 0x0100 499 500 #define MCU_TYPE_USB 0x0000 ··· 4118 4117 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4119 4118 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 4120 4119 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4120 + {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, 4121 4121 {} 4122 4122 }; 4123 4123
+4 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 1216 1216 static const u32 rxprod_reg[2] = { 1217 1217 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 1218 1218 }; 1219 - u32 num_rxd = 0; 1219 + u32 num_pkts = 0; 1220 1220 bool skip_page_frags = false; 1221 1221 struct Vmxnet3_RxCompDesc *rcd; 1222 1222 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; ··· 1235 1235 struct Vmxnet3_RxDesc *rxd; 1236 1236 u32 idx, ring_idx; 1237 1237 struct vmxnet3_cmd_ring *ring = NULL; 1238 - if (num_rxd >= quota) { 1238 + if (num_pkts >= quota) { 1239 1239 /* we may stop even before we see the EOP desc of 1240 1240 * the current pkt 1241 1241 */ 1242 1242 break; 1243 1243 } 1244 - num_rxd++; 1245 1244 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); 1246 1245 idx = rcd->rxdIdx; 1247 1246 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; ··· 1412 1413 napi_gro_receive(&rq->napi, skb); 1413 1414 1414 1415 ctx->skb = NULL; 1416 + num_pkts++; 1415 1417 } 1416 1418 1417 1419 rcd_done: ··· 1443 1443 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); 1444 1444 } 1445 1445 1446 - return num_rxd; 1446 + return num_pkts; 1447 1447 } 1448 1448 1449 1449
+1 -1
drivers/net/wan/z85230.c
··· 1044 1044 * @dev: The network device to attach 1045 1045 * @c: The Z8530 channel to configure in sync DMA mode. 1046 1046 * 1047 - * Set up a Z85x30 device for synchronous DMA tranmission. One 1047 + * Set up a Z85x30 device for synchronous DMA transmission. One 1048 1048 * ISA DMA channel must be available for this to work. The receive 1049 1049 * side is run in PIO mode, but then it has the bigger FIFO. 1050 1050 */
+2
include/linux/can/skb.h
··· 27 27 /** 28 28 * struct can_skb_priv - private additional data inside CAN sk_buffs 29 29 * @ifindex: ifindex of the first interface the CAN frame appeared on 30 + * @skbcnt: atomic counter to have an unique id together with skb pointer 30 31 * @cf: align to the following CAN frame at skb->data 31 32 */ 32 33 struct can_skb_priv { 33 34 int ifindex; 35 + int skbcnt; 34 36 struct can_frame cf[0]; 35 37 }; 36 38
+6 -1
include/linux/usb/cdc_ncm.h
··· 80 80 #define CDC_NCM_TIMER_INTERVAL_MIN 5UL 81 81 #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) 82 82 83 + /* Driver flags */ 84 + #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ 85 + 83 86 #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ 84 87 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) 85 88 #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) ··· 106 103 107 104 spinlock_t mtx; 108 105 atomic_t stop; 106 + int drvflags; 109 107 110 108 u32 timer_interval; 111 109 u32 max_ndp_size; 110 + struct usb_cdc_ncm_ndp16 *delayed_ndp16; 112 111 113 112 u32 tx_timer_pending; 114 113 u32 tx_curr_frame_num; ··· 138 133 }; 139 134 140 135 u8 cdc_ncm_select_altsetting(struct usb_interface *intf); 141 - int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 136 + int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); 142 137 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 143 138 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); 144 139 int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+1
include/uapi/linux/netconf.h
··· 15 15 NETCONFA_RP_FILTER, 16 16 NETCONFA_MC_FORWARDING, 17 17 NETCONFA_PROXY_NEIGH, 18 + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 18 19 __NETCONFA_MAX 19 20 }; 20 21 #define NETCONFA_MAX (__NETCONFA_MAX - 1)
+2 -2
lib/rhashtable.c
··· 610 610 iter->skip = 0; 611 611 } 612 612 613 + iter->p = NULL; 614 + 613 615 /* Ensure we see any new tables. */ 614 616 smp_rmb(); 615 617 ··· 621 619 iter->skip = 0; 622 620 return ERR_PTR(-EAGAIN); 623 621 } 624 - 625 - iter->p = NULL; 626 622 627 623 return NULL; 628 624 }
+1
net/bridge/br_forward.c
··· 42 42 } else { 43 43 skb_push(skb, ETH_HLEN); 44 44 br_drop_fake_rtable(skb); 45 + skb_sender_cpu_clear(skb); 45 46 dev_queue_xmit(skb); 46 47 } 47 48
+7 -9
net/bridge/br_mdb.c
··· 323 323 struct net_bridge_port_group *p; 324 324 struct net_bridge_port_group __rcu **pp; 325 325 struct net_bridge_mdb_htable *mdb; 326 + unsigned long now = jiffies; 326 327 int err; 327 328 328 329 mdb = mlock_dereference(br->mdb, br); ··· 348 347 if (unlikely(!p)) 349 348 return -ENOMEM; 350 349 rcu_assign_pointer(*pp, p); 350 + if (state == MDB_TEMPORARY) 351 + mod_timer(&p->timer, now + br->multicast_membership_interval); 351 352 352 353 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 353 354 return 0; ··· 374 371 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 375 372 return -EINVAL; 376 373 374 + memset(&ip, 0, sizeof(ip)); 377 375 ip.proto = entry->addr.proto; 378 376 if (ip.proto == htons(ETH_P_IP)) 379 377 ip.u.ip4 = entry->addr.u.ip4; ··· 421 417 if (!netif_running(br->dev) || br->multicast_disabled) 422 418 return -EINVAL; 423 419 420 + memset(&ip, 0, sizeof(ip)); 424 421 ip.proto = entry->addr.proto; 425 - if (ip.proto == htons(ETH_P_IP)) { 426 - if (timer_pending(&br->ip4_other_query.timer)) 427 - return -EBUSY; 428 - 422 + if (ip.proto == htons(ETH_P_IP)) 429 423 ip.u.ip4 = entry->addr.u.ip4; 430 424 #if IS_ENABLED(CONFIG_IPV6) 431 - } else { 432 - if (timer_pending(&br->ip6_other_query.timer)) 433 - return -EBUSY; 434 - 425 + else 435 426 ip.u.ip6 = entry->addr.u.ip6; 436 427 #endif 437 - } 438 428 439 429 spin_lock_bh(&br->multicast_lock); 440 430 mdb = mlock_dereference(br->mdb, br);
+11 -5
net/bridge/br_netfilter_hooks.c
··· 111 111 /* largest possible L2 header, see br_nf_dev_queue_xmit() */ 112 112 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN) 113 113 114 - #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 114 + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 115 115 struct brnf_frag_data { 116 116 char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH]; 117 117 u8 encap_size; ··· 694 694 } 695 695 #endif 696 696 697 + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 697 698 static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb, 698 699 int (*output)(struct sock *, struct sk_buff *)) 699 700 { ··· 713 712 714 713 return ip_do_fragment(sk, skb, output); 715 714 } 715 + #endif 716 716 717 717 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) 718 718 { ··· 744 742 struct brnf_frag_data *data; 745 743 746 744 if (br_validate_ipv4(skb)) 747 - return NF_DROP; 745 + goto drop; 748 746 749 747 IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; 750 748 ··· 769 767 struct brnf_frag_data *data; 770 768 771 769 if (br_validate_ipv6(skb)) 772 - return NF_DROP; 770 + goto drop; 773 771 774 772 IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; 775 773 ··· 784 782 785 783 if (v6ops) 786 784 return v6ops->fragment(sk, skb, br_nf_push_frag_xmit); 787 - else 788 - return -EMSGSIZE; 785 + 786 + kfree_skb(skb); 787 + return -EMSGSIZE; 789 788 } 790 789 #endif 791 790 nf_bridge_info_free(skb); 792 791 return br_dev_queue_push_xmit(sk, skb); 792 + drop: 793 + kfree_skb(skb); 794 + return 0; 793 795 } 794 796 795 797 /* PF_BRIDGE/POST_ROUTING ********************************************/
+1 -1
net/bridge/br_netfilter_ipv6.c
··· 104 104 { 105 105 const struct ipv6hdr *hdr; 106 106 struct net_device *dev = skb->dev; 107 - struct inet6_dev *idev = in6_dev_get(skb->dev); 107 + struct inet6_dev *idev = __in6_dev_get(skb->dev); 108 108 u32 pkt_len; 109 109 u8 ip6h_len = sizeof(struct ipv6hdr); 110 110
+2
net/bridge/br_netlink.c
··· 457 457 if (nla_len(attr) != sizeof(struct bridge_vlan_info)) 458 458 return -EINVAL; 459 459 vinfo = nla_data(attr); 460 + if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) 461 + return -EINVAL; 460 462 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { 461 463 if (vinfo_start) 462 464 return -EINVAL;
+7 -5
net/can/af_can.c
··· 89 89 struct s_stats can_stats; /* packet statistics */ 90 90 struct s_pstats can_pstats; /* receive list statistics */ 91 91 92 + static atomic_t skbcounter = ATOMIC_INIT(0); 93 + 92 94 /* 93 95 * af_can socket functions 94 96 */ ··· 312 310 return err; 313 311 } 314 312 315 - if (newskb) { 316 - if (!(newskb->tstamp.tv64)) 317 - __net_timestamp(newskb); 318 - 313 + if (newskb) 319 314 netif_rx_ni(newskb); 320 - } 321 315 322 316 /* update statistics */ 323 317 can_stats.tx_frames++; ··· 680 682 /* update statistics */ 681 683 can_stats.rx_frames++; 682 684 can_stats.rx_frames_delta++; 685 + 686 + /* create non-zero unique skb identifier together with *skb */ 687 + while (!(can_skb_prv(skb)->skbcnt)) 688 + can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter); 683 689 684 690 rcu_read_lock(); 685 691
+2
net/can/bcm.c
··· 261 261 262 262 can_skb_reserve(skb); 263 263 can_skb_prv(skb)->ifindex = dev->ifindex; 264 + can_skb_prv(skb)->skbcnt = 0; 264 265 265 266 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); 266 267 ··· 1218 1217 } 1219 1218 1220 1219 can_skb_prv(skb)->ifindex = dev->ifindex; 1220 + can_skb_prv(skb)->skbcnt = 0; 1221 1221 skb->dev = dev; 1222 1222 can_skb_set_owner(skb, sk); 1223 1223 err = can_send(skb, 1); /* send with loopback */
+4 -3
net/can/raw.c
··· 75 75 */ 76 76 77 77 struct uniqframe { 78 - ktime_t tstamp; 78 + int skbcnt; 79 79 const struct sk_buff *skb; 80 80 unsigned int join_rx_count; 81 81 }; ··· 133 133 134 134 /* eliminate multiple filter matches for the same skb */ 135 135 if (this_cpu_ptr(ro->uniq)->skb == oskb && 136 - ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) { 136 + this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) { 137 137 if (ro->join_filters) { 138 138 this_cpu_inc(ro->uniq->join_rx_count); 139 139 /* drop frame until all enabled filters matched */ ··· 144 144 } 145 145 } else { 146 146 this_cpu_ptr(ro->uniq)->skb = oskb; 147 - this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp; 147 + this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt; 148 148 this_cpu_ptr(ro->uniq)->join_rx_count = 1; 149 149 /* drop first frame to check all enabled filters? */ 150 150 if (ro->join_filters && ro->count > 1) ··· 749 749 750 750 can_skb_reserve(skb); 751 751 can_skb_prv(skb)->ifindex = dev->ifindex; 752 + can_skb_prv(skb)->skbcnt = 0; 752 753 753 754 err = memcpy_from_msg(skb_put(skb, size), msg, size); 754 755 if (err < 0)
+22 -23
net/core/dev.c
··· 677 677 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 678 678 return dev->netdev_ops->ndo_get_iflink(dev); 679 679 680 - /* If dev->rtnl_link_ops is set, it's a virtual interface. */ 681 - if (dev->rtnl_link_ops) 682 - return 0; 683 - 684 680 return dev->ifindex; 685 681 } 686 682 EXPORT_SYMBOL(dev_get_iflink); ··· 3448 3452 local_irq_save(flags); 3449 3453 3450 3454 rps_lock(sd); 3455 + if (!netif_running(skb->dev)) 3456 + goto drop; 3451 3457 qlen = skb_queue_len(&sd->input_pkt_queue); 3452 3458 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 3453 3459 if (qlen) { ··· 3471 3473 goto enqueue; 3472 3474 } 3473 3475 3476 + drop: 3474 3477 sd->dropped++; 3475 3478 rps_unlock(sd); 3476 3479 ··· 3774 3775 3775 3776 pt_prev = NULL; 3776 3777 3777 - rcu_read_lock(); 3778 - 3779 3778 another_round: 3780 3779 skb->skb_iif = skb->dev->ifindex; 3781 3780 ··· 3783 3786 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 3784 3787 skb = skb_vlan_untag(skb); 3785 3788 if (unlikely(!skb)) 3786 - goto unlock; 3789 + goto out; 3787 3790 } 3788 3791 3789 3792 #ifdef CONFIG_NET_CLS_ACT ··· 3813 3816 if (static_key_false(&ingress_needed)) { 3814 3817 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3815 3818 if (!skb) 3816 - goto unlock; 3819 + goto out; 3817 3820 3818 3821 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 3819 - goto unlock; 3822 + goto out; 3820 3823 } 3821 3824 #endif 3822 3825 #ifdef CONFIG_NET_CLS_ACT ··· 3834 3837 if (vlan_do_receive(&skb)) 3835 3838 goto another_round; 3836 3839 else if (unlikely(!skb)) 3837 - goto unlock; 3840 + goto out; 3838 3841 } 3839 3842 3840 3843 rx_handler = rcu_dereference(skb->dev->rx_handler); ··· 3846 3849 switch (rx_handler(&skb)) { 3847 3850 case RX_HANDLER_CONSUMED: 3848 3851 ret = NET_RX_SUCCESS; 3849 - goto unlock; 3852 + goto out; 3850 3853 case RX_HANDLER_ANOTHER: 3851 3854 goto another_round; 3852 3855 case RX_HANDLER_EXACT: ··· 3900 3903 ret = NET_RX_DROP; 3901 3904 } 3902 3905 3903 - unlock: 3904 
- rcu_read_unlock(); 3906 + out: 3905 3907 return ret; 3906 3908 } 3907 3909 ··· 3931 3935 3932 3936 static int netif_receive_skb_internal(struct sk_buff *skb) 3933 3937 { 3938 + int ret; 3939 + 3934 3940 net_timestamp_check(netdev_tstamp_prequeue, skb); 3935 3941 3936 3942 if (skb_defer_rx_timestamp(skb)) 3937 3943 return NET_RX_SUCCESS; 3938 3944 3945 + rcu_read_lock(); 3946 + 3939 3947 #ifdef CONFIG_RPS 3940 3948 if (static_key_false(&rps_needed)) { 3941 3949 struct rps_dev_flow voidflow, *rflow = &voidflow; 3942 - int cpu, ret; 3943 - 3944 - rcu_read_lock(); 3945 - 3946 - cpu = get_rps_cpu(skb->dev, skb, &rflow); 3950 + int cpu = get_rps_cpu(skb->dev, skb, &rflow); 3947 3951 3948 3952 if (cpu >= 0) { 3949 3953 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3950 3954 rcu_read_unlock(); 3951 3955 return ret; 3952 3956 } 3953 - rcu_read_unlock(); 3954 3957 } 3955 3958 #endif 3956 - return __netif_receive_skb(skb); 3959 + ret = __netif_receive_skb(skb); 3960 + rcu_read_unlock(); 3961 + return ret; 3957 3962 } 3958 3963 3959 3964 /** ··· 4499 4502 struct sk_buff *skb; 4500 4503 4501 4504 while ((skb = __skb_dequeue(&sd->process_queue))) { 4505 + rcu_read_lock(); 4502 4506 local_irq_enable(); 4503 4507 __netif_receive_skb(skb); 4508 + rcu_read_unlock(); 4504 4509 local_irq_disable(); 4505 4510 input_queue_head_incr(sd); 4506 4511 if (++work >= quota) { ··· 6138 6139 unlist_netdevice(dev); 6139 6140 6140 6141 dev->reg_state = NETREG_UNREGISTERING; 6142 + on_each_cpu(flush_backlog, dev, 1); 6141 6143 } 6142 6144 6143 6145 synchronize_net(); ··· 6409 6409 struct netdev_queue *tx; 6410 6410 size_t sz = count * sizeof(*tx); 6411 6411 6412 - BUG_ON(count < 1 || count > 0xffff); 6412 + if (count < 1 || count > 0xffff) 6413 + return -EINVAL; 6413 6414 6414 6415 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 6415 6416 if (!tx) { ··· 6773 6772 } 6774 6773 6775 6774 dev->reg_state = NETREG_UNREGISTERED; 6776 - 6777 - on_each_cpu(flush_backlog, dev, 1); 
6778 6775 6779 6776 netdev_wait_allrefs(dev); 6780 6777
+7 -6
net/core/gen_estimator.c
··· 66 66 67 67 NOTES. 68 68 69 - * avbps is scaled by 2^5, avpps is scaled by 2^10. 69 + * avbps and avpps are scaled by 2^5. 70 70 * both values are reported as 32 bit unsigned values. bps can 71 71 overflow for fast links : max speed being 34360Mbit/sec 72 72 * Minimal interval is HZ/4=250msec (it is the greatest common divisor ··· 85 85 struct gnet_stats_rate_est64 *rate_est; 86 86 spinlock_t *stats_lock; 87 87 int ewma_log; 88 + u32 last_packets; 89 + unsigned long avpps; 88 90 u64 last_bytes; 89 91 u64 avbps; 90 - u32 last_packets; 91 - u32 avpps; 92 92 struct rcu_head e_rcu; 93 93 struct rb_node node; 94 94 struct gnet_stats_basic_cpu __percpu *cpu_bstats; ··· 118 118 rcu_read_lock(); 119 119 list_for_each_entry_rcu(e, &elist[idx].list, list) { 120 120 struct gnet_stats_basic_packed b = {0}; 121 + unsigned long rate; 121 122 u64 brate; 122 - u32 rate; 123 123 124 124 spin_lock(e->stats_lock); 125 125 read_lock(&est_lock); ··· 133 133 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); 134 134 e->rate_est->bps = (e->avbps+0xF)>>5; 135 135 136 - rate = (b.packets - e->last_packets)<<(12 - idx); 136 + rate = b.packets - e->last_packets; 137 + rate <<= (7 - idx); 137 138 e->last_packets = b.packets; 138 139 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); 139 - e->rate_est->pps = (e->avpps+0x1FF)>>10; 140 + e->rate_est->pps = (e->avpps + 0xF) >> 5; 140 141 skip: 141 142 read_unlock(&est_lock); 142 143 spin_unlock(e->stats_lock);
+2 -7
net/core/pktgen.c
··· 3571 3571 pr_debug("%s removing thread\n", t->tsk->comm); 3572 3572 pktgen_rem_thread(t); 3573 3573 3574 - /* Wait for kthread_stop */ 3575 - while (!kthread_should_stop()) { 3576 - set_current_state(TASK_INTERRUPTIBLE); 3577 - schedule(); 3578 - } 3579 - __set_current_state(TASK_RUNNING); 3580 - 3581 3574 return 0; 3582 3575 } 3583 3576 ··· 3762 3769 } 3763 3770 3764 3771 t->net = pn; 3772 + get_task_struct(p); 3765 3773 wake_up_process(p); 3766 3774 wait_for_completion(&t->start_done); 3767 3775 ··· 3885 3891 t = list_entry(q, struct pktgen_thread, th_list); 3886 3892 list_del(&t->th_list); 3887 3893 kthread_stop(t->tsk); 3894 + put_task_struct(t->tsk); 3888 3895 kfree(t); 3889 3896 } 3890 3897
+96 -91
net/core/rtnetlink.c
··· 1328 1328 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 1329 1329 }; 1330 1330 1331 - static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { 1332 - [IFLA_VF_INFO] = { .type = NLA_NESTED }, 1333 - }; 1334 - 1335 1331 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 1336 1332 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 1337 1333 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, ··· 1484 1488 return 0; 1485 1489 } 1486 1490 1487 - static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) 1491 + static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 1488 1492 { 1489 - int rem, err = -EINVAL; 1490 - struct nlattr *vf; 1491 1493 const struct net_device_ops *ops = dev->netdev_ops; 1494 + int err = -EINVAL; 1492 1495 1493 - nla_for_each_nested(vf, attr, rem) { 1494 - switch (nla_type(vf)) { 1495 - case IFLA_VF_MAC: { 1496 - struct ifla_vf_mac *ivm; 1497 - ivm = nla_data(vf); 1498 - err = -EOPNOTSUPP; 1499 - if (ops->ndo_set_vf_mac) 1500 - err = ops->ndo_set_vf_mac(dev, ivm->vf, 1501 - ivm->mac); 1502 - break; 1503 - } 1504 - case IFLA_VF_VLAN: { 1505 - struct ifla_vf_vlan *ivv; 1506 - ivv = nla_data(vf); 1507 - err = -EOPNOTSUPP; 1508 - if (ops->ndo_set_vf_vlan) 1509 - err = ops->ndo_set_vf_vlan(dev, ivv->vf, 1510 - ivv->vlan, 1511 - ivv->qos); 1512 - break; 1513 - } 1514 - case IFLA_VF_TX_RATE: { 1515 - struct ifla_vf_tx_rate *ivt; 1516 - struct ifla_vf_info ivf; 1517 - ivt = nla_data(vf); 1518 - err = -EOPNOTSUPP; 1519 - if (ops->ndo_get_vf_config) 1520 - err = ops->ndo_get_vf_config(dev, ivt->vf, 1521 - &ivf); 1522 - if (err) 1523 - break; 1524 - err = -EOPNOTSUPP; 1525 - if (ops->ndo_set_vf_rate) 1526 - err = ops->ndo_set_vf_rate(dev, ivt->vf, 1527 - ivf.min_tx_rate, 1528 - ivt->rate); 1529 - break; 1530 - } 1531 - case IFLA_VF_RATE: { 1532 - struct ifla_vf_rate *ivt; 1533 - ivt = nla_data(vf); 1534 - err = -EOPNOTSUPP; 1535 - if (ops->ndo_set_vf_rate) 1536 - err = 
ops->ndo_set_vf_rate(dev, ivt->vf, 1537 - ivt->min_tx_rate, 1538 - ivt->max_tx_rate); 1539 - break; 1540 - } 1541 - case IFLA_VF_SPOOFCHK: { 1542 - struct ifla_vf_spoofchk *ivs; 1543 - ivs = nla_data(vf); 1544 - err = -EOPNOTSUPP; 1545 - if (ops->ndo_set_vf_spoofchk) 1546 - err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 1547 - ivs->setting); 1548 - break; 1549 - } 1550 - case IFLA_VF_LINK_STATE: { 1551 - struct ifla_vf_link_state *ivl; 1552 - ivl = nla_data(vf); 1553 - err = -EOPNOTSUPP; 1554 - if (ops->ndo_set_vf_link_state) 1555 - err = ops->ndo_set_vf_link_state(dev, ivl->vf, 1556 - ivl->link_state); 1557 - break; 1558 - } 1559 - case IFLA_VF_RSS_QUERY_EN: { 1560 - struct ifla_vf_rss_query_en *ivrssq_en; 1496 + if (tb[IFLA_VF_MAC]) { 1497 + struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 1561 1498 1562 - ivrssq_en = nla_data(vf); 1563 - err = -EOPNOTSUPP; 1564 - if (ops->ndo_set_vf_rss_query_en) 1565 - err = ops->ndo_set_vf_rss_query_en(dev, 1566 - ivrssq_en->vf, 1567 - ivrssq_en->setting); 1568 - break; 1569 - } 1570 - default: 1571 - err = -EINVAL; 1572 - break; 1573 - } 1574 - if (err) 1575 - break; 1499 + err = -EOPNOTSUPP; 1500 + if (ops->ndo_set_vf_mac) 1501 + err = ops->ndo_set_vf_mac(dev, ivm->vf, 1502 + ivm->mac); 1503 + if (err < 0) 1504 + return err; 1576 1505 } 1506 + 1507 + if (tb[IFLA_VF_VLAN]) { 1508 + struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 1509 + 1510 + err = -EOPNOTSUPP; 1511 + if (ops->ndo_set_vf_vlan) 1512 + err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 1513 + ivv->qos); 1514 + if (err < 0) 1515 + return err; 1516 + } 1517 + 1518 + if (tb[IFLA_VF_TX_RATE]) { 1519 + struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 1520 + struct ifla_vf_info ivf; 1521 + 1522 + err = -EOPNOTSUPP; 1523 + if (ops->ndo_get_vf_config) 1524 + err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 1525 + if (err < 0) 1526 + return err; 1527 + 1528 + err = -EOPNOTSUPP; 1529 + if (ops->ndo_set_vf_rate) 1530 + err = 
ops->ndo_set_vf_rate(dev, ivt->vf, 1531 + ivf.min_tx_rate, 1532 + ivt->rate); 1533 + if (err < 0) 1534 + return err; 1535 + } 1536 + 1537 + if (tb[IFLA_VF_RATE]) { 1538 + struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 1539 + 1540 + err = -EOPNOTSUPP; 1541 + if (ops->ndo_set_vf_rate) 1542 + err = ops->ndo_set_vf_rate(dev, ivt->vf, 1543 + ivt->min_tx_rate, 1544 + ivt->max_tx_rate); 1545 + if (err < 0) 1546 + return err; 1547 + } 1548 + 1549 + if (tb[IFLA_VF_SPOOFCHK]) { 1550 + struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 1551 + 1552 + err = -EOPNOTSUPP; 1553 + if (ops->ndo_set_vf_spoofchk) 1554 + err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 1555 + ivs->setting); 1556 + if (err < 0) 1557 + return err; 1558 + } 1559 + 1560 + if (tb[IFLA_VF_LINK_STATE]) { 1561 + struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 1562 + 1563 + err = -EOPNOTSUPP; 1564 + if (ops->ndo_set_vf_link_state) 1565 + err = ops->ndo_set_vf_link_state(dev, ivl->vf, 1566 + ivl->link_state); 1567 + if (err < 0) 1568 + return err; 1569 + } 1570 + 1571 + if (tb[IFLA_VF_RSS_QUERY_EN]) { 1572 + struct ifla_vf_rss_query_en *ivrssq_en; 1573 + 1574 + err = -EOPNOTSUPP; 1575 + ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 1576 + if (ops->ndo_set_vf_rss_query_en) 1577 + err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 1578 + ivrssq_en->setting); 1579 + if (err < 0) 1580 + return err; 1581 + } 1582 + 1577 1583 return err; 1578 1584 } 1579 1585 ··· 1771 1773 } 1772 1774 1773 1775 if (tb[IFLA_VFINFO_LIST]) { 1776 + struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 1774 1777 struct nlattr *attr; 1775 1778 int rem; 1779 + 1776 1780 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 1777 - if (nla_type(attr) != IFLA_VF_INFO) { 1781 + if (nla_type(attr) != IFLA_VF_INFO || 1782 + nla_len(attr) < NLA_HDRLEN) { 1778 1783 err = -EINVAL; 1779 1784 goto errout; 1780 1785 } 1781 - err = do_setvfinfo(dev, attr); 1786 + err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr, 1787 + 
ifla_vf_policy); 1788 + if (err < 0) 1789 + goto errout; 1790 + err = do_setvfinfo(dev, vfinfo); 1782 1791 if (err < 0) 1783 1792 goto errout; 1784 1793 status |= DO_SETLINK_NOTIFY;
+3 -3
net/dsa/dsa.c
··· 630 630 continue; 631 631 632 632 cd->sw_addr = be32_to_cpup(sw_addr); 633 - if (cd->sw_addr > PHY_MAX_ADDR) 633 + if (cd->sw_addr >= PHY_MAX_ADDR) 634 634 continue; 635 635 636 636 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) ··· 642 642 continue; 643 643 644 644 port_index = be32_to_cpup(port_reg); 645 + if (port_index >= DSA_MAX_PORTS) 646 + break; 645 647 646 648 port_name = of_get_property(port, "label", NULL); 647 649 if (!port_name) ··· 668 666 goto out_free_chip; 669 667 } 670 668 671 - if (port_index == DSA_MAX_PORTS) 672 - break; 673 669 } 674 670 } 675 671
+13
net/ipv4/devinet.c
··· 1740 1740 size += nla_total_size(4); 1741 1741 if (type == -1 || type == NETCONFA_PROXY_NEIGH) 1742 1742 size += nla_total_size(4); 1743 + if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) 1744 + size += nla_total_size(4); 1743 1745 1744 1746 return size; 1745 1747 } ··· 1782 1780 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, 1783 1781 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0) 1784 1782 goto nla_put_failure; 1783 + if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && 1784 + nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 1785 + IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0) 1786 + goto nla_put_failure; 1785 1787 1786 1788 nlmsg_end(skb, nlh); 1787 1789 return 0; ··· 1825 1819 [NETCONFA_FORWARDING] = { .len = sizeof(int) }, 1826 1820 [NETCONFA_RP_FILTER] = { .len = sizeof(int) }, 1827 1821 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, 1822 + [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) }, 1828 1823 }; 1829 1824 1830 1825 static int inet_netconf_get_devconf(struct sk_buff *in_skb, ··· 2053 2046 new_value != old_value) { 2054 2047 ifindex = devinet_conf_ifindex(net, cnf); 2055 2048 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, 2049 + ifindex, cnf); 2050 + } 2051 + if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 && 2052 + new_value != old_value) { 2053 + ifindex = devinet_conf_ifindex(net, cnf); 2054 + inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 2056 2055 ifindex, cnf); 2057 2056 } 2058 2057 }
+2 -2
net/ipv4/inet_diag.c
··· 152 152 inet6_sk(sk)->tclass) < 0) 153 153 goto errout; 154 154 155 - if (ipv6_only_sock(sk) && 156 - nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1)) 155 + if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && 156 + nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk))) 157 157 goto errout; 158 158 } 159 159 #endif
+5 -3
net/ipv4/ip_tunnel.c
··· 586 586 EXPORT_SYMBOL(ip_tunnel_encap); 587 587 588 588 static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, 589 - struct rtable *rt, __be16 df) 589 + struct rtable *rt, __be16 df, 590 + const struct iphdr *inner_iph) 590 591 { 591 592 struct ip_tunnel *tunnel = netdev_priv(dev); 592 593 int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; ··· 604 603 605 604 if (skb->protocol == htons(ETH_P_IP)) { 606 605 if (!skb_is_gso(skb) && 607 - (df & htons(IP_DF)) && mtu < pkt_size) { 606 + (inner_iph->frag_off & htons(IP_DF)) && 607 + mtu < pkt_size) { 608 608 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 609 609 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 610 610 return -E2BIG; ··· 739 737 goto tx_error; 740 738 } 741 739 742 - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { 740 + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { 743 741 ip_rt_put(rt); 744 742 goto tx_error; 745 743 }
+16 -9
net/ipv4/netfilter/arp_tables.c
··· 254 254 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 255 255 unsigned int verdict = NF_DROP; 256 256 const struct arphdr *arp; 257 - struct arpt_entry *e, *back; 257 + struct arpt_entry *e, **jumpstack; 258 258 const char *indev, *outdev; 259 259 const void *table_base; 260 + unsigned int cpu, stackidx = 0; 260 261 const struct xt_table_info *private; 261 262 struct xt_action_param acpar; 262 263 unsigned int addend; ··· 271 270 local_bh_disable(); 272 271 addend = xt_write_recseq_begin(); 273 272 private = table->private; 273 + cpu = smp_processor_id(); 274 274 /* 275 275 * Ensure we load private-> members after we've fetched the base 276 276 * pointer. 277 277 */ 278 278 smp_read_barrier_depends(); 279 279 table_base = private->entries; 280 + jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; 280 281 281 282 e = get_entry(table_base, private->hook_entry[hook]); 282 - back = get_entry(table_base, private->underflow[hook]); 283 283 284 284 acpar.in = state->in; 285 285 acpar.out = state->out; ··· 314 312 verdict = (unsigned int)(-v) - 1; 315 313 break; 316 314 } 317 - e = back; 318 - back = get_entry(table_base, back->comefrom); 315 + if (stackidx == 0) { 316 + e = get_entry(table_base, 317 + private->underflow[hook]); 318 + } else { 319 + e = jumpstack[--stackidx]; 320 + e = arpt_next_entry(e); 321 + } 319 322 continue; 320 323 } 321 324 if (table_base + v 322 325 != arpt_next_entry(e)) { 323 - /* Save old back ptr in next entry */ 324 - struct arpt_entry *next = arpt_next_entry(e); 325 - next->comefrom = (void *)back - table_base; 326 326 327 - /* set back pointer to next entry */ 328 - back = next; 327 + if (stackidx >= private->stacksize) { 328 + verdict = NF_DROP; 329 + break; 330 + } 331 + jumpstack[stackidx++] = e; 329 332 } 330 333 331 334 e = get_entry(table_base, v);
+3 -3
net/ipv6/ip6_input.c
··· 331 331 if (offset < 0) 332 332 goto out; 333 333 334 - if (!ipv6_is_mld(skb, nexthdr, offset)) 335 - goto out; 334 + if (ipv6_is_mld(skb, nexthdr, offset)) 335 + deliver = true; 336 336 337 - deliver = true; 337 + goto out; 338 338 } 339 339 /* unknown RA - process it normally */ 340 340 }
+1 -4
net/ipv6/route.c
··· 369 369 struct inet6_dev *idev; 370 370 371 371 dst_destroy_metrics_generic(dst); 372 - 373 - if (rt->rt6i_pcpu) 374 - free_percpu(rt->rt6i_pcpu); 375 - 372 + free_percpu(rt->rt6i_pcpu); 376 373 rt6_uncached_list_del(rt); 377 374 378 375 idev = rt->rt6i_idev;
+1 -1
net/netfilter/nf_queue.c
··· 213 213 214 214 if (verdict == NF_ACCEPT) { 215 215 next_hook: 216 - verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], 216 + verdict = nf_iterate(entry->state.hook_list, 217 217 skb, &entry->state, &elem); 218 218 } 219 219
+26 -14
net/netfilter/nfnetlink.c
··· 269 269 } 270 270 } 271 271 272 + enum { 273 + NFNL_BATCH_FAILURE = (1 << 0), 274 + NFNL_BATCH_DONE = (1 << 1), 275 + NFNL_BATCH_REPLAY = (1 << 2), 276 + }; 277 + 272 278 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, 273 279 u_int16_t subsys_id) 274 280 { ··· 282 276 struct net *net = sock_net(skb->sk); 283 277 const struct nfnetlink_subsystem *ss; 284 278 const struct nfnl_callback *nc; 285 - bool success = true, done = false; 286 279 static LIST_HEAD(err_list); 280 + u32 status; 287 281 int err; 288 282 289 283 if (subsys_id >= NFNL_SUBSYS_COUNT) 290 284 return netlink_ack(skb, nlh, -EINVAL); 291 285 replay: 286 + status = 0; 287 + 292 288 skb = netlink_skb_clone(oskb, GFP_KERNEL); 293 289 if (!skb) 294 290 return netlink_ack(oskb, nlh, -ENOMEM); ··· 344 336 if (type == NFNL_MSG_BATCH_BEGIN) { 345 337 /* Malformed: Batch begin twice */ 346 338 nfnl_err_reset(&err_list); 347 - success = false; 339 + status |= NFNL_BATCH_FAILURE; 348 340 goto done; 349 341 } else if (type == NFNL_MSG_BATCH_END) { 350 - done = true; 342 + status |= NFNL_BATCH_DONE; 351 343 goto done; 352 344 } else if (type < NLMSG_MIN_TYPE) { 353 345 err = -EINVAL; ··· 390 382 * original skb. 391 383 */ 392 384 if (err == -EAGAIN) { 393 - nfnl_err_reset(&err_list); 394 - ss->abort(oskb); 395 - nfnl_unlock(subsys_id); 396 - kfree_skb(skb); 397 - goto replay; 385 + status |= NFNL_BATCH_REPLAY; 386 + goto next; 398 387 } 399 388 } 400 389 ack: ··· 407 402 */ 408 403 nfnl_err_reset(&err_list); 409 404 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 410 - success = false; 405 + status |= NFNL_BATCH_FAILURE; 411 406 goto done; 412 407 } 413 408 /* We don't stop processing the batch on errors, thus, ··· 415 410 * triggers. 
416 411 */ 417 412 if (err) 418 - success = false; 413 + status |= NFNL_BATCH_FAILURE; 419 414 } 420 - 415 + next: 421 416 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 422 417 if (msglen > skb->len) 423 418 msglen = skb->len; 424 419 skb_pull(skb, msglen); 425 420 } 426 421 done: 427 - if (success && done) 428 - ss->commit(oskb); 429 - else 422 + if (status & NFNL_BATCH_REPLAY) { 430 423 ss->abort(oskb); 424 + nfnl_err_reset(&err_list); 425 + nfnl_unlock(subsys_id); 426 + kfree_skb(skb); 427 + goto replay; 428 + } else if (status == NFNL_BATCH_DONE) { 429 + ss->commit(oskb); 430 + } else { 431 + ss->abort(oskb); 432 + } 431 433 432 434 nfnl_err_deliver(&err_list, oskb); 433 435 nfnl_unlock(subsys_id);
+1 -1
net/netlink/af_netlink.c
··· 158 158 out: 159 159 spin_unlock(&netlink_tap_lock); 160 160 161 - if (found && nt->module) 161 + if (found) 162 162 module_put(nt->module); 163 163 164 164 return found ? 0 : -ENODEV;
+1 -1
net/rds/transport.c
··· 73 73 74 74 void rds_trans_put(struct rds_transport *trans) 75 75 { 76 - if (trans && trans->t_owner) 76 + if (trans) 77 77 module_put(trans->t_owner); 78 78 } 79 79
+8 -4
net/switchdev/switchdev.c
··· 171 171 * released. 172 172 */ 173 173 174 - attr->trans = SWITCHDEV_TRANS_ABORT; 175 - __switchdev_port_attr_set(dev, attr); 174 + if (err != -EOPNOTSUPP) { 175 + attr->trans = SWITCHDEV_TRANS_ABORT; 176 + __switchdev_port_attr_set(dev, attr); 177 + } 176 178 177 179 return err; 178 180 } ··· 251 249 * released. 252 250 */ 253 251 254 - obj->trans = SWITCHDEV_TRANS_ABORT; 255 - __switchdev_port_obj_add(dev, obj); 252 + if (err != -EOPNOTSUPP) { 253 + obj->trans = SWITCHDEV_TRANS_ABORT; 254 + __switchdev_port_obj_add(dev, obj); 255 + } 256 256 257 257 return err; 258 258 }
+1
net/tipc/socket.c
··· 2007 2007 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); 2008 2008 if (res) 2009 2009 goto exit; 2010 + security_sk_clone(sock->sk, new_sock->sk); 2010 2011 2011 2012 new_sk = new_sock->sk; 2012 2013 new_tsock = tipc_sk(new_sk);