
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:
"Here is a pile of bug fixes that accumulated while I was in Europe"

1) In fixing kernel leaks to userspace during copying of socket
addresses, we broke a case that used to work: the user providing a
buffer larger than the in-kernel generic socket address structure.
This broke Ruby, amongst other things. Fix from Dan Carpenter; a
sketch of the restored clamping behaviour follows this list.

2) Fix a regression added by byte queue limit support in the 8139cp
driver, from Yang Yingliang.

3) The addition of MSG_SENDPAGE_NOTLAST buggered up a few sendpage
implementations; they should just treat it the same as MSG_MORE.
Fix from Richard Weinberger and Shawn Landden; a sketch of the
convention follows the crypto hunks below.

4) Handle ICMPv4 errors received on IPv6 SIT tunnels correctly, from
Oussama Ghorbel. In particular, we should send an ICMPv6 unreachable
in such situations.

5) Fix some regressions in the recent genetlink fixes, in particular
get the pmcraid driver to use the new safer interfaces correctly.
From Johannes Berg.

6) macvtap was converted to use a per-cpu set of statistics, but some
code was still bumping tx_dropped elsewhere. From Jason Wang.

7) Fix build failure of xen-netback due to missing include on some
architectures, from Andy Whitecroft.

8) macvtap double counts received packets in statistics, fix from Vlad
Yasevich.

9) Fix various cases of using *_STATS_BH() when *_STATS() is more
appropriate, from Eric Dumazet and Hannes Frederic Sowa. A sketch of
the distinction follows this list.

10) Pktgen IPsec mode doesn't update the IPv4 header length and checksum
properly after encapsulation. Fix from Fan Du."
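
For item 1, the actual change is the net/compat.c hunk near the bottom of
this series. As a minimal sketch of the restored behaviour (a hypothetical
helper, not the kernel's real get_compat_msghdr(), shown only to illustrate
the clamp-instead-of-reject pattern):

    /*
     * Sketch: a caller-supplied address length larger than the kernel's
     * generic socket address structure is clamped rather than rejected
     * with -EINVAL, so oversized user buffers (as Ruby passes) keep
     * working. Hypothetical helper for illustration only.
     */
    static void sketch_clamp_msg_namelen(struct msghdr *kmsg)
    {
        if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
            kmsg->msg_namelen = sizeof(struct sockaddr_storage);
    }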

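For item 9, a simplified sketch of the distinction (these are not the exact
definitions from include/net/snmp.h, just the pattern): the _BH variants use
the cheaper non-atomic per-cpu operation and are only safe where bottom
halves are already disabled, so process-context paths such as connect() must
use the plain variants, which is what the hunks below switch to.

    /* Hypothetical, simplified per-cpu MIB counters for illustration. */
    #define SKETCH_MIB_INC_BH(mib, field) \
        __this_cpu_inc((mib)->counters[field])  /* BH-disabled context only */
    #define SKETCH_MIB_INC(mib, field) \
        this_cpu_inc((mib)->counters[field])    /* safe in any context */
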
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
net/mlx4_en: Remove selftest TX queues empty condition
{pktgen, xfrm} Update IPv4 header total len and checksum after transformation
virtio_net: make all RX paths handle errors consistently
virtio_net: fix error handling for mergeable buffers
virtio_net: Fixed a trivial typo (fitler --> filter)
netem: fix gemodel loss generator
netem: fix loss 4 state model
netem: missing break in ge loss generator
net/hsr: Support iproute print_opt ('ip -details ...')
net/hsr: Very small fix of comment style.
MAINTAINERS: Added net/hsr/ maintainer
ipv6: fix possible seqlock deadlock in ip6_finish_output2
ixgbe: Make ixgbe_identify_qsfp_module_generic static
ixgbe: turn NETIF_F_HW_L2FW_DOFFLOAD off by default
ixgbe: ixgbe_fwd_ring_down needs to be static
e1000: fix possible reset_task running after adapter down
e1000: fix lockdep warning in e1000_reset_task
e1000: prevent oops when adapter is being closed and reset simultaneously
igb: Fixed Wake On LAN support
inet: fix possible seqlock deadlocks
...

+492 -345
+6
MAINTAINERS
···
 S: Maintained
 F: drivers/net/usb/hso.c
 
+HSR NETWORK PROTOCOL
+M: Arvid Brodin <arvid.brodin@alten.se>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/hsr/
+
 HTCPEN TOUCHSCREEN DRIVER
 M: Pau Oliva Fora <pof@eslack.org>
 L: linux-input@vger.kernel.org
+3
crypto/algif_hash.c
···
     struct hash_ctx *ctx = ask->private;
     int err;
 
+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
+
     lock_sock(sk);
     sg_init_table(ctx->sgl.sg, 1);
     sg_set_page(ctx->sgl.sg, page, size, offset);
+3
crypto/algif_skcipher.c
···
     struct skcipher_sg_list *sgl;
     int err = -EINVAL;
 
+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
+
     lock_sock(sk);
     if (!ctx->more && ctx->used)
         goto unlock;
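
Both crypto hunks above (and the net/ipv4/udp.c hunk later in the series)
open with the same two lines. A hedged sketch of the convention in a
hypothetical sendpage implementation (my_sendpage() is illustrative, not a
real kernel entry point):

    /*
     * The splice/sendfile path sets MSG_SENDPAGE_NOTLAST on every page
     * except the final one; an implementation that only understands
     * MSG_MORE should fold the former into the latter up front.
     */
    static ssize_t my_sendpage(struct socket *sock, struct page *page,
                               int offset, size_t size, int flags)
    {
        if (flags & MSG_SENDPAGE_NOTLAST)
            flags |= MSG_MORE;  /* "not last" means more data follows */

        /* ... queue the page; transmit once !(flags & MSG_MORE) ... */
        return size;
    }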
+2 -2
drivers/net/bonding/bond_main.c
···
         if (!miimon) {
             pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
             pr_warning("Forcing miimon to 100msec\n");
-            miimon = 100;
+            miimon = BOND_DEFAULT_MIIMON;
         }
     }
 
···
         if (!miimon) {
             pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
             pr_warning("Forcing miimon to 100msec\n");
-            miimon = 100;
+            miimon = BOND_DEFAULT_MIIMON;
         }
     }
 
+9 -4
drivers/net/bonding/bond_options.c
···
         return -EPERM;
     }
 
-    if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
-        pr_err("%s: %s mode is incompatible with arp monitoring.\n",
-               bond->dev->name, bond_mode_tbl[mode].modename);
-        return -EINVAL;
+    if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+        pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
+            bond->dev->name, bond_mode_tbl[mode].modename);
+        /* disable arp monitoring */
+        bond->params.arp_interval = 0;
+        /* set miimon to default value */
+        bond->params.miimon = BOND_DEFAULT_MIIMON;
+        pr_info("%s: Setting MII monitoring interval to %d.\n",
+            bond->dev->name, bond->params.miimon);
     }
 
     /* don't cache arp_validate between modes */
+1 -3
drivers/net/bonding/bond_sysfs.c
···
         ret = -EINVAL;
         goto out;
     }
-    if (bond->params.mode == BOND_MODE_ALB ||
-        bond->params.mode == BOND_MODE_TLB ||
-        bond->params.mode == BOND_MODE_8023AD) {
+    if (BOND_NO_USES_ARP(bond->params.mode)) {
         pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
             bond->dev->name, bond->dev->name);
         ret = -EINVAL;
+7
drivers/net/bonding/bonding.h
···
 
 #define BOND_MAX_ARP_TARGETS 16
 
+#define BOND_DEFAULT_MIIMON 100
+
 #define IS_UP(dev) \
     ((((dev)->flags & IFF_UP) == IFF_UP) && \
      netif_running(dev) && \
···
 #define USES_PRIMARY(mode) \
     (((mode) == BOND_MODE_ACTIVEBACKUP) || \
      ((mode) == BOND_MODE_TLB) || \
+     ((mode) == BOND_MODE_ALB))
+
+#define BOND_NO_USES_ARP(mode) \
+    (((mode) == BOND_MODE_8023AD) || \
+     ((mode) == BOND_MODE_TLB) || \
      ((mode) == BOND_MODE_ALB))
 
 #define TX_QUEUE_OVERRIDE(mode) \
+16 -6
drivers/net/can/c_can/c_can.c
···
     return 0;
 }
 
-static int c_can_get_berr_counter(const struct net_device *dev,
-                                  struct can_berr_counter *bec)
+static int __c_can_get_berr_counter(const struct net_device *dev,
+                                    struct can_berr_counter *bec)
 {
     unsigned int reg_err_counter;
     struct c_can_priv *priv = netdev_priv(dev);
-
-    c_can_pm_runtime_get_sync(priv);
 
     reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
     bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
             ERR_CNT_REC_SHIFT;
     bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
 
+    return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+                                  struct can_berr_counter *bec)
+{
+    struct c_can_priv *priv = netdev_priv(dev);
+    int err;
+
+    c_can_pm_runtime_get_sync(priv);
+    err = __c_can_get_berr_counter(dev, bec);
     c_can_pm_runtime_put_sync(priv);
 
-    return 0;
+    return err;
 }
 
 /*
···
         if (!(val & (1 << (msg_obj_no - 1)))) {
             can_get_echo_skb(dev,
                     msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+            c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
             stats->tx_bytes += priv->read_reg(priv,
                     C_CAN_IFACE(MSGCTRL_REG, 0))
                     & IF_MCONT_DLC_MASK;
···
     if (unlikely(!skb))
         return 0;
 
-    c_can_get_berr_counter(dev, &bec);
+    __c_can_get_berr_counter(dev, &bec);
     reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
     rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
             ERR_CNT_RP_SHIFT;
+1 -1
drivers/net/can/flexcan.c
···
         dev_err(&pdev->dev, "no ipg clock defined\n");
         return PTR_ERR(clk_ipg);
     }
-    clock_freq = clk_get_rate(clk_ipg);
 
     clk_per = devm_clk_get(&pdev->dev, "per");
     if (IS_ERR(clk_per)) {
         dev_err(&pdev->dev, "no per clock defined\n");
         return PTR_ERR(clk_per);
     }
+    clock_freq = clk_get_rate(clk_per);
     }
 
     mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+9 -8
drivers/net/can/sja1000/sja1000.c
···
     uint8_t isrc, status;
     int n = 0;
 
-    /* Shared interrupts and IRQ off? */
-    if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
-        return IRQ_NONE;
-
     if (priv->pre_irq)
         priv->pre_irq(priv);
 
+    /* Shared interrupts and IRQ off? */
+    if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
+        goto out;
+
     while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
            (n < SJA1000_MAX_IRQ)) {
-        n++;
+
         status = priv->read_reg(priv, SJA1000_SR);
         /* check for absent controller due to hw unplug */
         if (status == 0xFF && sja1000_is_absent(priv))
-            return IRQ_NONE;
+            goto out;
 
         if (isrc & IRQ_WUI)
             netdev_warn(dev, "wakeup interrupt\n");
···
                 status = priv->read_reg(priv, SJA1000_SR);
                 /* check for absent controller */
                 if (status == 0xFF && sja1000_is_absent(priv))
-                    return IRQ_NONE;
+                    goto out;
             }
         }
         if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
···
             if (sja1000_err(dev, isrc, status))
                 break;
         }
+        n++;
     }
-
+ out:
     if (priv->post_irq)
         priv->post_irq(priv);
 
+6 -19
drivers/net/ethernet/broadcom/tg3.c
···
 static ssize_t tg3_show_temp(struct device *dev,
                              struct device_attribute *devattr, char *buf)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct net_device *netdev = pci_get_drvdata(pdev);
-    struct tg3 *tp = netdev_priv(netdev);
     struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+    struct tg3 *tp = dev_get_drvdata(dev);
     u32 temperature;
 
     spin_lock_bh(&tp->lock);
···
 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
                           TG3_TEMP_MAX_OFFSET);
 
-static struct attribute *tg3_attributes[] = {
+static struct attribute *tg3_attrs[] = {
     &sensor_dev_attr_temp1_input.dev_attr.attr,
     &sensor_dev_attr_temp1_crit.dev_attr.attr,
     &sensor_dev_attr_temp1_max.dev_attr.attr,
     NULL
 };
-
-static const struct attribute_group tg3_group = {
-    .attrs = tg3_attributes,
-};
+ATTRIBUTE_GROUPS(tg3);
 
 static void tg3_hwmon_close(struct tg3 *tp)
 {
     if (tp->hwmon_dev) {
         hwmon_device_unregister(tp->hwmon_dev);
         tp->hwmon_dev = NULL;
-        sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
     }
 }
 
 static void tg3_hwmon_open(struct tg3 *tp)
 {
-    int i, err;
+    int i;
     u32 size = 0;
     struct pci_dev *pdev = tp->pdev;
     struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
···
     if (!size)
         return;
 
-    /* Register hwmon sysfs hooks */
-    err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
-    if (err) {
-        dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
-        return;
-    }
-
-    tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+    tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
+                                                      tp, tg3_groups);
     if (IS_ERR(tp->hwmon_dev)) {
         tp->hwmon_dev = NULL;
         dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
-        sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
     }
 }
+1
drivers/net/ethernet/emulex/benet/be.h
···
 };
 
 #define be_physfn(adapter)    (!adapter->virtfn)
+#define be_virtfn(adapter)    (adapter->virtfn)
 #define sriov_enabled(adapter)    (adapter->num_vfs > 0)
 #define sriov_want(adapter)    (be_physfn(adapter) && \
                  (num_vfs || pci_num_vf(adapter->pdev)))
+7
drivers/net/ethernet/emulex/benet/be_cmds.c
···
     } else {
         req->hdr.version = 2;
         req->page_size = 1; /* 1 for 4K */
+
+        /* coalesce-wm field in this cmd is not relevant to Lancer.
+         * Lancer uses COMMON_MODIFY_CQ to set this field
+         */
+        if (!lancer_chip(adapter))
+            AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+                          ctxt, coalesce_wm);
         AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
                       no_delay);
         AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+8 -8
drivers/net/ethernet/emulex/benet/be_main.c
···
 
     be_roce_dev_close(adapter);
 
-    for_all_evt_queues(adapter, eqo, i) {
-        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+    if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+        for_all_evt_queues(adapter, eqo, i) {
             napi_disable(&eqo->napi);
             be_disable_busy_poll(eqo);
         }
···
         memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
     }
 
-    /* On BE3 VFs this cmd may fail due to lack of privilege.
-     * Ignore the failure as in this case pmac_id is fetched
-     * in the IFACE_CREATE cmd.
-     */
-    be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-                    &adapter->pmac_id[0], 0);
+    /* For BE3-R VFs, the PF programs the initial MAC address */
+    if (!(BEx_chip(adapter) && be_virtfn(adapter)))
+        be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+                        &adapter->pmac_id[0], 0);
     return 0;
 }
···
     if (adapter->wol)
         be_setup_wol(adapter, true);
 
+    be_intr_set(adapter, false);
     cancel_delayed_work_sync(&adapter->func_recovery_work);
 
     netif_device_detach(netdev);
···
     if (status)
         return status;
 
+    be_intr_set(adapter, true);
     /* tell fw we're ready to fire cmds */
     status = be_cmd_fw_init(adapter);
     if (status)
+5 -2
drivers/net/ethernet/intel/e1000/e1000.h
···
 
 #define E1000_MAX_INTR 10
 
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ */
+#define E1000_CHECK_RESET_COUNT 50
+
 /* TX/RX descriptor defines */
 #define E1000_DEFAULT_TXD 256
 #define E1000_MAX_TXD 256
···
     struct delayed_work watchdog_task;
     struct delayed_work fifo_stall_task;
     struct delayed_work phy_info_task;
-
-    struct mutex mutex;
 };
 
 enum e1000_state_t {
+23 -37
drivers/net/ethernet/intel/e1000/e1000_main.c
···
 {
     set_bit(__E1000_DOWN, &adapter->flags);
 
+    cancel_delayed_work_sync(&adapter->watchdog_task);
+
+    /*
+     * Since the watchdog task can reschedule other tasks, we should cancel
+     * it first, otherwise we can run into the situation when a work is
+     * still running after the adapter has been turned down.
+     */
+
+    cancel_delayed_work_sync(&adapter->phy_info_task);
+    cancel_delayed_work_sync(&adapter->fifo_stall_task);
+
     /* Only kill reset task if adapter is not resetting */
     if (!test_bit(__E1000_RESETTING, &adapter->flags))
         cancel_work_sync(&adapter->reset_task);
-
-    cancel_delayed_work_sync(&adapter->watchdog_task);
-    cancel_delayed_work_sync(&adapter->phy_info_task);
-    cancel_delayed_work_sync(&adapter->fifo_stall_task);
 }
···
     e1000_clean_all_rx_rings(adapter);
 }
 
-static void e1000_reinit_safe(struct e1000_adapter *adapter)
-{
-    while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
-        msleep(1);
-    mutex_lock(&adapter->mutex);
-    e1000_down(adapter);
-    e1000_up(adapter);
-    mutex_unlock(&adapter->mutex);
-    clear_bit(__E1000_RESETTING, &adapter->flags);
-}
-
 void e1000_reinit_locked(struct e1000_adapter *adapter)
 {
-    /* if rtnl_lock is not held the call path is bogus */
-    ASSERT_RTNL();
     WARN_ON(in_interrupt());
     while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
         msleep(1);
···
     e1000_irq_disable(adapter);
 
     spin_lock_init(&adapter->stats_lock);
-    mutex_init(&adapter->mutex);
 
     set_bit(__E1000_DOWN, &adapter->flags);
···
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
+    int count = E1000_CHECK_RESET_COUNT;
+
+    while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+        usleep_range(10000, 20000);
 
     WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
     e1000_down(adapter);
···
     struct e1000_adapter *adapter = container_of(work,
                                                  struct e1000_adapter,
                                                  phy_info_task.work);
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
-    mutex_lock(&adapter->mutex);
+
     e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
-    mutex_unlock(&adapter->mutex);
 }
 
 /**
···
     struct net_device *netdev = adapter->netdev;
     u32 tctl;
 
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
-    mutex_lock(&adapter->mutex);
     if (atomic_read(&adapter->tx_fifo_stall)) {
         if ((er32(TDT) == er32(TDH)) &&
             (er32(TDFT) == er32(TDFH)) &&
···
             schedule_delayed_work(&adapter->fifo_stall_task, 1);
         }
     }
-    mutex_unlock(&adapter->mutex);
 }
 
 bool e1000_has_link(struct e1000_adapter *adapter)
···
     struct e1000_tx_ring *txdr = adapter->tx_ring;
     u32 link, tctl;
 
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
-
-    mutex_lock(&adapter->mutex);
     link = e1000_has_link(adapter);
     if ((netif_carrier_ok(netdev)) && link)
         goto link_up;
···
             adapter->tx_timeout_count++;
             schedule_work(&adapter->reset_task);
             /* exit immediately since reset is imminent */
-            goto unlock;
+            return;
         }
     }
···
     /* Reschedule the task */
     if (!test_bit(__E1000_DOWN, &adapter->flags))
         schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
-
-unlock:
-    mutex_unlock(&adapter->mutex);
 }
 
 enum latency_range {
···
     struct e1000_adapter *adapter =
         container_of(work, struct e1000_adapter, reset_task);
 
-    if (test_bit(__E1000_DOWN, &adapter->flags))
-        return;
     e_err(drv, "Reset adapter\n");
-    e1000_reinit_safe(adapter);
+    e1000_reinit_locked(adapter);
 }
 
 /**
···
     netif_device_detach(netdev);
 
     if (netif_running(netdev)) {
+        int count = E1000_CHECK_RESET_COUNT;
+
+        while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+            usleep_range(10000, 20000);
+
         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
         e1000_down(adapter);
     }
+4 -3
drivers/net/ethernet/intel/igb/igb_ethtool.c
···
 {
     struct igb_adapter *adapter = netdev_priv(netdev);
 
-    wol->supported = WAKE_UCAST | WAKE_MCAST |
-                     WAKE_BCAST | WAKE_MAGIC |
-                     WAKE_PHY;
     wol->wolopts = 0;
 
     if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
         return;
+
+    wol->supported = WAKE_UCAST | WAKE_MCAST |
+                     WAKE_BCAST | WAKE_MAGIC |
+                     WAKE_PHY;
 
     /* apply any specific unsupported masks here */
     switch (adapter->hw.device_id) {
+4 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
     rx_ring->l2_accel_priv = NULL;
 }
 
-int ixgbe_fwd_ring_down(struct net_device *vdev,
-                        struct ixgbe_fwd_adapter *accel)
+static int ixgbe_fwd_ring_down(struct net_device *vdev,
+                               struct ixgbe_fwd_adapter *accel)
 {
     struct ixgbe_adapter *adapter = accel->real_adapter;
     unsigned int rxbase = accel->rx_base_queue;
···
                        NETIF_F_TSO |
                        NETIF_F_TSO6 |
                        NETIF_F_RXHASH |
-                       NETIF_F_RXCSUM |
-                       NETIF_F_HW_L2FW_DOFFLOAD;
+                       NETIF_F_RXCSUM;
 
-    netdev->hw_features = netdev->features;
+    netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
 
     switch (adapter->hw.mac.type) {
     case ixgbe_mac_82599EB:
+2 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
···
 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 
 /**
  *  ixgbe_identify_phy_generic - Get physical layer module
···
  *
  * Searches for and identifies the QSFP module and assigns appropriate PHY type
  **/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 {
     struct ixgbe_adapter *adapter = hw->back;
     s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
-1
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
···
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
                                         u16 *list_offset,
                                         u16 *data_offset);
-7
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
···
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);
     struct mlx4_en_dev *mdev = priv->mdev;
-    struct mlx4_en_tx_ring *tx_ring;
     int i, carrier_ok;
 
     memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
···
     carrier_ok = netif_carrier_ok(dev);
 
     netif_carrier_off(dev);
-retry_tx:
     /* Wait until all tx queues are empty.
      * there should not be any additional incoming traffic
      * since we turned the carrier off */
     msleep(200);
-    for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-        tx_ring = priv->tx_ring[i];
-        if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
-            goto retry_tx;
-    }
 
     if (priv->mdev->dev->caps.flags &
         MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
+2 -3
drivers/net/ethernet/realtek/8139cp.c
···
                  le32_to_cpu(txd->opts1) & 0xffff,
                  PCI_DMA_TODEVICE);
 
-        bytes_compl += skb->len;
-        pkts_compl++;
-
         if (status & LastFrag) {
             if (status & (TxError | TxFIFOUnder)) {
                 netif_dbg(cp, tx_err, cp->dev,
···
                 netif_dbg(cp, tx_done, cp->dev,
                           "tx done, slot %d\n", tx_tail);
             }
+            bytes_compl += skb->len;
+            pkts_compl++;
             dev_kfree_skb_irq(skb);
         }
 
+5
drivers/net/ethernet/realtek/r8169.c
···
     rtl_writephy(tp, 0x14, 0x9065);
     rtl_writephy(tp, 0x14, 0x1065);
 
+    /* Check ALDPS bit, disable it if enabled */
+    rtl_writephy(tp, 0x1f, 0x0a43);
+    if (rtl_readphy(tp, 0x10) & 0x0004)
+        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
     rtl_writephy(tp, 0x1f, 0x0000);
 }
 
+2
drivers/net/ethernet/sfc/mcdi.h
···
     unsigned long last_update;
     struct device *device;
     struct efx_mcdi_mon_attribute *attrs;
+    struct attribute_group group;
+    const struct attribute_group *groups[2];
     unsigned int n_attrs;
 };
 
+30 -48
drivers/net/ethernet/sfc/mcdi_mon.c
···
     return rc;
 }
 
-static ssize_t efx_mcdi_mon_show_name(struct device *dev,
-                                      struct device_attribute *attr,
-                                      char *buf)
-{
-    return sprintf(buf, "%s\n", KBUILD_MODNAME);
-}
-
 static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
                                   efx_dword_t *entry)
 {
-    struct efx_nic *efx = dev_get_drvdata(dev);
+    struct efx_nic *efx = dev_get_drvdata(dev->parent);
     struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
     int rc;
···
             efx_mcdi_sensor_type[mon_attr->type].label);
 }
 
-static int
+static void
 efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
                       ssize_t (*reader)(struct device *,
                                         struct device_attribute *, char *),
···
 {
     struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
     struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
-    int rc;
 
     strlcpy(attr->name, name, sizeof(attr->name));
     attr->index = index;
···
     attr->dev_attr.attr.name = attr->name;
     attr->dev_attr.attr.mode = S_IRUGO;
     attr->dev_attr.show = reader;
-    rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
-    if (rc == 0)
-        ++hwmon->n_attrs;
-    return rc;
+    hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
 }
 
 int efx_mcdi_mon_probe(struct efx_nic *efx)
···
     efx_mcdi_mon_update(efx);
 
     /* Allocate space for the maximum possible number of
-     * attributes for this set of sensors: name of the driver plus
+     * attributes for this set of sensors:
      * value, min, max, crit, alarm and label for each sensor.
      */
-    n_attrs = 1 + 6 * n_sensors;
+    n_attrs = 6 * n_sensors;
     hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
     if (!hwmon->attrs) {
         rc = -ENOMEM;
         goto fail;
     }
-
-    hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
-    if (IS_ERR(hwmon->device)) {
-        rc = PTR_ERR(hwmon->device);
+    hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+                                 GFP_KERNEL);
+    if (!hwmon->group.attrs) {
+        rc = -ENOMEM;
         goto fail;
     }
-
-    rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
-    if (rc)
-        goto fail;
 
     for (i = 0, j = -1, type = -1; ; i++) {
         enum efx_hwmon_type hwmon_type;
···
             page = type / 32;
             j = -1;
             if (page == n_pages)
-                return 0;
+                goto hwmon_register;
 
             MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
                            page);
···
         if (min1 != max1) {
             snprintf(name, sizeof(name), "%s%u_input",
                      hwmon_prefix, hwmon_index);
-            rc = efx_mcdi_mon_add_attr(
+            efx_mcdi_mon_add_attr(
                 efx, name, efx_mcdi_mon_show_value, i, type, 0);
-            if (rc)
-                goto fail;
 
             if (hwmon_type != EFX_HWMON_POWER) {
                 snprintf(name, sizeof(name), "%s%u_min",
                          hwmon_prefix, hwmon_index);
-                rc = efx_mcdi_mon_add_attr(
+                efx_mcdi_mon_add_attr(
                     efx, name, efx_mcdi_mon_show_limit,
                     i, type, min1);
-                if (rc)
-                    goto fail;
             }
 
             snprintf(name, sizeof(name), "%s%u_max",
                      hwmon_prefix, hwmon_index);
-            rc = efx_mcdi_mon_add_attr(
+            efx_mcdi_mon_add_attr(
                 efx, name, efx_mcdi_mon_show_limit,
                 i, type, max1);
-            if (rc)
-                goto fail;
 
             if (min2 != max2) {
                 /* Assume max2 is critical value.
···
                  */
                 snprintf(name, sizeof(name), "%s%u_crit",
                          hwmon_prefix, hwmon_index);
-                rc = efx_mcdi_mon_add_attr(
+                efx_mcdi_mon_add_attr(
                     efx, name, efx_mcdi_mon_show_limit,
                     i, type, max2);
-                if (rc)
-                    goto fail;
             }
         }
 
         snprintf(name, sizeof(name), "%s%u_alarm",
                  hwmon_prefix, hwmon_index);
-        rc = efx_mcdi_mon_add_attr(
+        efx_mcdi_mon_add_attr(
             efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
-        if (rc)
-            goto fail;
 
         if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
             efx_mcdi_sensor_type[type].label) {
             snprintf(name, sizeof(name), "%s%u_label",
                      hwmon_prefix, hwmon_index);
-            rc = efx_mcdi_mon_add_attr(
+            efx_mcdi_mon_add_attr(
                 efx, name, efx_mcdi_mon_show_label, i, type, 0);
-            if (rc)
-                goto fail;
         }
     }
+
+hwmon_register:
+    hwmon->groups[0] = &hwmon->group;
+    hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+                                                      KBUILD_MODNAME, NULL,
+                                                      hwmon->groups);
+    if (IS_ERR(hwmon->device)) {
+        rc = PTR_ERR(hwmon->device);
+        goto fail;
+    }
+
+    return 0;
 
 fail:
     efx_mcdi_mon_remove(efx);
···
 void efx_mcdi_mon_remove(struct efx_nic *efx)
 {
     struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
-    unsigned int i;
 
-    for (i = 0; i < hwmon->n_attrs; i++)
-        device_remove_file(&efx->pci_dev->dev,
-                           &hwmon->attrs[i].dev_attr);
-    kfree(hwmon->attrs);
     if (hwmon->device)
         hwmon_device_unregister(hwmon->device);
+    kfree(hwmon->attrs);
+    kfree(hwmon->group.attrs);
     efx_nic_free_buffer(efx, &hwmon->dma_buf);
 }
 
+4 -18
drivers/net/ethernet/smsc/smc91x.h
···
     defined(CONFIG_MACH_LITTLETON) ||\
     defined(CONFIG_MACH_ZYLONITE2) ||\
     defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2)
+    defined(CONFIG_MACH_STARGATE2) ||\
+    defined(CONFIG_ARCH_VERSATILE)
 
 #include <asm/mach-types.h>
 
···
 #define SMC_outl(v, a, r)    writel(v, (a) + (r))
 #define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
 #define SMC_outsl(a, r, p, l)    writesl((a) + (r), p, l)
+#define SMC_insw(a, r, p, l)    readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)    writesw((a) + (r), p, l)
 #define SMC_IRQ_FLAGS        (-1)    /* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
···
 
 #define RPC_LSA_DEFAULT        RPC_LED_TX_RX
 #define RPC_LSB_DEFAULT        RPC_LED_100_10
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT    1
-#define SMC_CAN_USE_16BIT    1
-#define SMC_CAN_USE_32BIT    1
-#define SMC_NOWAIT        1
-
-#define SMC_inb(a, r)        readb((a) + (r))
-#define SMC_inw(a, r)        readw((a) + (r))
-#define SMC_inl(a, r)        readl((a) + (r))
-#define SMC_outb(v, a, r)    writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)    writew(v, (a) + (r))
-#define SMC_outl(v, a, r)    writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)    writesl((a) + (r), p, l)
-#define SMC_IRQ_FLAGS        (-1)    /* from resource */
 
 #elif defined(CONFIG_MN10300)
 
+6 -5
drivers/net/ethernet/via/via-velocity.c
···
     unsigned int rx_done;
     unsigned long flags;
 
-    spin_lock_irqsave(&vptr->lock, flags);
     /*
      * Do rx and tx twice for performance (taken from the VIA
      * out-of-tree driver).
      */
-    rx_done = velocity_rx_srv(vptr, budget / 2);
+    rx_done = velocity_rx_srv(vptr, budget);
+    spin_lock_irqsave(&vptr->lock, flags);
     velocity_tx_srv(vptr);
-    rx_done += velocity_rx_srv(vptr, budget - rx_done);
-    velocity_tx_srv(vptr);
-
     /* If budget not fully consumed, exit the polling mode */
     if (rx_done < budget) {
         napi_complete(napi);
···
     if (ret < 0)
         goto out_free_tmp_vptr_1;
 
+    napi_disable(&vptr->napi);
+
     spin_lock_irqsave(&vptr->lock, flags);
 
     netif_stop_queue(dev);
···
     velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
     velocity_give_many_rx_descs(vptr);
+
+    napi_enable(&vptr->napi);
 
     mac_enable_int(vptr->mac_regs);
     netif_start_queue(dev);
+1 -11
drivers/net/macvtap.c
···
     rcu_read_lock();
     vlan = rcu_dereference(q->vlan);
     if (vlan)
-        vlan->dev->stats.tx_dropped++;
+        this_cpu_inc(vlan->pcpu_stats->tx_dropped);
     rcu_read_unlock();
 
     return err;
···
                 const struct sk_buff *skb,
                 const struct iovec *iv, int len)
 {
-    struct macvlan_dev *vlan;
     int ret;
     int vnet_hdr_len = 0;
     int vlan_offset = 0;
···
     copied += len;
 
 done:
-    rcu_read_lock();
-    vlan = rcu_dereference(q->vlan);
-    if (vlan) {
-        preempt_disable();
-        macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
-        preempt_enable();
-    }
-    rcu_read_unlock();
-
     return ret ? ret : copied;
 }
 
+15
drivers/net/phy/vitesse.c
···
 
 #define PHY_ID_VSC8234    0x000fc620
 #define PHY_ID_VSC8244    0x000fc6c0
+#define PHY_ID_VSC8514    0x00070670
 #define PHY_ID_VSC8574    0x000704a0
 #define PHY_ID_VSC8662    0x00070660
 #define PHY_ID_VSC8221    0x000fc550
···
     err = phy_write(phydev, MII_VSC8244_IMASK,
             (phydev->drv->phy_id == PHY_ID_VSC8234 ||
              phydev->drv->phy_id == PHY_ID_VSC8244 ||
+             phydev->drv->phy_id == PHY_ID_VSC8514 ||
              phydev->drv->phy_id == PHY_ID_VSC8574) ?
                 MII_VSC8244_IMASK_MASK :
                 MII_VSC8221_IMASK_MASK);
···
     .config_intr    = &vsc82xx_config_intr,
     .driver        = { .owner = THIS_MODULE,},
 }, {
+    .phy_id        = PHY_ID_VSC8514,
+    .name        = "Vitesse VSC8514",
+    .phy_id_mask    = 0x000ffff0,
+    .features    = PHY_GBIT_FEATURES,
+    .flags        = PHY_HAS_INTERRUPT,
+    .config_init    = &vsc824x_config_init,
+    .config_aneg    = &vsc82x4_config_aneg,
+    .read_status    = &genphy_read_status,
+    .ack_interrupt    = &vsc824x_ack_interrupt,
+    .config_intr    = &vsc82xx_config_intr,
+    .driver        = { .owner = THIS_MODULE,},
+}, {
     .phy_id        = PHY_ID_VSC8574,
     .name        = "Vitesse VSC8574",
     .phy_id_mask    = 0x000ffff0,
···
 static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
     { PHY_ID_VSC8234, 0x000ffff0 },
     { PHY_ID_VSC8244, 0x000fffc0 },
+    { PHY_ID_VSC8514, 0x000ffff0 },
     { PHY_ID_VSC8574, 0x000ffff0 },
     { PHY_ID_VSC8662, 0x000ffff0 },
     { PHY_ID_VSC8221, 0x000ffff0 },
+4
drivers/net/team/team.c
···
     return 0;
 }
 
+static void __team_carrier_check(struct team *team);
+
 static int team_user_linkup_option_set(struct team *team,
                                        struct team_gsetter_ctx *ctx)
 {
···
 
     port->user.linkup = ctx->data.bool_val;
     team_refresh_port_linkup(port);
+    __team_carrier_check(port->team);
     return 0;
 }
 
···
 
     port->user.linkup_enabled = ctx->data.bool_val;
     team_refresh_port_linkup(port);
+    __team_carrier_check(port->team);
     return 0;
 }
 
+89 -49
drivers/net/virtio_net.c
···
     return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_small(void *buf, unsigned int len)
 {
-    struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
-    struct sk_buff *curr_skb = head_skb;
-    char *buf;
-    struct page *page;
-    int num_buf, len, offset;
+    struct sk_buff * skb = buf;
 
-    num_buf = hdr->mhdr.num_buffers;
+    len -= sizeof(struct virtio_net_hdr);
+    skb_trim(skb, len);
+
+    return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                   struct receive_queue *rq,
+                                   void *buf,
+                                   unsigned int len)
+{
+    struct page *page = buf;
+    struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+    if (unlikely(!skb))
+        goto err;
+
+    return skb;
+
+err:
+    dev->stats.rx_dropped++;
+    give_pages(rq, page);
+    return NULL;
+}
+
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+                                         struct receive_queue *rq,
+                                         void *buf,
+                                         unsigned int len)
+{
+    struct skb_vnet_hdr *hdr = buf;
+    int num_buf = hdr->mhdr.num_buffers;
+    struct page *page = virt_to_head_page(buf);
+    int offset = buf - page_address(page);
+    struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+                                           MERGE_BUFFER_LEN);
+    struct sk_buff *curr_skb = head_skb;
+
+    if (unlikely(!curr_skb))
+        goto err_skb;
+
     while (--num_buf) {
-        int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+        int num_skb_frags;
+
         buf = virtqueue_get_buf(rq->vq, &len);
         if (unlikely(!buf)) {
-            pr_debug("%s: rx error: %d buffers missing\n",
-                     head_skb->dev->name, hdr->mhdr.num_buffers);
-            head_skb->dev->stats.rx_length_errors++;
-            return -EINVAL;
+            pr_debug("%s: rx error: %d buffers out of %d missing\n",
+                     dev->name, num_buf, hdr->mhdr.num_buffers);
+            dev->stats.rx_length_errors++;
+            goto err_buf;
         }
         if (unlikely(len > MERGE_BUFFER_LEN)) {
             pr_debug("%s: rx error: merge buffer too long\n",
-                     head_skb->dev->name);
+                     dev->name);
             len = MERGE_BUFFER_LEN;
         }
+
+        page = virt_to_head_page(buf);
+        --rq->num;
+
+        num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
         if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
             struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-            if (unlikely(!nskb)) {
-                head_skb->dev->stats.rx_dropped++;
-                return -ENOMEM;
-            }
+
+            if (unlikely(!nskb))
+                goto err_skb;
             if (curr_skb == head_skb)
                 skb_shinfo(curr_skb)->frag_list = nskb;
             else
···
             head_skb->len += len;
             head_skb->truesize += MERGE_BUFFER_LEN;
         }
-        page = virt_to_head_page(buf);
-        offset = buf - (char *)page_address(page);
+        offset = buf - page_address(page);
         if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
             put_page(page);
             skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
···
             skb_add_rx_frag(curr_skb, num_skb_frags, page,
                             offset, len, MERGE_BUFFER_LEN);
         }
+    }
+
+    return head_skb;
+
+err_skb:
+    put_page(page);
+    while (--num_buf) {
+        buf = virtqueue_get_buf(rq->vq, &len);
+        if (unlikely(!buf)) {
+            pr_debug("%s: rx error: %d buffers missing\n",
+                     dev->name, num_buf);
+            dev->stats.rx_length_errors++;
+            break;
+        }
+        page = virt_to_head_page(buf);
+        put_page(page);
         --rq->num;
     }
-    return 0;
+err_buf:
+    dev->stats.rx_dropped++;
+    dev_kfree_skb(head_skb);
+    return NULL;
 }
 
 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
···
     struct net_device *dev = vi->dev;
     struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
     struct sk_buff *skb;
-    struct page *page;
     struct skb_vnet_hdr *hdr;
 
     if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
···
         return;
     }
 
-    if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-        skb = buf;
-        len -= sizeof(struct virtio_net_hdr);
-        skb_trim(skb, len);
-    } else if (vi->mergeable_rx_bufs) {
-        struct page *page = virt_to_head_page(buf);
-        skb = page_to_skb(rq, page,
-                          (char *)buf - (char *)page_address(page),
-                          len, MERGE_BUFFER_LEN);
-        if (unlikely(!skb)) {
-            dev->stats.rx_dropped++;
-            put_page(page);
-            return;
-        }
-        if (receive_mergeable(rq, skb)) {
-            dev_kfree_skb(skb);
-            return;
-        }
-    } else {
-        page = buf;
-        skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-        if (unlikely(!skb)) {
-            dev->stats.rx_dropped++;
-            give_pages(rq, page);
-            return;
-        }
-    }
+    if (vi->mergeable_rx_bufs)
+        skb = receive_mergeable(dev, rq, buf, len);
+    else if (vi->big_packets)
+        skb = receive_big(dev, rq, buf, len);
+    else
+        skb = receive_small(buf, len);
+
+    if (unlikely(!skb))
+        return;
 
     hdr = skb_vnet_hdr(skb);
 
···
     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                               VIRTIO_NET_CTRL_MAC_TABLE_SET,
                               sg, NULL))
-        dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+        dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
     kfree(buf);
 }
+1
drivers/net/xen-netback/netback.c
···
 #include <linux/udp.h>
 
 #include <net/tcp.h>
+#include <net/ip6_checksum.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
+15 -5
drivers/scsi/pmcraid.c
···
 };
 #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
 
+static struct genl_multicast_group pmcraid_mcgrps[] = {
+    { .name = "events", /* not really used - see ID discussion below */ },
+};
+
 static struct genl_family pmcraid_event_family = {
-    .id = GENL_ID_GENERATE,
+    /*
+     * Due to prior multicast group abuse (the code having assumed that
+     * the family ID can be used as a multicast group ID) we need to
+     * statically allocate a family (and thus group) ID.
+     */
+    .id = GENL_ID_PMCRAID,
     .name = "pmcraid",
     .version = 1,
-    .maxattr = PMCRAID_AEN_ATTR_MAX
+    .maxattr = PMCRAID_AEN_ATTR_MAX,
+    .mcgrps = pmcraid_mcgrps,
+    .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
 };
 
 /**
···
         return result;
     }
 
-    result =
-        genlmsg_multicast(&pmcraid_event_family, skb, 0,
-                          pmcraid_event_family.id, GFP_ATOMIC);
+    result = genlmsg_multicast(&pmcraid_event_family, skb,
+                               0, 0, GFP_ATOMIC);
 
     /* If there are no listeners, genlmsg_multicast may return non-zero
      * value.
+1 -1
include/net/ip.h
···
 int ip_ra_control(struct sock *sk, unsigned char on,
                   void (*destructor)(struct sock *));
 
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                    u32 info, u8 *payload);
 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
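
A hedged sketch of the calling convention this new out-parameter establishes
(sketch_recvmsg() is hypothetical; the pattern is taken from the udp.c and
raw.c hunks later in this series): the error-queue path must report how many
address bytes it wrote, otherwise recvmsg() hands userspace a stale or
uninitialized msg_namelen.

    /* Hypothetical recvmsg fragment, for illustration only. */
    static int sketch_recvmsg(struct sock *sk, struct msghdr *msg,
                              size_t len, int flags, int *addr_len)
    {
        if (flags & MSG_ERRQUEUE)
            return ip_recv_error(sk, msg, len, addr_len); /* sets *addr_len */

        /* ... the normal datagram path fills *addr_len itself ... */
        return 0;
    }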
+4 -2
include/net/ipv6.h
···
 
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                    int *addr_len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+                     int *addr_len);
 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                      u32 info, u8 *payload);
 void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+2 -1
include/net/ping.h
···
 
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 struct pingv6_ops {
-    int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
+    int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
+                           int *addr_len);
     int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
                                  struct sk_buff *skb);
     int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+1
include/net/sctp/structs.h
···
 #define SCTP_NEED_FRTX 0x1
 #define SCTP_DONT_FRTX 0x2
     __u16 rtt_in_progress:1,    /* This chunk used for RTT calc? */
+        resent:1,        /* Has this chunk ever been resent. */
         has_tsn:1,        /* Does this chunk have a TSN yet? */
         has_ssn:1,        /* Does this chunk have a SSN yet? */
         singleton:1,        /* Only chunk in the packet? */
+1
include/uapi/linux/genetlink.h
···
 #define GENL_ID_GENERATE    0
 #define GENL_ID_CTRL        NLMSG_MIN_TYPE
 #define GENL_ID_VFS_DQUOT    (NLMSG_MIN_TYPE + 1)
+#define GENL_ID_PMCRAID        (NLMSG_MIN_TYPE + 2)
 
 /**************************************************************************
  * Controller
+3 -1
include/uapi/linux/if_link.h
···
     IFLA_HSR_UNSPEC,
     IFLA_HSR_SLAVE1,
     IFLA_HSR_SLAVE2,
-    IFLA_HSR_MULTICAST_SPEC,
+    IFLA_HSR_MULTICAST_SPEC,    /* Last byte of supervision addr */
+    IFLA_HSR_SUPERVISION_ADDR,    /* Supervision frame multicast addr */
+    IFLA_HSR_SEQ_NR,
     __IFLA_HSR_MAX,
 };
 
+1
include/uapi/linux/packet_diag.h
···
 };
 
 enum {
+    /* PACKET_DIAG_NONE, standard nl API requires this attribute! */
     PACKET_DIAG_INFO,
     PACKET_DIAG_MCLIST,
     PACKET_DIAG_RX_RING,
+1
include/uapi/linux/unix_diag.h
···
 };
 
 enum {
+    /* UNIX_DIAG_NONE, standard nl API requires this attribute! */
     UNIX_DIAG_NAME,
     UNIX_DIAG_VFS,
     UNIX_DIAG_PEER,
+1 -1
net/compat.c
···
         __get_user(kmsg->msg_flags, &umsg->msg_flags))
         return -EFAULT;
     if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
-        return -EINVAL;
+        kmsg->msg_namelen = sizeof(struct sockaddr_storage);
     kmsg->msg_name = compat_ptr(tmp1);
     kmsg->msg_iov = compat_ptr(tmp2);
     kmsg->msg_control = compat_ptr(tmp3);
+7
net/core/pktgen.c
···
     if (x) {
         int ret;
         __u8 *eth;
+        struct iphdr *iph;
+
         nhead = x->props.header_len - skb_headroom(skb);
         if (nhead > 0) {
             ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
···
         eth = (__u8 *) skb_push(skb, ETH_HLEN);
         memcpy(eth, pkt_dev->hh, 12);
         *(u16 *) &eth[12] = protocol;
+
+        /* Update IPv4 header len as well as checksum value */
+        iph = ip_hdr(skb);
+        iph->tot_len = htons(skb->len - ETH_HLEN);
+        ip_send_check(iph);
     }
 }
 return 1;
+2 -1
net/hsr/hsr_framereg.c
···
 static bool seq_nr_after(u16 a, u16 b)
 {
     /* Remove inconsistency where
-     * seq_nr_after(a, b) == seq_nr_before(a, b) */
+     * seq_nr_after(a, b) == seq_nr_before(a, b)
+     */
     if ((int) b - a == 32768)
         return false;
 
+28
net/hsr/hsr_netlink.c
···
     [IFLA_HSR_SLAVE1]        = { .type = NLA_U32 },
     [IFLA_HSR_SLAVE2]        = { .type = NLA_U32 },
     [IFLA_HSR_MULTICAST_SPEC]    = { .type = NLA_U8 },
+    [IFLA_HSR_SUPERVISION_ADDR]    = { .type = NLA_BINARY, .len = ETH_ALEN },
+    [IFLA_HSR_SEQ_NR]        = { .type = NLA_U16 },
 };
 
 
···
     return hsr_dev_finalize(dev, link, multicast_spec);
 }
 
+static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+    struct hsr_priv *hsr_priv;
+
+    hsr_priv = netdev_priv(dev);
+
+    if (hsr_priv->slave[0])
+        if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
+            goto nla_put_failure;
+
+    if (hsr_priv->slave[1])
+        if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
+            goto nla_put_failure;
+
+    if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
+                hsr_priv->sup_multicast_addr) ||
+        nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
+        goto nla_put_failure;
+
+    return 0;
+
+nla_put_failure:
+    return -EMSGSIZE;
+}
+
 static struct rtnl_link_ops hsr_link_ops __read_mostly = {
     .kind        = "hsr",
     .maxtype    = IFLA_HSR_MAX,
···
     .priv_size    = sizeof(struct hsr_priv),
     .setup        = hsr_dev_setup,
     .newlink    = hsr_newlink,
+    .fill_info    = hsr_fill_info,
 };
 
 
+2 -1
net/ipv4/ip_sockglue.c
···
 /*
  *    Handle MSG_ERRQUEUE
  */
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
     struct sock_exterr_skb *serr;
     struct sk_buff *skb, *skb2;
···
                            serr->addr_offset);
         sin->sin_port = serr->port;
         memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+        *addr_len = sizeof(*sin);
     }
 
     memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+4 -3
net/ipv4/ping.c
···
         err = PTR_ERR(rt);
         rt = NULL;
         if (err == -ENETUNREACH)
-            IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+            IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
         goto out;
     }
 
···
 
     if (flags & MSG_ERRQUEUE) {
         if (family == AF_INET) {
-            return ip_recv_error(sk, msg, len);
+            return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
         } else if (family == AF_INET6) {
-            return pingv6_ops.ipv6_recv_error(sk, msg, len);
+            return pingv6_ops.ipv6_recv_error(sk, msg, len,
+                                              addr_len);
 #endif
         }
     }
-8
net/ipv4/protocol.c
···
 const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
-/*
- *    Add a protocol handler to the hash tables
- */
-
 int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
     if (!prot->netns_ok) {
···
             NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet_add_offload);
-
-/*
- *    Remove a protocol from the hash tables.
- */
 
 int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
+1 -1
net/ipv4/raw.c
···
         goto out;
 
     if (flags & MSG_ERRQUEUE) {
-        err = ip_recv_error(sk, msg, len);
+        err = ip_recv_error(sk, msg, len, addr_len);
         goto out;
     }
 
+1 -1
net/ipv4/tcp_ipv4.c
···
     if (IS_ERR(rt)) {
         err = PTR_ERR(rt);
         if (err == -ENETUNREACH)
-            IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+            IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
         return err;
     }
 
-2
net/ipv4/tcp_memcontrol.c
···
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
     struct cg_proto *cg_proto;
-    u64 old_lim;
     int i;
     int ret;
 
···
     if (val > RES_COUNTER_MAX)
         val = RES_COUNTER_MAX;
 
-    old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
     ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
     if (ret)
         return ret;
+15 -16
net/ipv4/tcp_offload.c
···
 {
     const struct iphdr *iph = skb_gro_network_header(skb);
     __wsum wsum;
-    __sum16 sum;
+
+    /* Don't bother verifying checksum if we're going to flush anyway. */
+    if (NAPI_GRO_CB(skb)->flush)
+        goto skip_csum;
+
+    wsum = skb->csum;
 
     switch (skb->ip_summed) {
+    case CHECKSUM_NONE:
+        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+                            0);
+
+        /* fall through */
+
     case CHECKSUM_COMPLETE:
         if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
-                          skb->csum)) {
+                          wsum)) {
             skb->ip_summed = CHECKSUM_UNNECESSARY;
             break;
         }
-flush:
+
         NAPI_GRO_CB(skb)->flush = 1;
         return NULL;
-
-    case CHECKSUM_NONE:
-        wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                  skb_gro_len(skb), IPPROTO_TCP, 0);
-        sum = csum_fold(skb_checksum(skb,
-                                     skb_gro_offset(skb),
-                                     skb_gro_len(skb),
-                                     wsum));
-        if (sum)
-            goto flush;
-
-        skb->ip_summed = CHECKSUM_UNNECESSARY;
-        break;
     }
 
+skip_csum:
     return tcp_gro_receive(head, skb);
 }
 
+5 -2
net/ipv4/udp.c
···
         err = PTR_ERR(rt);
         rt = NULL;
         if (err == -ENETUNREACH)
-            IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+            IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
         goto out;
     }
 
···
     struct inet_sock *inet = inet_sk(sk);
     struct udp_sock *up = udp_sk(sk);
     int ret;
+
+    if (flags & MSG_SENDPAGE_NOTLAST)
+        flags |= MSG_MORE;
 
     if (!up->pending) {
         struct msghdr msg = { .msg_flags = flags|MSG_MORE };
···
     bool slow;
 
     if (flags & MSG_ERRQUEUE)
-        return ip_recv_error(sk, msg, len);
+        return ip_recv_error(sk, msg, len, addr_len);
 
 try_again:
     skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+6 -2
net/ipv6/datagram.c
···
 /*
  *    Handle MSG_ERRQUEUE
  */
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
     struct ipv6_pinfo *np = inet6_sk(sk);
     struct sock_exterr_skb *serr;
···
                            &sin->sin6_addr);
             sin->sin6_scope_id = 0;
         }
+        *addr_len = sizeof(*sin);
     }
 
     memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
···
         if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
             sin->sin6_family = AF_INET6;
             sin->sin6_flowinfo = 0;
+            sin->sin6_port = 0;
             if (skb->protocol == htons(ETH_P_IPV6)) {
                 sin->sin6_addr = ipv6_hdr(skb)->saddr;
                 if (np->rxopt.all)
···
 /*
  *    Handle IPV6_RECVPATHMTU
  */
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+                     int *addr_len)
 {
     struct ipv6_pinfo *np = inet6_sk(sk);
     struct sk_buff *skb;
···
         sin->sin6_port = 0;
         sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
         sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
+        *addr_len = sizeof(*sin);
     }
 
     put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+2 -2
net/ipv6/ip6_output.c
···
     }
     rcu_read_unlock_bh();
 
-    IP6_INC_STATS_BH(dev_net(dst->dev),
-                     ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+    IP6_INC_STATS(dev_net(dst->dev),
+                  ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
     kfree_skb(skb);
     return -EINVAL;
 }
+2 -1
net/ipv6/ping.c
···
 
 
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
-static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                                 int *addr_len)
 {
     return -EAFNOSUPPORT;
 }
-4
net/ipv6/protocol.c
···
 }
 EXPORT_SYMBOL(inet6_add_protocol);
 
-/*
- *    Remove a protocol from the hash tables.
- */
-
 int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
     int ret;
+2 -2
net/ipv6/raw.c
···
         return -EOPNOTSUPP;
 
     if (flags & MSG_ERRQUEUE)
-        return ipv6_recv_error(sk, msg, len);
+        return ipv6_recv_error(sk, msg, len, addr_len);
 
     if (np->rxpmtu && np->rxopt.bits.rxpmtu)
-        return ipv6_recv_rxpmtu(sk, msg, len);
+        return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
     skb = skb_recv_datagram(sk, flags, noblock, &err);
     if (!skb)
+41 -9
net/ipv6/sit.c
···
     dev_put(dev);
 }
 
+/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
+ * if sufficient data bytes are available
+ */
+static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
+{
+    const struct iphdr *iph = (const struct iphdr *) skb->data;
+    struct rt6_info *rt;
+    struct sk_buff *skb2;
+
+    if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
+        return 1;
+
+    skb2 = skb_clone(skb, GFP_ATOMIC);
+
+    if (!skb2)
+        return 1;
+
+    skb_dst_drop(skb2);
+    skb_pull(skb2, iph->ihl * 4);
+    skb_reset_network_header(skb2);
+
+    rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
+
+    if (rt && rt->dst.dev)
+        skb2->dev = rt->dst.dev;
+
+    icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+
+    if (rt)
+        ip6_rt_put(rt);
+
+    kfree_skb(skb2);
+
+    return 0;
+}
 
 static int ipip6_err(struct sk_buff *skb, u32 info)
 {
-
-/* All the routers (except for Linux) return only
-   8 bytes of packet payload. It means, that precise relaying of
-   ICMP in the real Internet is absolutely infeasible.
- */
     const struct iphdr *iph = (const struct iphdr *)skb->data;
     const int type = icmp_hdr(skb)->type;
     const int code = icmp_hdr(skb)->code;
···
     case ICMP_DEST_UNREACH:
         switch (code) {
         case ICMP_SR_FAILED:
-        case ICMP_PORT_UNREACH:
             /* Impossible event. */
             return 0;
         default:
···
         goto out;
 
     err = 0;
+    if (!ipip6_err_gen_icmpv6_unreach(skb))
+        goto out;
+
     if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
         goto out;
 
···
         if (!new_skb) {
             ip_rt_put(rt);
             dev->stats.tx_dropped++;
-            dev_kfree_skb(skb);
+            kfree_skb(skb);
             return NETDEV_TX_OK;
         }
         if (skb->sk)
···
 tx_error_icmp:
     dst_link_failure(skb);
 tx_error:
-    dev_kfree_skb(skb);
+    kfree_skb(skb);
 out:
     dev->stats.tx_errors++;
     return NETDEV_TX_OK;
···
 
 tx_err:
     dev->stats.tx_errors++;
-    dev_kfree_skb(skb);
+    kfree_skb(skb);
     return NETDEV_TX_OK;
 
 }
+15 -17
net/ipv6/tcpv6_offload.c
···
37 37 {
38 38 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
39 39 	__wsum wsum;
40 - 	__sum16 sum;
40 + 
41 + 	/* Don't bother verifying checksum if we're going to flush anyway. */
42 + 	if (NAPI_GRO_CB(skb)->flush)
43 + 		goto skip_csum;
44 + 
45 + 	wsum = skb->csum;
41 46 
42 47 	switch (skb->ip_summed) {
48 + 	case CHECKSUM_NONE:
49 + 		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
50 + 				    wsum);
51 + 
52 + 		/* fall through */
53 + 
43 54 	case CHECKSUM_COMPLETE:
44 55 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
45 - 				  skb->csum)) {
56 + 				  wsum)) {
46 57 			skb->ip_summed = CHECKSUM_UNNECESSARY;
47 58 			break;
48 59 		}
49 - flush:
60 + 
50 61 		NAPI_GRO_CB(skb)->flush = 1;
51 62 		return NULL;
52 - 
53 - 	case CHECKSUM_NONE:
54 - 		wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
55 - 						    skb_gro_len(skb),
56 - 						    IPPROTO_TCP, 0));
57 - 		sum = csum_fold(skb_checksum(skb,
58 - 					     skb_gro_offset(skb),
59 - 					     skb_gro_len(skb),
60 - 					     wsum));
61 - 		if (sum)
62 - 			goto flush;
63 - 
64 - 		skb->ip_summed = CHECKSUM_UNNECESSARY;
65 - 		break;
66 63 	}
67 64 
65 + skip_csum:
68 66 	return tcp_gro_receive(head, skb);
69 67 }
70 68 
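The rewrite works because the Internet checksum is a plain one's-complement sum, so partial sums compose: in the CHECKSUM_NONE case the packet bytes are summed into wsum first, in the CHECKSUM_COMPLETE case the NIC already provided that sum, and a single tcp_v6_check() call then validates both. A self-contained demonstration of the composition property (helper names are mine, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One's-complement sum over 16-bit big-endian words. */
static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)	/* trailing odd byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold carries into 16 bits and complement. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t buf[64];

	for (size_t i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)(i * 37 + 1);

	uint32_t whole = csum_partial(buf, sizeof(buf), 0);
	uint32_t parts = csum_partial(buf + 32, 32,
				      csum_partial(buf, 32, 0));

	printf("one pass : %04x\n", csum_fold(whole));
	printf("two parts: %04x\n", csum_fold(parts));
	return 0;
}

Splitting at an even offset keeps the 16-bit words aligned, which is why the two results agree and why seeding one sum with another, as the GRO path now does, is safe.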
+2 -2
net/ipv6/udp.c
···
393 393 	bool slow;
394 394 
395 395 	if (flags & MSG_ERRQUEUE)
396 - 		return ipv6_recv_error(sk, msg, len);
396 + 		return ipv6_recv_error(sk, msg, len, addr_len);
397 397 
398 398 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
399 - 		return ipv6_recv_rxpmtu(sk, msg, len);
399 + 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
400 400 
401 401 try_again:
402 402 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+1 -1
net/l2tp/l2tp_ip6.c
···
665 665 		*addr_len = sizeof(*lsa);
666 666 
667 667 	if (flags & MSG_ERRQUEUE)
668 - 		return ipv6_recv_error(sk, msg, len);
668 + 		return ipv6_recv_error(sk, msg, len, addr_len);
669 669 
670 670 	skb = skb_recv_datagram(sk, flags, noblock, &err);
671 671 	if (!skb)
+10 -3
net/netlink/genetlink.c
···
74 74  * Bit 17 is marked as already used since the VFS quota code
75 75  * also abused this API and relied on family == group ID, we
76 76  * cater to that by giving it a static family and group ID.
77 +  * Bit 18 is marked as already used since the PMCRAID driver
78 +  * did the same thing as the VFS quota code (maybe copied?)
77 79  */
78 80 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
79 - 				      BIT(GENL_ID_VFS_DQUOT);
81 + 				      BIT(GENL_ID_VFS_DQUOT) |
82 + 				      BIT(GENL_ID_PMCRAID);
80 83 static unsigned long *mc_groups = &mc_group_start;
81 84 static unsigned long mc_groups_longs = 1;
···
142 139 
143 140 	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
144 141 		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
142 + 		    id_gen_idx != GENL_ID_PMCRAID &&
145 143 		    !genl_family_find_byid(id_gen_idx))
146 144 			return id_gen_idx;
147 145 		if (++id_gen_idx > GENL_MAX_ID)
···
218 214 {
219 215 	int first_id;
220 216 	int n_groups = family->n_mcgrps;
221 - 	int err, i;
217 + 	int err = 0, i;
222 218 	bool groups_allocated = false;
223 219 
224 220 	if (!n_groups)
···
240 236 	} else if (strcmp(family->name, "NET_DM") == 0) {
241 237 		first_id = 1;
242 238 		BUG_ON(n_groups != 1);
243 - 	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
239 + 	} else if (family->id == GENL_ID_VFS_DQUOT) {
244 240 		first_id = GENL_ID_VFS_DQUOT;
241 + 		BUG_ON(n_groups != 1);
242 + 	} else if (family->id == GENL_ID_PMCRAID) {
243 + 		first_id = GENL_ID_PMCRAID;
245 244 		BUG_ON(n_groups != 1);
246 245 	} else {
247 246 		groups_allocated = true;
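The pattern here is a static reservation in the allocator's bitmap: GENL_ID_PMCRAID joins GENL_ID_VFS_DQUOT as a pre-claimed ID, and the dynamic family-ID generator skips both. A toy version of the same pattern (IDs and limits chosen purely for illustration):

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define ID_CTRL		0x10
#define ID_VFS_DQUOT	0x11
#define ID_PMCRAID	0x12
#define MAX_ID		0x3f

/* IDs claimed up front, never handed out dynamically. */
static unsigned long used = BIT(ID_CTRL) | BIT(ID_VFS_DQUOT) |
			    BIT(ID_PMCRAID);

static int alloc_id(void)
{
	for (int id = ID_CTRL + 1; id <= MAX_ID; id++) {
		if (!(used & BIT(id))) {
			used |= BIT(id);
			return id;
		}
	}
	return -1;	/* space exhausted */
}

int main(void)
{
	/* 0x11 and 0x12 are reserved, so the first grants skip them */
	printf("first : 0x%x\n", alloc_id());	/* 0x13 */
	printf("second: 0x%x\n", alloc_id());	/* 0x14 */
	return 0;
}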
+2 -2
net/packet/af_packet.c
···
439 439 
440 440 	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
441 441 
442 - 	spin_lock(&rb_queue->lock);
442 + 	spin_lock_bh(&rb_queue->lock);
443 443 	pkc->delete_blk_timer = 1;
444 - 	spin_unlock(&rb_queue->lock);
444 + 	spin_unlock_bh(&rb_queue->lock);
445 445 
446 446 	prb_del_retire_blk_timer(pkc);
447 447 }
+4 -3
net/sched/sch_netem.c
···
215 215 		if (rnd < clg->a4) {
216 216 			clg->state = 4;
217 217 			return true;
218 - 		} else if (clg->a4 < rnd && rnd < clg->a1) {
218 + 		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
219 219 			clg->state = 3;
220 220 			return true;
221 - 		} else if (clg->a1 < rnd)
221 + 		} else if (clg->a1 + clg->a4 < rnd)
222 222 			clg->state = 1;
223 223 
224 224 		break;
···
268 268 			clg->state = 2;
269 269 		if (net_random() < clg->a4)
270 270 			return true;
271 + 		break;
271 272 	case 2:
272 273 		if (net_random() < clg->a2)
273 274 			clg->state = 1;
274 - 		if (clg->a3 > net_random())
275 + 		if (net_random() > clg->a3)
275 276 			return true;
276 277 	}
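Three distinct bugs are fixed here: the 4-state model's thresholds must be cumulative (a4, then a1 + a4) so the comparisons partition the random range into disjoint intervals; the Gilbert-Elliott good state needs a break so it does not fall through and roll the bad-state dice as well; and the bad-state comparison must drop with probability 1 - a3, not a3. A userspace re-implementation of the corrected two-state model, checked against its steady-state loss rate (parameter values invented; net_random() replaced by rand()):

#include <stdio.h>
#include <stdlib.h>

/* Transition/loss parameters; purely illustrative values. */
static const double a1 = 0.05;	/* P(good -> bad)           */
static const double a2 = 0.30;	/* P(bad -> good)           */
static const double a3 = 0.60;	/* P(delivery in bad state) */
static const double a4 = 0.01;	/* P(loss in good state)    */

static double frand(void)
{
	return rand() / (RAND_MAX + 1.0);
}

/* Mirrors the fixed loss_gilb_ell(): note the break that keeps the
 * good state from falling through, and the rnd > a3 comparison that
 * makes the bad-state loss probability 1 - a3.
 */
static int lose_packet(int *state)
{
	switch (*state) {
	case 1:				/* good state */
		if (frand() < a1)
			*state = 2;
		if (frand() < a4)
			return 1;
		break;			/* previously missing */
	case 2:				/* bad state */
		if (frand() < a2)
			*state = 1;
		if (frand() > a3)
			return 1;
	}
	return 0;
}

int main(void)
{
	long lost = 0, n = 10000000;
	int state = 1;

	for (long i = 0; i < n; i++)
		lost += lose_packet(&state);

	double p_bad = a1 / (a1 + a2);	/* steady-state share of bad */

	printf("measured loss : %.4f\n", (double)lost / n);
	printf("predicted loss: %.4f\n",
	       (1.0 - p_bad) * a4 + p_bad * (1.0 - a3));
	return 0;
}

With the pre-fix fall-through, good-state packets would also take the bad-state loss branch, so the measured rate lands far above the prediction; with the break restored the two agree.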
+25 -7
net/sched/sch_tbf.c
···
21 21 #include <net/netlink.h>
22 22 #include <net/sch_generic.h>
23 23 #include <net/pkt_sched.h>
24 + #include <net/tcp.h>
24 25 
25 26 
26 27 /* Simple Token Bucket Filter.
···
118 117 };
119 118 
120 119 
120 + /*
121 +  * Return length of individual segments of a gso packet,
122 +  * including all headers (MAC, IP, TCP/UDP)
123 +  */
124 + static unsigned int skb_gso_seglen(const struct sk_buff *skb)
125 + {
126 + 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
127 + 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
128 + 
129 + 	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
130 + 		hdr_len += tcp_hdrlen(skb);
131 + 	else
132 + 		hdr_len += sizeof(struct udphdr);
133 + 	return hdr_len + shinfo->gso_size;
134 + }
135 + 
121 136 /* GSO packet is too big, segment it so that tbf can transmit
122 137  * each segment in time
123 138  */
···
153 136 	while (segs) {
154 137 		nskb = segs->next;
155 138 		segs->next = NULL;
156 - 		if (likely(segs->len <= q->max_size)) {
157 - 			qdisc_skb_cb(segs)->pkt_len = segs->len;
158 - 			ret = qdisc_enqueue(segs, q->qdisc);
159 - 		} else {
160 - 			ret = qdisc_reshape_fail(skb, sch);
161 - 		}
139 + 		qdisc_skb_cb(segs)->pkt_len = segs->len;
140 + 		ret = qdisc_enqueue(segs, q->qdisc);
162 141 		if (ret != NET_XMIT_SUCCESS) {
163 142 			if (net_xmit_drop_count(ret))
164 143 				sch->qstats.drops++;
···
176 163 	int ret;
177 164 
178 165 	if (qdisc_pkt_len(skb) > q->max_size) {
179 - 		if (skb_is_gso(skb))
166 + 		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
180 167 			return tbf_segment(skb, sch);
181 168 		return qdisc_reshape_fail(skb, sch);
182 169 	}
···
331 318 	}
332 319 	if (max_size < 0)
333 320 		goto done;
321 + 
322 + 	if (max_size < psched_mtu(qdisc_dev(sch)))
323 + 		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
324 + 				    max_size, qdisc_dev(sch)->name,
325 + 				    psched_mtu(qdisc_dev(sch)));
334 326 
335 327 	if (q->qdisc != &noop_qdisc) {
336 328 		err = fifo_set_limit(q->qdisc, qopt->limit);
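tbf now only resorts to software segmentation when each resulting segment will itself fit the bucket: a segment's wire length is all headers (MAC, IP, TCP/UDP) plus gso_size, so if that sum exceeds max_size every segment would be dropped anyway and the whole packet is rejected up front. A quick arithmetic sketch of the guard (structure and numbers invented; not the kernel's types):

#include <stdio.h>

struct fake_gso {
	unsigned int mac_ip_hdr;	/* MAC + IP header bytes */
	unsigned int tcp_hdr;		/* TCP header bytes      */
	unsigned int gso_size;		/* payload per segment   */
};

/* Same arithmetic as skb_gso_seglen() above. */
static unsigned int gso_seglen(const struct fake_gso *s)
{
	return s->mac_ip_hdr + s->tcp_hdr + s->gso_size;
}

int main(void)
{
	struct fake_gso skb = {
		.mac_ip_hdr = 14 + 20,	/* Ethernet + IPv4, no options */
		.tcp_hdr    = 20,
		.gso_size   = 1448,
	};
	unsigned int max_size = 1514;	/* largest packet the bucket takes */
	unsigned int seglen = gso_seglen(&skb);

	printf("seglen=%u max_size=%u -> %s\n", seglen, max_size,
	       seglen <= max_size ? "segment" : "drop");
	return 0;
}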
+2 -1
net/sctp/output.c
···
474 474 	 * for a given destination transport address.
475 475 	 */
476 476 
477 - 	if (!tp->rto_pending) {
477 + 	if (!chunk->resent && !tp->rto_pending) {
478 478 		chunk->rtt_in_progress = 1;
479 479 		tp->rto_pending = 1;
480 480 	}
481 + 
481 482 	has_data = 1;
482 483 }
+5 -1
net/sctp/outqueue.c
···
446 446 		transport->rto_pending = 0;
447 447 	}
448 448 
449 + 	chunk->resent = 1;
450 + 
449 451 	/* Move the chunk to the retransmit queue. The chunks
450 452 	 * on the retransmit queue are always kept in order.
451 453 	 */
···
1377 1375 	 * instance).
1378 1376 	 */
1379 1377 	if (!tchunk->tsn_gap_acked &&
1378 + 	    !tchunk->resent &&
1380 1379 	    tchunk->rtt_in_progress) {
1381 1380 		tchunk->rtt_in_progress = 0;
1382 1381 		rtt = jiffies - tchunk->sent_at;
···
1394 1391 	 */
1395 1392 	if (!tchunk->tsn_gap_acked) {
1396 1393 		tchunk->tsn_gap_acked = 1;
1397 - 		*highest_new_tsn_in_sack = tsn;
1394 + 		if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1395 + 			*highest_new_tsn_in_sack = tsn;
1398 1396 		bytes_acked += sctp_data_size(tchunk);
1399 1397 		if (!tchunk->transport)
1400 1398 			migrate_bytes += sctp_data_size(tchunk);
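Together with the output.c change above, the new resent flag applies the classic rule attributed to Karn to SCTP: never take an RTT sample from a chunk that has been retransmitted, because the SACK cannot say which transmission it acknowledges. A toy timeline showing the ambiguity (all timestamps invented):

#include <stdio.h>

int main(void)
{
	long sent_at   = 100;	/* first transmission (ms)      */
	long resent_at = 400;	/* retransmission after timeout */
	long acked_at  = 450;	/* SACK arrives                 */

	/* If the SACK answers the resend, sampling from the first send
	 * overestimates the RTT; if it answers the first copy (delayed
	 * in flight), sampling from the resend underestimates it.
	 */
	printf("sample from 1st send: %ld ms\n", acked_at - sent_at);
	printf("sample from resend  : %ld ms\n", acked_at - resent_at);
	printf("=> skip the sample when chunk->resent is set\n");
	return 0;
}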
+1 -1
net/socket.c
···
1973 1973 	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
1974 1974 		return -EFAULT;
1975 1975 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1976 - 		return -EINVAL;
1976 + 		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
1977 1977 	return 0;
1978 1978 }
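Returning -EINVAL here was the regression: userspace may legitimately hand sendmsg()/recvmsg() a msg_name buffer larger than struct sockaddr_storage, and the kernel should simply cap how much of it it uses. A small sketch of the call shape that has to keep working (loopback address and discard port are arbitrary choices):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	struct {			/* larger than sockaddr_storage */
		struct sockaddr_storage ss;
		char pad[64];
	} big_name;
	struct sockaddr_in *sin = (struct sockaddr_in *)&big_name;
	char payload[] = "ping";
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_name	= &big_name,
		.msg_namelen	= sizeof(big_name),	/* oversized on purpose */
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&big_name, 0, sizeof(big_name));
	sin->sin_family = AF_INET;
	sin->sin_port = htons(9);	/* discard */
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (sendmsg(fd, &msg, 0) < 0)
		perror("sendmsg");	/* failed with EINVAL before the fix */
	else
		puts("oversized msg_namelen accepted");

	close(fd);
	return 0;
}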