Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) MLX5 bug fixes from Saeed Mahameed et al:
- wrong resources released when a firmware timeout happens
- fix wrong check for encapsulation size limits
- UAR memory leak
- ETHTOOL_GRXCLSRLALL failed to fill in info->data

2) Don't cache l3mdev on a mis-matched local route, which causes net devices to
leak refs. From Robert Shearman.

3) Handle fragmented SKBs properly in the macsec driver; the problem is
that we were mis-sizing the sgvec table. From Jason A. Donenfeld.

4) We cannot have checksum offload enabled for inner UDP tunneled
packets during IPSEC, from Ansis Atteka.

5) Fix double SKB free in ravb driver, from Dan Carpenter.

6) Fix CPU port handling in b53 DSA driver, from Florian Fainelli.

7) Don't use on-stack buffers for usb_control_msg() in CAN usb driver,
from Maksim Salau.

8) Fix device leak in macvlan driver, from Herbert Xu. We have to purge
the broadcast queue properly on port destroy.

9) Fix tx ring entry limit on EF10 devices in sfc driver. From Bert
Kenward.

10) Fix memory leaks in team driver, from Pan Bian.

11) Don't set up ipv6_stub before it can actually be used, from Paolo
Abeni.

12) Fix tipc socket flow control accounting, from Parthasarathy
Bhuvaragan.

13) Fix crash on module unload in hso driver, from Andreas Kemnade.

14) Fix purging of bridge multicast entries, the problem is that if we
don't defer it to ndo_uninit it's possible for new entries to get
added after we purge. Fix from Xin Long.

15) Don't return garbage for PACKET_HDRLEN getsockopt, from Alexander
Potapenko.

16) Fix autoneg stall properly in the PHY layer, and revert the micrel driver
change that was papering over it. From Alexander Kochetkov.

17) Don't dereference an ipv4 route as an ipv6 one in the ip6_tunnel
code, from Cong Wang.

18) Clear out the congestion control private data of the TCP socket in all of
the right places, from Wei Wang.

19) rawv6_ioctl measures SKB length incorrectly, fix from Jamie
Bainbridge.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
ipv6: check raw payload size correctly in ioctl
tcp: memset ca_priv data to 0 properly
ipv6: check skb->protocol before lookup for nexthop
net: core: Prevent from dereferencing null pointer when releasing SKB
macsec: dynamically allocate space for sglist
Revert "phy: micrel: Disable auto negotiation on startup"
net: phy: fix auto-negotiation stall due to unavailable interrupt
net/packet: check length in getsockopt() called with PACKET_HDRLEN
net: ipv6: regenerate host route if moved to gc list
bridge: move bridge multicast cleanup to ndo_uninit
ipv6: fix source routing
qed: Fix error in the dcbx app meta data initialization.
netvsc: fix calculation of available send sections
net: hso: fix module unloading
tipc: fix socket flow control accounting error at tipc_recv_stream
tipc: fix socket flow control accounting error at tipc_send_stream
ipv6: move stub initialization after ipv6 setup completion
team: fix memory leaks
sfc: tx ring can only have 2048 entries for all EF10 NICs
macvlan: Fix device ref leak when purging bc_queue
...

+373 -141
+2
drivers/net/can/usb/Kconfig
···
           PCAN-USB Pro     dual CAN 2.0b channels USB adapter
           PCAN-USB FD      single CAN-FD channel USB adapter
           PCAN-USB Pro FD  dual CAN-FD channels USB adapter
+          PCAN-Chip USB    CAN-FD to USB stamp module
+          PCAN-USB X6      6 CAN-FD channels USB adapter

           (see also http://www.peak-system.com).
+12 -5
drivers/net/can/usb/gs_usb.c
···
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
     struct gs_can *dev = netdev_priv(netdev);
-    struct gs_identify_mode imode;
+    struct gs_identify_mode *imode;
     int rc;

+    imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+    if (!imode)
+        return -ENOMEM;
+
     if (do_identify)
-        imode.mode = GS_CAN_IDENTIFY_ON;
+        imode->mode = GS_CAN_IDENTIFY_ON;
     else
-        imode.mode = GS_CAN_IDENTIFY_OFF;
+        imode->mode = GS_CAN_IDENTIFY_OFF;

     rc = usb_control_msg(interface_to_usbdev(dev->iface),
                          usb_sndctrlpipe(interface_to_usbdev(dev->iface),
···
                          USB_RECIP_INTERFACE,
                          dev->channel,
                          0,
-                         &imode,
-                         sizeof(imode),
+                         imode,
+                         sizeof(*imode),
                          100);
+
+    kfree(imode);

     return (rc > 0) ? 0 : rc;
 }
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.c
···
     {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
     {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
     {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+    {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
     {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
     {} /* Terminating entry */
 };
···
     &pcan_usb_pro,
     &pcan_usb_fd,
     &pcan_usb_pro_fd,
+    &pcan_usb_chip,
     &pcan_usb_x6,
 };
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.h
···
 #define PCAN_USBPRO_PRODUCT_ID 0x000d
 #define PCAN_USBPROFD_PRODUCT_ID 0x0011
 #define PCAN_USBFD_PRODUCT_ID 0x0012
+#define PCAN_USBCHIP_PRODUCT_ID 0x0013
 #define PCAN_USBX6_PRODUCT_ID 0x0014

 #define PCAN_USB_DRIVER_NAME "peak_usb"
···
 extern const struct peak_usb_adapter pcan_usb;
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_chip;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
+72
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
···
     .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };

+/* describes the PCAN-CHIP USB */
+static const struct can_bittiming_const pcan_usb_chip_const = {
+    .name = "pcan_chip_usb",
+    .tseg1_min = 1,
+    .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+    .tseg2_min = 1,
+    .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+    .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+    .brp_min = 1,
+    .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+    .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_chip_data_const = {
+    .name = "pcan_chip_usb",
+    .tseg1_min = 1,
+    .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+    .tseg2_min = 1,
+    .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+    .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+    .brp_min = 1,
+    .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+    .brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_chip = {
+    .name = "PCAN-Chip USB",
+    .device_id = PCAN_USBCHIP_PRODUCT_ID,
+    .ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+    .ctrlmode_supported = CAN_CTRLMODE_FD |
+        CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+    .clock = {
+        .freq = PCAN_UFD_CRYSTAL_HZ,
+    },
+    .bittiming_const = &pcan_usb_chip_const,
+    .data_bittiming_const = &pcan_usb_chip_data_const,
+
+    /* size of device private data */
+    .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+    /* timestamps usage */
+    .ts_used_bits = 32,
+    .ts_period = 1000000, /* calibration period in ts. */
+    .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+    .us_per_ts_shift = 0,
+
+    /* give here messages in/out endpoints */
+    .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+    .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+    /* size of rx/tx usb buffers */
+    .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+    .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+    /* device callbacks */
+    .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+    .dev_init = pcan_usb_fd_init,
+
+    .dev_exit = pcan_usb_fd_exit,
+    .dev_free = pcan_usb_fd_free,
+    .dev_set_bus = pcan_usb_fd_set_bus,
+    .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+    .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+    .dev_decode_buf = pcan_usb_fd_decode_buf,
+    .dev_start = pcan_usb_fd_start,
+    .dev_stop = pcan_usb_fd_stop,
+    .dev_restart_async = pcan_usb_fd_restart_async,
+    .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+    .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
 /* describes the PCAN-USB Pro FD adapter */
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
     .name = "pcan_usb_pro_fd",
+35 -2
drivers/net/dsa/b53/b53_common.c
···
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
+    struct dsa_switch *ds = dev->ds;
     u8 mgmt;

     b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
···
         mgmt &= ~SM_SW_FWD_EN;

     b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+    /* Include IMP port in dumb forwarding mode when no tagging protocol is
+     * set
+     */
+    if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+        b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+        mgmt |= B53_MII_DUMB_FWDG_EN;
+        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+    }
 }

 static void b53_enable_vlan(struct b53_device *dev, bool enable)
···
 static int b53_switch_reset(struct b53_device *dev)
 {
-    u8 mgmt;
+    unsigned int timeout = 1000;
+    u8 mgmt, reg;

     b53_switch_reset_gpio(dev);

     if (is539x(dev)) {
         b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
         b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
+    }
+
+    /* This is specific to 58xx devices here, do not use is58xx() which
+     * covers the larger Starfigther 2 family, including 7445/7278 which
+     * still use this driver as a library and need to perform the reset
+     * earlier.
+     */
+    if (dev->chip_id == BCM58XX_DEVICE_ID) {
+        b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+        reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+        b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+        do {
+            b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+            if (!(reg & SW_RST))
+                break;
+
+            usleep_range(1000, 2000);
+        } while (timeout-- > 0);
+
+        if (timeout == 0)
+            return -ETIMEDOUT;
     }

     b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
···
     .vlans = 4096,
     .enabled_ports = 0x1ff,
     .arl_entries = 4,
-    .cpu_port = B53_CPU_PORT_25,
+    .cpu_port = B53_CPU_PORT,
     .vta_regs = B53_VTA_REGS,
     .duplex_reg = B53_DUPLEX_STAT_GE,
     .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+5
drivers/net/dsa/b53/b53_regs.h
···
 #define B53_UC_FWD_EN BIT(6)
 #define B53_MC_FWD_EN BIT(7)

+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL 0x22
+#define B53_MII_DUMB_FWDG_EN BIT(6)
+
 /* (16 bit) */
 #define B53_UC_FLOOD_MASK 0x32
 #define B53_MC_FLOOD_MASK 0x34
···
 /* Software reset register (8 bit) */
 #define B53_SOFTRESET 0x79
 #define SW_RST BIT(7)
+#define EN_CH_RST BIT(6)
 #define EN_SW_RST BIT(4)

 /* Fast Aging Control register (8 bit) */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
···
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)

 #define MLX5_UMR_ALIGN (2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)

 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT 32
+1
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
···
     int idx = 0;
     int err = 0;

+    info->data = MAX_NUM_OF_ETHTOOL_RULES;
     while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
         err = mlx5e_ethtool_get_flow(priv, info, location);
         if (!err)
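For context on the ETHTOOL_GRXCLSRLALL fix above, here is a rough userspace sketch (not part of the patch) of the way the rule table is normally queried, as "ethtool -n" does: ETHTOOL_GRXCLSRLCNT returns the rule count, and ETHTOOL_GRXCLSRLALL is expected to report the size of the rule table in info->data alongside the rule locations, which is the field mlx5e now fills in. The interface name "eth0" and the minimal error handling are illustrative only.

/* Hypothetical userspace sketch: list ntuple rule locations and the
 * table size reported in ethtool_rxnfc.data.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc cnt = { .cmd = ETHTOOL_GRXCLSRLCNT };
	struct ethtool_rxnfc *all;
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* device name is illustrative */
	ifr.ifr_data = (void *)&cnt;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* second call: room for rule_cnt locations, kernel fills data + rule_locs[] */
	all = calloc(1, sizeof(*all) + cnt.rule_cnt * sizeof(__u32));
	if (!all)
		return 1;
	all->cmd = ETHTOOL_GRXCLSRLALL;
	all->rule_cnt = cnt.rule_cnt;
	ifr.ifr_data = (void *)all;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	printf("%u rules, rule table size %llu\n", all->rule_cnt,
	       (unsigned long long)all->data);
	free(all);
	return 0;
}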
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
···
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-    struct mlx5e_sw_stats *s = &priv->stats.sw;
+    struct mlx5e_sw_stats temp, *s = &temp;
     struct mlx5e_rq_stats *rq_stats;
     struct mlx5e_sq_stats *sq_stats;
     u64 tx_offload_none = 0;
···
     s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                 priv->stats.pport.phy_counters,
                 counter_set.phys_layer_cntrs.link_down_events);
+    memcpy(&priv->stats.sw, s, sizeof(*s));
 }

 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
···
     MLX5_SET(query_vport_counter_in, in, op_mod, 0);
     MLX5_SET(query_vport_counter_in, in, other_vport, 0);

-    memset(out, 0, outlen);
     mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+48 -39
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
···
     if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
         rep->vport != FDB_UPLINK_VPORT) {
-        if (min_inline > esw->offloads.inline_mode) {
+        if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+            esw->offloads.inline_mode < min_inline) {
             netdev_warn(priv->netdev,
                         "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                         min_inline, esw->offloads.inline_mode);
···
     return 0;
 }

-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-                                 char buf[],
-                                 unsigned char h_dest[ETH_ALEN],
-                                 int ttl,
-                                 __be32 daddr,
-                                 __be32 saddr,
-                                 __be16 udp_dst_port,
-                                 __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+                                  char buf[], int encap_size,
+                                  unsigned char h_dest[ETH_ALEN],
+                                  int ttl,
+                                  __be32 daddr,
+                                  __be32 saddr,
+                                  __be16 udp_dst_port,
+                                  __be32 vx_vni)
 {
-    int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
     struct ethhdr *eth = (struct ethhdr *)buf;
     struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
     struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
···
     udp->dest = udp_dst_port;
     vxh->vx_flags = VXLAN_HF_VNI;
     vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-    return encap_size;
 }

-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-                                 char buf[],
-                                 unsigned char h_dest[ETH_ALEN],
-                                 int ttl,
-                                 struct in6_addr *daddr,
-                                 struct in6_addr *saddr,
-                                 __be16 udp_dst_port,
-                                 __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+                                  char buf[], int encap_size,
+                                  unsigned char h_dest[ETH_ALEN],
+                                  int ttl,
+                                  struct in6_addr *daddr,
+                                  struct in6_addr *saddr,
+                                  __be16 udp_dst_port,
+                                  __be32 vx_vni)
 {
-    int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
     struct ethhdr *eth = (struct ethhdr *)buf;
     struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
     struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
···
     udp->dest = udp_dst_port;
     vxh->vx_flags = VXLAN_HF_VNI;
     vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-    return encap_size;
 }

 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
···
                                           struct net_device **out_dev)
 {
     int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+    int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
     struct ip_tunnel_key *tun_key = &e->tun_info.key;
-    int encap_size, ttl, err;
     struct neighbour *n = NULL;
     struct flowi4 fl4 = {};
     char *encap_header;
+    int ttl, err;

-    encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+    if (max_encap_size < ipv4_encap_size) {
+        mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+                       ipv4_encap_size, max_encap_size);
+        return -EOPNOTSUPP;
+    }
+
+    encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
     if (!encap_header)
         return -ENOMEM;

···
     switch (e->tunnel_type) {
     case MLX5_HEADER_TYPE_VXLAN:
-        encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-                                           e->h_dest, ttl,
-                                           fl4.daddr,
-                                           fl4.saddr, tun_key->tp_dst,
-                                           tunnel_id_to_key32(tun_key->tun_id));
+        gen_vxlan_header_ipv4(*out_dev, encap_header,
+                              ipv4_encap_size, e->h_dest, ttl,
+                              fl4.daddr,
+                              fl4.saddr, tun_key->tp_dst,
+                              tunnel_id_to_key32(tun_key->tun_id));
         break;
     default:
         err = -EOPNOTSUPP;
···
     }

     err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                           encap_size, encap_header, &e->encap_id);
+                           ipv4_encap_size, encap_header, &e->encap_id);
 out:
     if (err && n)
         neigh_release(n);
···
 {
     int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+    int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
     struct ip_tunnel_key *tun_key = &e->tun_info.key;
-    int encap_size, err, ttl = 0;
     struct neighbour *n = NULL;
     struct flowi6 fl6 = {};
     char *encap_header;
+    int err, ttl = 0;

-    encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+    if (max_encap_size < ipv6_encap_size) {
+        mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+                       ipv6_encap_size, max_encap_size);
+        return -EOPNOTSUPP;
+    }
+
+    encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
     if (!encap_header)
         return -ENOMEM;

···
     switch (e->tunnel_type) {
     case MLX5_HEADER_TYPE_VXLAN:
-        encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-                                           e->h_dest, ttl,
-                                           &fl6.daddr,
-                                           &fl6.saddr, tun_key->tp_dst,
-                                           tunnel_id_to_key32(tun_key->tun_id));
+        gen_vxlan_header_ipv6(*out_dev, encap_header,
+                              ipv6_encap_size, e->h_dest, ttl,
+                              &fl6.daddr,
+                              &fl6.saddr, tun_key->tp_dst,
+                              tunnel_id_to_key32(tun_key->tun_id));
         break;
     default:
         err = -EOPNOTSUPP;
···
     }

     err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                           encap_size, encap_header, &e->encap_id);
+                           ipv6_encap_size, encap_header, &e->encap_id);
 out:
     if (err && n)
         neigh_release(n);
+24 -12
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
···
     struct mlx5_core_dev *dev = devlink_priv(devlink);
     struct mlx5_eswitch *esw = dev->priv.eswitch;
     int num_vports = esw->enabled_vports;
-    int err;
-    int vport;
+    int err, vport;
     u8 mlx5_mode;

     if (!MLX5_CAP_GEN(dev, vport_group_manager))
···
     if (esw->mode == SRIOV_NONE)
         return -EOPNOTSUPP;

-    if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-        MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+    switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+    case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+        if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+            return 0;
+        /* fall through */
+    case MLX5_CAP_INLINE_MODE_L2:
+        esw_warn(dev, "Inline mode can't be set\n");
         return -EOPNOTSUPP;
+    case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+        break;
+    }

     if (esw->offloads.num_flows > 0) {
         esw_warn(dev, "Can't set inline mode when flows are configured\n");
···
     if (esw->mode == SRIOV_NONE)
         return -EOPNOTSUPP;

-    if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-        MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-        return -EOPNOTSUPP;
-
     return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }

 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
+    u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
     struct mlx5_core_dev *dev = esw->dev;
     int vport;
-    u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

     if (!MLX5_CAP_GEN(dev, vport_group_manager))
         return -EOPNOTSUPP;
···
     if (esw->mode == SRIOV_NONE)
         return -EOPNOTSUPP;

-    if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-        MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-        return -EOPNOTSUPP;
+    switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+    case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+        mlx5_mode = MLX5_INLINE_MODE_NONE;
+        goto out;
+    case MLX5_CAP_INLINE_MODE_L2:
+        mlx5_mode = MLX5_INLINE_MODE_L2;
+        goto out;
+    case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+        goto query_vports;
+    }

+query_vports:
     for (vport = 1; vport <= nvfs; vport++) {
         mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
         if (vport > 1 && prev_mlx5_mode != mlx5_mode)
···
         prev_mlx5_mode = mlx5_mode;
     }

+out:
     *mode = mlx5_mode;
     return 0;
 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
···
     if (err) {
         dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
                 FW_INIT_TIMEOUT_MILI);
-        goto out_err;
+        goto err_cmd_cleanup;
     }

     err = mlx5_core_enable_hca(dev, 0);
+1
drivers/net/ethernet/mellanox/mlx5/core/uar.c
···
     struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

     list_del(&up->list);
+    iounmap(up->map);
     if (mlx5_cmd_free_uar(up->mdev, up->index))
         mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
     kfree(up->reg_bitmap);
+5 -5
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
···
     ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)

 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-    {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-    {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-    {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-    {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
-    {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+    {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+    {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+    {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+    {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+    {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
 };

 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+4 -3
drivers/net/ethernet/renesas/ravb_main.c
···
         spin_unlock_irqrestore(&priv->lock, flags);
         return NETDEV_TX_BUSY;
     }
-    entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-    priv->tx_skb[q][entry / NUM_TX_DESC] = skb;

     if (skb_put_padto(skb, ETH_ZLEN))
-        goto drop;
+        goto exit;
+
+    entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+    priv->tx_skb[q][entry / NUM_TX_DESC] = skb;

     buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
              entry / NUM_TX_DESC * DPTR_ALIGN;
+4 -1
drivers/net/ethernet/sfc/efx.h
···
 #define EFX_RXQ_MIN_ENT 128U
 #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))

-#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \
                               EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

 static inline bool efx_rss_enabled(struct efx_nic *efx)
+1
drivers/net/ethernet/sfc/workarounds.h
···
  */

 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 #define EFX_WORKAROUND_10G(efx) 1

 /* Bit-bashed I2C reads cause performance drop */
+1 -1
drivers/net/ethernet/toshiba/tc35815.c
···
         BUG_ON(lp->tx_skbs[i].skb != skb);
 #endif
         if (skb) {
-            dev_kfree_skb(skb);
             pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+            dev_kfree_skb(skb);
             lp->tx_skbs[i].skb = NULL;
             lp->tx_skbs[i].skb_dma = 0;
         }
-1
drivers/net/hyperv/hyperv_net.h
···
     u32 send_section_cnt;
     u32 send_section_size;
     unsigned long *send_section_map;
-    int map_words;

     /* Used for NetVSP initialization protocol */
     struct completion channel_init_wait;
+4 -5
drivers/net/hyperv/netvsc.c
···
     struct netvsc_device *net_device;
     struct nvsp_message *init_packet;
     struct net_device *ndev;
+    size_t map_words;
     int node;

     net_device = get_outbound_net_device(device);
···
          net_device->send_section_size, net_device->send_section_cnt);

     /* Setup state for managing the send buffer. */
-    net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
-                                         BITS_PER_LONG);
+    map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

-    net_device->send_section_map = kcalloc(net_device->map_words,
-                                           sizeof(ulong), GFP_KERNEL);
+    net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
     if (net_device->send_section_map == NULL) {
         ret = -ENOMEM;
         goto cleanup;
···
     unsigned long *map_addr = net_device->send_section_map;
     unsigned int i;

-    for_each_clear_bit(i, map_addr, net_device->map_words) {
+    for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
         if (sync_test_and_set_bit(i, map_addr) == 0)
             return i;
     }
+21 -6
drivers/net/macsec.c
···
 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
                                              unsigned char **iv,
-                                             struct scatterlist **sg)
+                                             struct scatterlist **sg,
+                                             int num_frags)
 {
     size_t size, iv_offset, sg_offset;
     struct aead_request *req;
···
     size = ALIGN(size, __alignof__(struct scatterlist));
     sg_offset = size;
-    size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+    size += sizeof(struct scatterlist) * num_frags;

     tmp = kmalloc(size, GFP_ATOMIC);
     if (!tmp)
···
 {
     int ret;
     struct scatterlist *sg;
+    struct sk_buff *trailer;
     unsigned char *iv;
     struct ethhdr *eth;
     struct macsec_eth_header *hh;
···
         return ERR_PTR(-EINVAL);
     }

-    req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
+    ret = skb_cow_data(skb, 0, &trailer);
+    if (unlikely(ret < 0)) {
+        macsec_txsa_put(tx_sa);
+        kfree_skb(skb);
+        return ERR_PTR(ret);
+    }
+
+    req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
     if (!req) {
         macsec_txsa_put(tx_sa);
         kfree_skb(skb);
···
     macsec_fill_iv(iv, secy->sci, pn);

-    sg_init_table(sg, MAX_SKB_FRAGS + 1);
+    sg_init_table(sg, ret);
     skb_to_sgvec(skb, sg, 0, skb->len);

     if (tx_sc->encrypt) {
···
 {
     int ret;
     struct scatterlist *sg;
+    struct sk_buff *trailer;
     unsigned char *iv;
     struct aead_request *req;
     struct macsec_eth_header *hdr;
···
     if (!skb)
         return ERR_PTR(-ENOMEM);

-    req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
+    ret = skb_cow_data(skb, 0, &trailer);
+    if (unlikely(ret < 0)) {
+        kfree_skb(skb);
+        return ERR_PTR(ret);
+    }
+    req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
     if (!req) {
         kfree_skb(skb);
         return ERR_PTR(-ENOMEM);
···
     hdr = (struct macsec_eth_header *)skb->data;
     macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

-    sg_init_table(sg, MAX_SKB_FRAGS + 1);
+    sg_init_table(sg, ret);
     skb_to_sgvec(skb, sg, 0, skb->len);

     if (hdr->tci_an & MACSEC_TCI_E) {
+10 -1
drivers/net/macvlan.c
···
 static void macvlan_port_destroy(struct net_device *dev)
 {
     struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+    struct sk_buff *skb;

     dev->priv_flags &= ~IFF_MACVLAN_PORT;
     netdev_rx_handler_unregister(dev);
···
      * but we need to cancel it and purge left skbs if any.
      */
     cancel_work_sync(&port->bc_work);
-    __skb_queue_purge(&port->bc_queue);
+
+    while ((skb = __skb_dequeue(&port->bc_queue))) {
+        const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+        if (src)
+            dev_put(src->dev);
+
+        kfree_skb(skb);
+    }

     kfree(port);
 }
-11
drivers/net/phy/micrel.c
···
     if (priv->led_mode >= 0)
         kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);

-    if (phy_interrupt_is_valid(phydev)) {
-        int ctl = phy_read(phydev, MII_BMCR);
-
-        if (ctl < 0)
-            return ctl;
-
-        ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
-        if (ret < 0)
-            return ret;
-    }
-
     return 0;
 }
+36 -4
drivers/net/phy/phy.c
···
 EXPORT_SYMBOL(phy_mii_ioctl);

 /**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: Sanitizes the settings (if we're not autonegotiating
  * them), and then calls the driver's config_aneg function.
  * If the PHYCONTROL Layer is operating, we change the state to
  * reflect the beginning of Auto-negotiation or forcing.
  */
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 {
+    bool trigger = 0;
     int err;

     if (!phydev->drv)
···
         }
     }

+    /* Re-schedule a PHY state machine to check PHY status because
+     * negotiation may already be done and aneg interrupt may not be
+     * generated.
+     */
+    if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+        err = phy_aneg_done(phydev);
+        if (err > 0) {
+            trigger = true;
+            err = 0;
+        }
+    }
+
 out_unlock:
     mutex_unlock(&phydev->lock);
+
+    if (trigger)
+        phy_trigger_machine(phydev, sync);
+
     return err;
+}
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+    return phy_start_aneg_priv(phydev, true);
 }
 EXPORT_SYMBOL(phy_start_aneg);

···
  * state machine runs.
  */

-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
     if (sync)
         cancel_delayed_work_sync(&phydev->state_queue);
···
     mutex_unlock(&phydev->lock);

     if (needs_aneg)
-        err = phy_start_aneg(phydev);
+        err = phy_start_aneg_priv(phydev, false);
     else if (do_suspend)
         phy_suspend(phydev);
+6 -2
drivers/net/team/team.c
···
     hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                       TEAM_CMD_OPTIONS_GET);
-    if (!hdr)
+    if (!hdr) {
+        nlmsg_free(skb);
         return -EMSGSIZE;
+    }

     if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
         goto nla_put_failure;
···
     hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                       TEAM_CMD_PORT_LIST_GET);
-    if (!hdr)
+    if (!hdr) {
+        nlmsg_free(skb);
         return -EMSGSIZE;
+    }

     if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
         goto nla_put_failure;
+1 -1
drivers/net/usb/Kconfig
···
       optionally with LEDs that indicate traffic

 config USB_NET_PLUSB
-    tristate "Prolific PL-2301/2302/25A1 based cables"
+    tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
     # if the handshake/init/reset problems, from original 'plusb',
     # are ever resolved ... then remove "experimental"
     depends on USB_USBNET
+1 -1
drivers/net/usb/hso.c
···
     pr_info("unloaded\n");

     tty_unregister_driver(tty_drv);
-    put_tty_driver(tty_drv);
     /* deregister the usb driver */
     usb_deregister(&hso_driver);
+    put_tty_driver(tty_drv);
 }

 /* Module definitions */
+13 -2
drivers/net/usb/plusb.c
···
 }

 static const struct driver_info prolific_info = {
-    .description = "Prolific PL-2301/PL-2302/PL-25A1",
+    .description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
     .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
     /* some PL-2302 versions seem to fail usb_set_interface() */
     .reset = pl_reset,
···
      * Host-to-Host Cable
      */
     .driver_info = (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+    USB_DEVICE(0x067b, 0x27a1), /* PL-27A1, no eeprom
+                                 * also: goobay Active USB 3.0
+                                 * Data Link,
+                                 * Unitek Y-3501
+                                 */
+    .driver_info = (unsigned long) &prolific_info,
 },

 { }, // END
···
 module_usb_driver(plusb_driver);

 MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
 MODULE_LICENSE("GPL");
+1
include/linux/phy.h
···
 void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_ksettings_get(struct phy_device *phydev,
+1
net/bridge/br_device.c
···
 {
     struct net_bridge *br = netdev_priv(dev);

+    br_multicast_dev_del(br);
     br_multicast_uninit_stats(br);
     br_vlan_flush(br);
     free_percpu(br->stats);
-1
net/bridge/br_if.c
···
     br_fdb_delete_by_port(br, NULL, 0, 1);

-    br_multicast_dev_del(br);
     cancel_delayed_work_sync(&br->gc_work);

     br_sysfs_delbr(br->dev);
+3
net/core/dev.c
···
 {
     unsigned long flags;

+    if (unlikely(!skb))
+        return;
+
     if (likely(atomic_read(&skb->users) == 1)) {
         smp_rmb();
         atomic_set(&skb->users, 0);
+2 -1
net/ipv4/route.c
···
     }

     /* L3 master device is the loopback for that domain */
-    dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
+    dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+              net->loopback_dev;
     fl4->flowi4_oif = dev_out->ifindex;
     flags |= RTCF_LOCAL;
     goto make_route;
+3 -8
net/ipv4/tcp_cong.c
···
     }
 out:
     rcu_read_unlock();
+    memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

-    /* Clear out private data before diag gets it and
-     * the ca has not been initialized.
-     */
-    if (ca->get_info)
-        memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
     if (ca->flags & TCP_CONG_NEEDS_ECN)
         INET_ECN_xmit(sk);
     else
···
     tcp_cleanup_congestion_control(sk);
     icsk->icsk_ca_ops = ca;
     icsk->icsk_ca_setsockopt = 1;
+    memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

-    if (sk->sk_state != TCP_CLOSE) {
-        memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+    if (sk->sk_state != TCP_CLOSE)
         tcp_init_congestion_control(sk);
-    }
 }

 /* Manage refcounts on socket close. */
+3
net/ipv4/udp_offload.c
···
     u16 mac_len = skb->mac_len;
     int udp_offset, outer_hlen;
     __wsum partial;
+    bool need_ipsec;

     if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
         goto out;
···
     ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

+    need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
     /* Try to offload checksum if possible */
     offload_csum = !!(need_csum &&
+                      !need_ipsec &&
                       (skb->dev->features &
                        (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
                                   (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
+12 -2
net/ipv6/addrconf.c
···
 static int fixup_permanent_addr(struct inet6_dev *idev,
                                 struct inet6_ifaddr *ifp)
 {
-    if (!ifp->rt) {
-        struct rt6_info *rt;
+    /* rt6i_ref == 0 means the host route was removed from the
+     * FIB, for example, if 'lo' device is taken down. In that
+     * case regenerate the host route.
+     */
+    if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
+        struct rt6_info *rt, *prev;

         rt = addrconf_dst_alloc(idev, &ifp->addr, false);
         if (unlikely(IS_ERR(rt)))
             return PTR_ERR(rt);

+        /* ifp->rt can be accessed outside of rtnl */
+        spin_lock(&ifp->lock);
+        prev = ifp->rt;
         ifp->rt = rt;
+        spin_unlock(&ifp->lock);
+
+        ip6_rt_put(prev);
     }

     if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
+4 -2
net/ipv6/af_inet6.c
···
     if (err)
         goto igmp_fail;

-    ipv6_stub = &ipv6_stub_impl;
-
     err = ipv6_netfilter_init();
     if (err)
         goto netfilter_fail;
···
     if (err)
         goto sysctl_fail;
 #endif
+
+    /* ensure that ipv6 stubs are visible only after ipv6 is ready */
+    wmb();
+    ipv6_stub = &ipv6_stub_impl;
 out:
     return err;
+4
net/ipv6/exthdrs.c
···
 {
     switch (opt->type) {
     case IPV6_SRCRT_TYPE_0:
+    case IPV6_SRCRT_STRICT:
+    case IPV6_SRCRT_TYPE_2:
         ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
         break;
     case IPV6_SRCRT_TYPE_4:
···
     switch (opt->srcrt->type) {
     case IPV6_SRCRT_TYPE_0:
+    case IPV6_SRCRT_STRICT:
+    case IPV6_SRCRT_TYPE_2:
         fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
         break;
     case IPV6_SRCRT_TYPE_4:
+18 -16
net/ipv6/ip6_tunnel.c
···
     struct ip6_tnl *t = netdev_priv(dev);
     struct net *net = t->net;
     struct net_device_stats *stats = &t->dev->stats;
-    struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+    struct ipv6hdr *ipv6h;
     struct ipv6_tel_txoption opt;
     struct dst_entry *dst = NULL, *ndst = NULL;
     struct net_device *tdev;
···
     /* NBMA tunnel */
     if (ipv6_addr_any(&t->parms.raddr)) {
-        struct in6_addr *addr6;
-        struct neighbour *neigh;
-        int addr_type;
+        if (skb->protocol == htons(ETH_P_IPV6)) {
+            struct in6_addr *addr6;
+            struct neighbour *neigh;
+            int addr_type;

-        if (!skb_dst(skb))
-            goto tx_err_link_failure;
+            if (!skb_dst(skb))
+                goto tx_err_link_failure;

-        neigh = dst_neigh_lookup(skb_dst(skb),
-                                 &ipv6_hdr(skb)->daddr);
-        if (!neigh)
-            goto tx_err_link_failure;
+            neigh = dst_neigh_lookup(skb_dst(skb),
+                                     &ipv6_hdr(skb)->daddr);
+            if (!neigh)
+                goto tx_err_link_failure;

-        addr6 = (struct in6_addr *)&neigh->primary_key;
-        addr_type = ipv6_addr_type(addr6);
+            addr6 = (struct in6_addr *)&neigh->primary_key;
+            addr_type = ipv6_addr_type(addr6);

-        if (addr_type == IPV6_ADDR_ANY)
-            addr6 = &ipv6_hdr(skb)->daddr;
+            if (addr_type == IPV6_ADDR_ANY)
+                addr6 = &ipv6_hdr(skb)->daddr;

-        memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
-        neigh_release(neigh);
+            memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+            neigh_release(neigh);
+        }
     } else if (!(t->parms.flags &
                  (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
         /* enable the cache only only if the routing decision does
+2 -1
net/ipv6/ndisc.c
···
         idev = in6_dev_get(dev);
         if (!idev)
             break;
-        if (idev->cnf.ndisc_notify)
+        if (idev->cnf.ndisc_notify ||
+            net->ipv6.devconf_all->ndisc_notify)
             ndisc_send_unsol_na(dev);
         in6_dev_put(idev);
         break;
+1 -2
net/ipv6/raw.c
···
         spin_lock_bh(&sk->sk_receive_queue.lock);
         skb = skb_peek(&sk->sk_receive_queue);
         if (skb)
-            amount = skb_tail_pointer(skb) -
-                     skb_transport_header(skb);
+            amount = skb->len;
         spin_unlock_bh(&sk->sk_receive_queue.lock);
         return put_user(amount, (int __user *)arg);
     }
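As a hedged illustration of what the rawv6_ioctl change means for userspace (not part of the patch), SIOCINQ on a raw IPv6 socket should now report the full length of the next queued datagram, i.e. what a subsequent recv() would return, rather than the tail-minus-transport-header value the old code computed. The sketch below assumes root/CAP_NET_RAW and uses ICMPv6 purely as an example protocol.

/* Rough userspace sketch: query the size of the next queued datagram. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sockios.h>	/* SIOCINQ */

int main(void)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int pending = 0;

	if (fd < 0)
		return 1;

	/* ... wait for an ICMPv6 packet to be queued, e.g. via poll() ... */

	if (ioctl(fd, SIOCINQ, &pending) == 0)
		printf("next datagram: %d bytes\n", pending);
	return 0;
}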
+2
net/packet/af_packet.c
···
     case PACKET_HDRLEN:
         if (len > sizeof(int))
             len = sizeof(int);
+        if (len < sizeof(int))
+            return -EINVAL;
         if (copy_from_user(&val, optval, len))
             return -EFAULT;
         switch (val) {
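To illustrate the PACKET_HDRLEN change from the user side (an illustrative sketch, not part of the patch): the option takes a TPACKET version in an int-sized buffer and returns the corresponding header length in the same buffer; with the check above, passing a buffer shorter than sizeof(int) now fails with EINVAL instead of the kernel acting on uninitialized bytes.

/* Userspace sketch: query the tpacket v2 header length (needs CAP_NET_RAW). */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int val = TPACKET_V2;		/* version in, header length out */
	socklen_t len = sizeof(val);	/* must be at least sizeof(int) */

	if (fd < 0)
		return 1;

	if (getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len) == 0)
		printf("tpacket v2 header length: %d\n", val);
	return 0;
}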
+2 -2
net/tipc/socket.c
···
         }
     } while (sent < dlen && !rc);

-    return rc ? rc : sent;
+    return sent ? sent : rc;
 }

 /**
···
     if (unlikely(flags & MSG_PEEK))
         goto exit;

-    tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+    tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg));
     if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
         tipc_sk_send_ack(tsk);
     tsk_advance_rx_queue(sk);