Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) MLX5 bug fixes from Saeed Mahameed et al:
- released wrong resources when firmware timeout happens
- fix wrong check for encapsulation size limits
- UAR memory leak
- ETHTOOL_GRXCLSRLALL failed to fill in info->data

2) Don't cache l3mdev on a mis-matched local route, which causes net
   devices to leak refs. From Robert Shearman.

3) Handle fragmented SKBs properly in macsec driver, the problem is
that we were mis-sizing the sgvec table. From Jason A. Donenfeld.

4) We cannot have checksum offload enabled for inner UDP tunneled
   packets during IPSEC, from Ansis Atteka.

5) Fix double SKB free in ravb driver, from Dan Carpenter.

6) Fix CPU port handling in b53 DSA driver, from Florian Fainelli.

7) Don't use on-stack buffers for usb_control_msg() in CAN usb driver,
from Maksim Salau.

8) Fix device leak in macvlan driver, from Herbert Xu. We have to purge
the broadcast queue properly on port destroy.

9) Fix tx ring entry limit on EF10 devices in sfc driver. From Bert
Kenward.

10) Fix memory leaks in team driver, from Pan Bian.

11) Don't set up ipv6_stub before it can actually be used, from Paolo
    Abeni.

12) Fix tipc socket flow control accounting, from Parthasarathy
Bhuvaragan.

13) Fix crash on module unload in hso driver, from Andreas Kemnade.

14) Fix purging of bridge multicast entries, the problem is that if we
don't defer it to ndo_uninit it's possible for new entries to get
added after we purge. Fix from Xin Long.

15) Don't return garbage for PACKET_HDRLEN getsockopt, from Alexander
Potapenko.

16) Fix autoneg stall properly in PHY layer, and revert micrel driver
change that was papering over it. From Alexander Kochetkov.

17) Don't dereference an ipv4 route as an ipv6 one in the ip6_tunnel
    code, from Cong Wang.

18) Clear out the congestion control private data of the TCP socket in
    all of the right places, from Wei Wang.

19) rawv6_ioctl measures SKB length incorrectly, fix from Jamie
Bainbridge.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
ipv6: check raw payload size correctly in ioctl
tcp: memset ca_priv data to 0 properly
ipv6: check skb->protocol before lookup for nexthop
net: core: Prevent from dereferencing null pointer when releasing SKB
macsec: dynamically allocate space for sglist
Revert "phy: micrel: Disable auto negotiation on startup"
net: phy: fix auto-negotiation stall due to unavailable interrupt
net/packet: check length in getsockopt() called with PACKET_HDRLEN
net: ipv6: regenerate host route if moved to gc list
bridge: move bridge multicast cleanup to ndo_uninit
ipv6: fix source routing
qed: Fix error in the dcbx app meta data initialization.
netvsc: fix calculation of available send sections
net: hso: fix module unloading
tipc: fix socket flow control accounting error at tipc_recv_stream
tipc: fix socket flow control accounting error at tipc_send_stream
ipv6: move stub initialization after ipv6 setup completion
team: fix memory leaks
sfc: tx ring can only have 2048 entries for all EF10 NICs
macvlan: Fix device ref leak when purging bc_queue
...

+373 -141
+2
drivers/net/can/usb/Kconfig
··· 72 PCAN-USB Pro dual CAN 2.0b channels USB adapter 73 PCAN-USB FD single CAN-FD channel USB adapter 74 PCAN-USB Pro FD dual CAN-FD channels USB adapter 75 76 (see also http://www.peak-system.com). 77
··· 72 PCAN-USB Pro dual CAN 2.0b channels USB adapter 73 PCAN-USB FD single CAN-FD channel USB adapter 74 PCAN-USB Pro FD dual CAN-FD channels USB adapter 75 + PCAN-Chip USB CAN-FD to USB stamp module 76 + PCAN-USB X6 6 CAN-FD channels USB adapter 77 78 (see also http://www.peak-system.com). 79
+12 -5
drivers/net/can/usb/gs_usb.c
··· 739 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) 740 { 741 struct gs_can *dev = netdev_priv(netdev); 742 - struct gs_identify_mode imode; 743 int rc; 744 745 if (do_identify) 746 - imode.mode = GS_CAN_IDENTIFY_ON; 747 else 748 - imode.mode = GS_CAN_IDENTIFY_OFF; 749 750 rc = usb_control_msg(interface_to_usbdev(dev->iface), 751 usb_sndctrlpipe(interface_to_usbdev(dev->iface), ··· 760 USB_RECIP_INTERFACE, 761 dev->channel, 762 0, 763 - &imode, 764 - sizeof(imode), 765 100); 766 767 return (rc > 0) ? 0 : rc; 768 }
··· 739 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) 740 { 741 struct gs_can *dev = netdev_priv(netdev); 742 + struct gs_identify_mode *imode; 743 int rc; 744 745 + imode = kmalloc(sizeof(*imode), GFP_KERNEL); 746 + 747 + if (!imode) 748 + return -ENOMEM; 749 + 750 if (do_identify) 751 + imode->mode = GS_CAN_IDENTIFY_ON; 752 else 753 + imode->mode = GS_CAN_IDENTIFY_OFF; 754 755 rc = usb_control_msg(interface_to_usbdev(dev->iface), 756 usb_sndctrlpipe(interface_to_usbdev(dev->iface), ··· 755 USB_RECIP_INTERFACE, 756 dev->channel, 757 0, 758 + imode, 759 + sizeof(*imode), 760 100); 761 + 762 + kfree(imode); 763 764 return (rc > 0) ? 0 : rc; 765 }
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.c
··· 39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, 40 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, 41 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, 42 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, 43 {} /* Terminating entry */ 44 }; ··· 52 &pcan_usb_pro, 53 &pcan_usb_fd, 54 &pcan_usb_pro_fd, 55 &pcan_usb_x6, 56 }; 57
··· 39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, 40 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, 41 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, 42 + {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)}, 43 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, 44 {} /* Terminating entry */ 45 }; ··· 51 &pcan_usb_pro, 52 &pcan_usb_fd, 53 &pcan_usb_pro_fd, 54 + &pcan_usb_chip, 55 &pcan_usb_x6, 56 }; 57
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.h
··· 27 #define PCAN_USBPRO_PRODUCT_ID 0x000d 28 #define PCAN_USBPROFD_PRODUCT_ID 0x0011 29 #define PCAN_USBFD_PRODUCT_ID 0x0012 30 #define PCAN_USBX6_PRODUCT_ID 0x0014 31 32 #define PCAN_USB_DRIVER_NAME "peak_usb" ··· 91 extern const struct peak_usb_adapter pcan_usb; 92 extern const struct peak_usb_adapter pcan_usb_pro; 93 extern const struct peak_usb_adapter pcan_usb_fd; 94 extern const struct peak_usb_adapter pcan_usb_pro_fd; 95 extern const struct peak_usb_adapter pcan_usb_x6; 96
··· 27 #define PCAN_USBPRO_PRODUCT_ID 0x000d 28 #define PCAN_USBPROFD_PRODUCT_ID 0x0011 29 #define PCAN_USBFD_PRODUCT_ID 0x0012 30 + #define PCAN_USBCHIP_PRODUCT_ID 0x0013 31 #define PCAN_USBX6_PRODUCT_ID 0x0014 32 33 #define PCAN_USB_DRIVER_NAME "peak_usb" ··· 90 extern const struct peak_usb_adapter pcan_usb; 91 extern const struct peak_usb_adapter pcan_usb_pro; 92 extern const struct peak_usb_adapter pcan_usb_fd; 93 + extern const struct peak_usb_adapter pcan_usb_chip; 94 extern const struct peak_usb_adapter pcan_usb_pro_fd; 95 extern const struct peak_usb_adapter pcan_usb_x6; 96
+72
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
··· 1061 .do_get_berr_counter = pcan_usb_fd_get_berr_counter, 1062 }; 1063 1064 /* describes the PCAN-USB Pro FD adapter */ 1065 static const struct can_bittiming_const pcan_usb_pro_fd_const = { 1066 .name = "pcan_usb_pro_fd",
··· 1061 .do_get_berr_counter = pcan_usb_fd_get_berr_counter, 1062 }; 1063 1064 + /* describes the PCAN-CHIP USB */ 1065 + static const struct can_bittiming_const pcan_usb_chip_const = { 1066 + .name = "pcan_chip_usb", 1067 + .tseg1_min = 1, 1068 + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), 1069 + .tseg2_min = 1, 1070 + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), 1071 + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), 1072 + .brp_min = 1, 1073 + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), 1074 + .brp_inc = 1, 1075 + }; 1076 + 1077 + static const struct can_bittiming_const pcan_usb_chip_data_const = { 1078 + .name = "pcan_chip_usb", 1079 + .tseg1_min = 1, 1080 + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), 1081 + .tseg2_min = 1, 1082 + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), 1083 + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), 1084 + .brp_min = 1, 1085 + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), 1086 + .brp_inc = 1, 1087 + }; 1088 + 1089 + const struct peak_usb_adapter pcan_usb_chip = { 1090 + .name = "PCAN-Chip USB", 1091 + .device_id = PCAN_USBCHIP_PRODUCT_ID, 1092 + .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, 1093 + .ctrlmode_supported = CAN_CTRLMODE_FD | 1094 + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, 1095 + .clock = { 1096 + .freq = PCAN_UFD_CRYSTAL_HZ, 1097 + }, 1098 + .bittiming_const = &pcan_usb_chip_const, 1099 + .data_bittiming_const = &pcan_usb_chip_data_const, 1100 + 1101 + /* size of device private data */ 1102 + .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), 1103 + 1104 + /* timestamps usage */ 1105 + .ts_used_bits = 32, 1106 + .ts_period = 1000000, /* calibration period in ts. */ 1107 + .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ 1108 + .us_per_ts_shift = 0, 1109 + 1110 + /* give here messages in/out endpoints */ 1111 + .ep_msg_in = PCAN_USBPRO_EP_MSGIN, 1112 + .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0}, 1113 + 1114 + /* size of rx/tx usb buffers */ 1115 + .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, 1116 + .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, 1117 + 1118 + /* device callbacks */ 1119 + .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ 1120 + .dev_init = pcan_usb_fd_init, 1121 + 1122 + .dev_exit = pcan_usb_fd_exit, 1123 + .dev_free = pcan_usb_fd_free, 1124 + .dev_set_bus = pcan_usb_fd_set_bus, 1125 + .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, 1126 + .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, 1127 + .dev_decode_buf = pcan_usb_fd_decode_buf, 1128 + .dev_start = pcan_usb_fd_start, 1129 + .dev_stop = pcan_usb_fd_stop, 1130 + .dev_restart_async = pcan_usb_fd_restart_async, 1131 + .dev_encode_msg = pcan_usb_fd_encode_msg, 1132 + 1133 + .do_get_berr_counter = pcan_usb_fd_get_berr_counter, 1134 + }; 1135 + 1136 /* describes the PCAN-USB Pro FD adapter */ 1137 static const struct can_bittiming_const pcan_usb_pro_fd_const = { 1138 .name = "pcan_usb_pro_fd",
+35 -2
drivers/net/dsa/b53/b53_common.c
··· 326 327 static void b53_set_forwarding(struct b53_device *dev, int enable) 328 { 329 u8 mgmt; 330 331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); ··· 337 mgmt &= ~SM_SW_FWD_EN; 338 339 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 340 } 341 342 static void b53_enable_vlan(struct b53_device *dev, bool enable) ··· 608 609 static int b53_switch_reset(struct b53_device *dev) 610 { 611 - u8 mgmt; 612 613 b53_switch_reset_gpio(dev); 614 615 if (is539x(dev)) { 616 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 617 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 618 } 619 620 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); ··· 1764 .vlans = 4096, 1765 .enabled_ports = 0x1ff, 1766 .arl_entries = 4, 1767 - .cpu_port = B53_CPU_PORT_25, 1768 .vta_regs = B53_VTA_REGS, 1769 .duplex_reg = B53_DUPLEX_STAT_GE, 1770 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
··· 326 327 static void b53_set_forwarding(struct b53_device *dev, int enable) 328 { 329 + struct dsa_switch *ds = dev->ds; 330 u8 mgmt; 331 332 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); ··· 336 mgmt &= ~SM_SW_FWD_EN; 337 338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 339 + 340 + /* Include IMP port in dumb forwarding mode when no tagging protocol is 341 + * set 342 + */ 343 + if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) { 344 + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 345 + mgmt |= B53_MII_DUMB_FWDG_EN; 346 + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 347 + } 348 } 349 350 static void b53_enable_vlan(struct b53_device *dev, bool enable) ··· 598 599 static int b53_switch_reset(struct b53_device *dev) 600 { 601 + unsigned int timeout = 1000; 602 + u8 mgmt, reg; 603 604 b53_switch_reset_gpio(dev); 605 606 if (is539x(dev)) { 607 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 608 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 609 + } 610 + 611 + /* This is specific to 58xx devices here, do not use is58xx() which 612 + * covers the larger Starfigther 2 family, including 7445/7278 which 613 + * still use this driver as a library and need to perform the reset 614 + * earlier. 615 + */ 616 + if (dev->chip_id == BCM58XX_DEVICE_ID) { 617 + b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg); 618 + reg |= SW_RST | EN_SW_RST | EN_CH_RST; 619 + b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 620 + 621 + do { 622 + b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg); 623 + if (!(reg & SW_RST)) 624 + break; 625 + 626 + usleep_range(1000, 2000); 627 + } while (timeout-- > 0); 628 + 629 + if (timeout == 0) 630 + return -ETIMEDOUT; 631 } 632 633 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); ··· 1731 .vlans = 4096, 1732 .enabled_ports = 0x1ff, 1733 .arl_entries = 4, 1734 + .cpu_port = B53_CPU_PORT, 1735 .vta_regs = B53_VTA_REGS, 1736 .duplex_reg = B53_DUPLEX_STAT_GE, 1737 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+5
drivers/net/dsa/b53/b53_regs.h
··· 104 #define B53_UC_FWD_EN BIT(6) 105 #define B53_MC_FWD_EN BIT(7) 106 107 /* (16 bit) */ 108 #define B53_UC_FLOOD_MASK 0x32 109 #define B53_MC_FLOOD_MASK 0x34 ··· 143 /* Software reset register (8 bit) */ 144 #define B53_SOFTRESET 0x79 145 #define SW_RST BIT(7) 146 #define EN_SW_RST BIT(4) 147 148 /* Fast Aging Control register (8 bit) */
··· 104 #define B53_UC_FWD_EN BIT(6) 105 #define B53_MC_FWD_EN BIT(7) 106 107 + /* Switch control (8 bit) */ 108 + #define B53_SWITCH_CTRL 0x22 109 + #define B53_MII_DUMB_FWDG_EN BIT(6) 110 + 111 /* (16 bit) */ 112 #define B53_UC_FLOOD_MASK 0x32 113 #define B53_MC_FLOOD_MASK 0x34 ··· 139 /* Software reset register (8 bit) */ 140 #define B53_SOFTRESET 0x79 141 #define SW_RST BIT(7) 142 + #define EN_CH_RST BIT(6) 143 #define EN_SW_RST BIT(4) 144 145 /* Fast Aging Control register (8 bit) */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 90 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) 91 92 #define MLX5_UMR_ALIGN (2048) 93 - #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) 94 95 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) 96 #define MLX5E_DEFAULT_LRO_TIMEOUT 32
··· 90 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) 91 92 #define MLX5_UMR_ALIGN (2048) 93 + #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) 94 95 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) 96 #define MLX5E_DEFAULT_LRO_TIMEOUT 32
+1
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
··· 564 int idx = 0; 565 int err = 0; 566 567 while ((!err || err == -ENOENT) && idx < info->rule_cnt) { 568 err = mlx5e_ethtool_get_flow(priv, info, location); 569 if (!err)
··· 564 int idx = 0; 565 int err = 0; 566 567 + info->data = MAX_NUM_OF_ETHTOOL_RULES; 568 while ((!err || err == -ENOENT) && idx < info->rule_cnt) { 569 err = mlx5e_ethtool_get_flow(priv, info, location); 570 if (!err)
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 174 175 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) 176 { 177 - struct mlx5e_sw_stats *s = &priv->stats.sw; 178 struct mlx5e_rq_stats *rq_stats; 179 struct mlx5e_sq_stats *sq_stats; 180 u64 tx_offload_none = 0; ··· 229 s->link_down_events_phy = MLX5_GET(ppcnt_reg, 230 priv->stats.pport.phy_counters, 231 counter_set.phys_layer_cntrs.link_down_events); 232 } 233 234 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) ··· 244 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 245 MLX5_SET(query_vport_counter_in, in, other_vport, 0); 246 247 - memset(out, 0, outlen); 248 mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); 249 } 250
··· 174 175 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) 176 { 177 + struct mlx5e_sw_stats temp, *s = &temp; 178 struct mlx5e_rq_stats *rq_stats; 179 struct mlx5e_sq_stats *sq_stats; 180 u64 tx_offload_none = 0; ··· 229 s->link_down_events_phy = MLX5_GET(ppcnt_reg, 230 priv->stats.pport.phy_counters, 231 counter_set.phys_layer_cntrs.link_down_events); 232 + memcpy(&priv->stats.sw, s, sizeof(*s)); 233 } 234 235 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) ··· 243 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 244 MLX5_SET(query_vport_counter_in, in, other_vport, 0); 245 246 mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); 247 } 248
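The en_main.c hunk above is a small publish pattern: the software counters are accumulated into an on-stack struct and copied into priv->stats.sw in one step, so a concurrent reader never sees the half-rebuilt intermediate state. A minimal userspace sketch of the same idea (struct and field names here are made up for illustration, not the driver's):

#include <stdio.h>
#include <string.h>

struct sw_stats {
    unsigned long long rx_packets;
    unsigned long long tx_bytes;
};

static struct sw_stats published;       /* what readers of the stats see */

static void update_stats(unsigned long long rx, unsigned long long bytes)
{
    struct sw_stats temp = { 0 };       /* accumulate locally first */

    temp.rx_packets += rx;
    temp.tx_bytes += bytes;

    /* One publish step: a reader observes either the old set or the
     * new set, not a mix of cleared and updated fields.
     */
    memcpy(&published, &temp, sizeof(published));
}

int main(void)
{
    update_stats(10, 1500);
    printf("rx=%llu bytes=%llu\n", published.rx_packets, published.tx_bytes);
    return 0;
}

As in the driver fix, the copy itself is not atomic; it just removes the long window during which the destination was being rebuilt field by field.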
+48 -39
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 639 640 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) && 641 rep->vport != FDB_UPLINK_VPORT) { 642 - if (min_inline > esw->offloads.inline_mode) { 643 netdev_warn(priv->netdev, 644 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 645 min_inline, esw->offloads.inline_mode); ··· 786 return 0; 787 } 788 789 - static int gen_vxlan_header_ipv4(struct net_device *out_dev, 790 - char buf[], 791 - unsigned char h_dest[ETH_ALEN], 792 - int ttl, 793 - __be32 daddr, 794 - __be32 saddr, 795 - __be16 udp_dst_port, 796 - __be32 vx_vni) 797 { 798 - int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN; 799 struct ethhdr *eth = (struct ethhdr *)buf; 800 struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); 801 struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); ··· 817 udp->dest = udp_dst_port; 818 vxh->vx_flags = VXLAN_HF_VNI; 819 vxh->vx_vni = vxlan_vni_field(vx_vni); 820 - 821 - return encap_size; 822 } 823 824 - static int gen_vxlan_header_ipv6(struct net_device *out_dev, 825 - char buf[], 826 - unsigned char h_dest[ETH_ALEN], 827 - int ttl, 828 - struct in6_addr *daddr, 829 - struct in6_addr *saddr, 830 - __be16 udp_dst_port, 831 - __be32 vx_vni) 832 { 833 - int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN; 834 struct ethhdr *eth = (struct ethhdr *)buf; 835 struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); 836 struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); ··· 849 udp->dest = udp_dst_port; 850 vxh->vx_flags = VXLAN_HF_VNI; 851 vxh->vx_vni = vxlan_vni_field(vx_vni); 852 - 853 - return encap_size; 854 } 855 856 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, ··· 857 struct net_device **out_dev) 858 { 859 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 860 struct ip_tunnel_key *tun_key = &e->tun_info.key; 861 - int encap_size, ttl, err; 862 struct neighbour *n = NULL; 863 struct flowi4 fl4 = {}; 864 char *encap_header; 865 866 - encap_header = kzalloc(max_encap_size, GFP_KERNEL); 867 if (!encap_header) 868 return -ENOMEM; 869 ··· 905 906 switch (e->tunnel_type) { 907 case MLX5_HEADER_TYPE_VXLAN: 908 - encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, 909 - e->h_dest, ttl, 910 - fl4.daddr, 911 - fl4.saddr, tun_key->tp_dst, 912 - tunnel_id_to_key32(tun_key->tun_id)); 913 break; 914 default: 915 err = -EOPNOTSUPP; ··· 917 } 918 919 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, 920 - encap_size, encap_header, &e->encap_id); 921 out: 922 if (err && n) 923 neigh_release(n); ··· 932 933 { 934 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 935 struct ip_tunnel_key *tun_key = &e->tun_info.key; 936 - int encap_size, err, ttl = 0; 937 struct neighbour *n = NULL; 938 struct flowi6 fl6 = {}; 939 char *encap_header; 940 941 - encap_header = kzalloc(max_encap_size, GFP_KERNEL); 942 if (!encap_header) 943 return -ENOMEM; 944 ··· 981 982 switch (e->tunnel_type) { 983 case MLX5_HEADER_TYPE_VXLAN: 984 - encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, 985 - e->h_dest, ttl, 986 - &fl6.daddr, 987 - &fl6.saddr, tun_key->tp_dst, 988 - tunnel_id_to_key32(tun_key->tun_id)); 989 break; 990 default: 991 err = -EOPNOTSUPP; ··· 993 } 994 995 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, 996 - encap_size, encap_header, &e->encap_id); 997 out: 998 if (err && n) 999 neigh_release(n);
··· 639 640 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) && 641 rep->vport != FDB_UPLINK_VPORT) { 642 + if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 643 + esw->offloads.inline_mode < min_inline) { 644 netdev_warn(priv->netdev, 645 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 646 min_inline, esw->offloads.inline_mode); ··· 785 return 0; 786 } 787 788 + static void gen_vxlan_header_ipv4(struct net_device *out_dev, 789 + char buf[], int encap_size, 790 + unsigned char h_dest[ETH_ALEN], 791 + int ttl, 792 + __be32 daddr, 793 + __be32 saddr, 794 + __be16 udp_dst_port, 795 + __be32 vx_vni) 796 { 797 struct ethhdr *eth = (struct ethhdr *)buf; 798 struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); 799 struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); ··· 817 udp->dest = udp_dst_port; 818 vxh->vx_flags = VXLAN_HF_VNI; 819 vxh->vx_vni = vxlan_vni_field(vx_vni); 820 } 821 822 + static void gen_vxlan_header_ipv6(struct net_device *out_dev, 823 + char buf[], int encap_size, 824 + unsigned char h_dest[ETH_ALEN], 825 + int ttl, 826 + struct in6_addr *daddr, 827 + struct in6_addr *saddr, 828 + __be16 udp_dst_port, 829 + __be32 vx_vni) 830 { 831 struct ethhdr *eth = (struct ethhdr *)buf; 832 struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); 833 struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); ··· 852 udp->dest = udp_dst_port; 853 vxh->vx_flags = VXLAN_HF_VNI; 854 vxh->vx_vni = vxlan_vni_field(vx_vni); 855 } 856 857 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, ··· 862 struct net_device **out_dev) 863 { 864 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 865 + int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN; 866 struct ip_tunnel_key *tun_key = &e->tun_info.key; 867 struct neighbour *n = NULL; 868 struct flowi4 fl4 = {}; 869 char *encap_header; 870 + int ttl, err; 871 872 + if (max_encap_size < ipv4_encap_size) { 873 + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 874 + ipv4_encap_size, max_encap_size); 875 + return -EOPNOTSUPP; 876 + } 877 + 878 + encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); 879 if (!encap_header) 880 return -ENOMEM; 881 ··· 903 904 switch (e->tunnel_type) { 905 case MLX5_HEADER_TYPE_VXLAN: 906 + gen_vxlan_header_ipv4(*out_dev, encap_header, 907 + ipv4_encap_size, e->h_dest, ttl, 908 + fl4.daddr, 909 + fl4.saddr, tun_key->tp_dst, 910 + tunnel_id_to_key32(tun_key->tun_id)); 911 break; 912 default: 913 err = -EOPNOTSUPP; ··· 915 } 916 917 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, 918 + ipv4_encap_size, encap_header, &e->encap_id); 919 out: 920 if (err && n) 921 neigh_release(n); ··· 930 931 { 932 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 933 + int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; 934 struct ip_tunnel_key *tun_key = &e->tun_info.key; 935 struct neighbour *n = NULL; 936 struct flowi6 fl6 = {}; 937 char *encap_header; 938 + int err, ttl = 0; 939 940 + if (max_encap_size < ipv6_encap_size) { 941 + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 942 + ipv6_encap_size, max_encap_size); 943 + return -EOPNOTSUPP; 944 + } 945 + 946 + encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); 947 if (!encap_header) 948 return -ENOMEM; 949 ··· 972 973 switch (e->tunnel_type) { 974 case MLX5_HEADER_TYPE_VXLAN: 975 + gen_vxlan_header_ipv6(*out_dev, encap_header, 
976 + ipv6_encap_size, e->h_dest, ttl, 977 + &fl6.daddr, 978 + &fl6.saddr, tun_key->tp_dst, 979 + tunnel_id_to_key32(tun_key->tun_id)); 980 break; 981 default: 982 err = -EOPNOTSUPP; ··· 984 } 985 986 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, 987 + ipv6_encap_size, encap_header, &e->encap_id); 988 out: 989 if (err && n) 990 neigh_release(n);
+24 -12
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 911 struct mlx5_core_dev *dev = devlink_priv(devlink); 912 struct mlx5_eswitch *esw = dev->priv.eswitch; 913 int num_vports = esw->enabled_vports; 914 - int err; 915 - int vport; 916 u8 mlx5_mode; 917 918 if (!MLX5_CAP_GEN(dev, vport_group_manager)) ··· 920 if (esw->mode == SRIOV_NONE) 921 return -EOPNOTSUPP; 922 923 - if (MLX5_CAP_ETH(dev, wqe_inline_mode) != 924 - MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) 925 return -EOPNOTSUPP; 926 927 if (esw->offloads.num_flows > 0) { 928 esw_warn(dev, "Can't set inline mode when flows are configured\n"); ··· 973 if (esw->mode == SRIOV_NONE) 974 return -EOPNOTSUPP; 975 976 - if (MLX5_CAP_ETH(dev, wqe_inline_mode) != 977 - MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) 978 - return -EOPNOTSUPP; 979 - 980 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 981 } 982 983 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) 984 { 985 struct mlx5_core_dev *dev = esw->dev; 986 int vport; 987 - u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 988 989 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 990 return -EOPNOTSUPP; ··· 988 if (esw->mode == SRIOV_NONE) 989 return -EOPNOTSUPP; 990 991 - if (MLX5_CAP_ETH(dev, wqe_inline_mode) != 992 - MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) 993 - return -EOPNOTSUPP; 994 995 for (vport = 1; vport <= nvfs; vport++) { 996 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); 997 if (vport > 1 && prev_mlx5_mode != mlx5_mode) ··· 1007 prev_mlx5_mode = mlx5_mode; 1008 } 1009 1010 *mode = mlx5_mode; 1011 return 0; 1012 }
··· 911 struct mlx5_core_dev *dev = devlink_priv(devlink); 912 struct mlx5_eswitch *esw = dev->priv.eswitch; 913 int num_vports = esw->enabled_vports; 914 + int err, vport; 915 u8 mlx5_mode; 916 917 if (!MLX5_CAP_GEN(dev, vport_group_manager)) ··· 921 if (esw->mode == SRIOV_NONE) 922 return -EOPNOTSUPP; 923 924 + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 925 + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 926 + if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) 927 + return 0; 928 + /* fall through */ 929 + case MLX5_CAP_INLINE_MODE_L2: 930 + esw_warn(dev, "Inline mode can't be set\n"); 931 return -EOPNOTSUPP; 932 + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 933 + break; 934 + } 935 936 if (esw->offloads.num_flows > 0) { 937 esw_warn(dev, "Can't set inline mode when flows are configured\n"); ··· 966 if (esw->mode == SRIOV_NONE) 967 return -EOPNOTSUPP; 968 969 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 970 } 971 972 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) 973 { 974 + u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 975 struct mlx5_core_dev *dev = esw->dev; 976 int vport; 977 978 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 979 return -EOPNOTSUPP; ··· 985 if (esw->mode == SRIOV_NONE) 986 return -EOPNOTSUPP; 987 988 + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 989 + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 990 + mlx5_mode = MLX5_INLINE_MODE_NONE; 991 + goto out; 992 + case MLX5_CAP_INLINE_MODE_L2: 993 + mlx5_mode = MLX5_INLINE_MODE_L2; 994 + goto out; 995 + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 996 + goto query_vports; 997 + } 998 999 + query_vports: 1000 for (vport = 1; vport <= nvfs; vport++) { 1001 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); 1002 if (vport > 1 && prev_mlx5_mode != mlx5_mode) ··· 996 prev_mlx5_mode = mlx5_mode; 997 } 998 999 + out: 1000 *mode = mlx5_mode; 1001 return 0; 1002 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1029 if (err) { 1030 dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n", 1031 FW_INIT_TIMEOUT_MILI); 1032 - goto out_err; 1033 } 1034 1035 err = mlx5_core_enable_hca(dev, 0);
··· 1029 if (err) { 1030 dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n", 1031 FW_INIT_TIMEOUT_MILI); 1032 + goto err_cmd_cleanup; 1033 } 1034 1035 err = mlx5_core_enable_hca(dev, 0);
+1
drivers/net/ethernet/mellanox/mlx5/core/uar.c
··· 87 struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); 88 89 list_del(&up->list); 90 if (mlx5_cmd_free_uar(up->mdev, up->index)) 91 mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); 92 kfree(up->reg_bitmap);
··· 87 struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); 88 89 list_del(&up->list); 90 + iounmap(up->map); 91 if (mlx5_cmd_free_uar(up->mdev, up->index)) 92 mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); 93 kfree(up->reg_bitmap);
+5 -5
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
··· 64 ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) 65 66 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = { 67 - {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT}, 68 - {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT}, 69 - {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT}, 70 - {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT}, 71 - {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH} 72 }; 73 74 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
··· 64 ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) 65 66 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = { 67 + {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI}, 68 + {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE}, 69 + {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE}, 70 + {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE}, 71 + {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}, 72 }; 73 74 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+4 -3
drivers/net/ethernet/renesas/ravb_main.c
··· 1516 spin_unlock_irqrestore(&priv->lock, flags); 1517 return NETDEV_TX_BUSY; 1518 } 1519 - entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); 1520 - priv->tx_skb[q][entry / NUM_TX_DESC] = skb; 1521 1522 if (skb_put_padto(skb, ETH_ZLEN)) 1523 - goto drop; 1524 1525 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + 1526 entry / NUM_TX_DESC * DPTR_ALIGN;
··· 1516 spin_unlock_irqrestore(&priv->lock, flags); 1517 return NETDEV_TX_BUSY; 1518 } 1519 1520 if (skb_put_padto(skb, ETH_ZLEN)) 1521 + goto exit; 1522 + 1523 + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); 1524 + priv->tx_skb[q][entry / NUM_TX_DESC] = skb; 1525 1526 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + 1527 entry / NUM_TX_DESC * DPTR_ALIGN;
+4 -1
drivers/net/ethernet/sfc/efx.h
··· 74 #define EFX_RXQ_MIN_ENT 128U 75 #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) 76 77 - #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ 78 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) 79 80 static inline bool efx_rss_enabled(struct efx_nic *efx)
··· 74 #define EFX_RXQ_MIN_ENT 128U 75 #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) 76 77 + /* All EF10 architecture NICs steal one bit of the DMAQ size for various 78 + * other purposes when counting TxQ entries, so we halve the queue size. 79 + */ 80 + #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \ 81 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) 82 83 static inline bool efx_rss_enabled(struct efx_nic *efx)
+1
drivers/net/ethernet/sfc/workarounds.h
··· 16 */ 17 18 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) 19 #define EFX_WORKAROUND_10G(efx) 1 20 21 /* Bit-bashed I2C reads cause performance drop */
··· 16 */ 17 18 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) 19 + #define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) 20 #define EFX_WORKAROUND_10G(efx) 1 21 22 /* Bit-bashed I2C reads cause performance drop */
+1 -1
drivers/net/ethernet/toshiba/tc35815.c
··· 1017 BUG_ON(lp->tx_skbs[i].skb != skb); 1018 #endif 1019 if (skb) { 1020 - dev_kfree_skb(skb); 1021 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); 1022 lp->tx_skbs[i].skb = NULL; 1023 lp->tx_skbs[i].skb_dma = 0; 1024 }
··· 1017 BUG_ON(lp->tx_skbs[i].skb != skb); 1018 #endif 1019 if (skb) { 1020 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); 1021 + dev_kfree_skb(skb); 1022 lp->tx_skbs[i].skb = NULL; 1023 lp->tx_skbs[i].skb_dma = 0; 1024 }
-1
drivers/net/hyperv/hyperv_net.h
··· 751 u32 send_section_cnt; 752 u32 send_section_size; 753 unsigned long *send_section_map; 754 - int map_words; 755 756 /* Used for NetVSP initialization protocol */ 757 struct completion channel_init_wait;
··· 751 u32 send_section_cnt; 752 u32 send_section_size; 753 unsigned long *send_section_map; 754 755 /* Used for NetVSP initialization protocol */ 756 struct completion channel_init_wait;
+4 -5
drivers/net/hyperv/netvsc.c
··· 236 struct netvsc_device *net_device; 237 struct nvsp_message *init_packet; 238 struct net_device *ndev; 239 int node; 240 241 net_device = get_outbound_net_device(device); ··· 402 net_device->send_section_size, net_device->send_section_cnt); 403 404 /* Setup state for managing the send buffer. */ 405 - net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt, 406 - BITS_PER_LONG); 407 408 - net_device->send_section_map = kcalloc(net_device->map_words, 409 - sizeof(ulong), GFP_KERNEL); 410 if (net_device->send_section_map == NULL) { 411 ret = -ENOMEM; 412 goto cleanup; ··· 682 unsigned long *map_addr = net_device->send_section_map; 683 unsigned int i; 684 685 - for_each_clear_bit(i, map_addr, net_device->map_words) { 686 if (sync_test_and_set_bit(i, map_addr) == 0) 687 return i; 688 }
··· 236 struct netvsc_device *net_device; 237 struct nvsp_message *init_packet; 238 struct net_device *ndev; 239 + size_t map_words; 240 int node; 241 242 net_device = get_outbound_net_device(device); ··· 401 net_device->send_section_size, net_device->send_section_cnt); 402 403 /* Setup state for managing the send buffer. */ 404 + map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG); 405 406 + net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL); 407 if (net_device->send_section_map == NULL) { 408 ret = -ENOMEM; 409 goto cleanup; ··· 683 unsigned long *map_addr = net_device->send_section_map; 684 unsigned int i; 685 686 + for_each_clear_bit(i, map_addr, net_device->send_section_cnt) { 687 if (sync_test_and_set_bit(i, map_addr) == 0) 688 return i; 689 }
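The arithmetic behind the netvsc fix above: the bitmap is sized in words, DIV_ROUND_UP(send_section_cnt, BITS_PER_LONG), but a free slot has to be searched across all send_section_cnt bits, not across the word count. A small userspace sketch of that sizing (DIV_ROUND_UP and BITS_PER_LONG are re-defined here only for illustration; the kernel provides its own):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG       (sizeof(unsigned long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    size_t send_section_cnt = 100;      /* hypothetical section count */
    size_t map_words = DIV_ROUND_UP(send_section_cnt, BITS_PER_LONG);
    unsigned long *map = calloc(map_words, sizeof(*map));

    if (!map)
        return 1;

    /* Scanning only 'map_words' bit positions (the old bug) would look
     * at just 2 sections on a 64-bit host; the search must cover all
     * 'send_section_cnt' bits of the map.
     */
    printf("words allocated: %zu, sections tracked: %zu\n",
           map_words, send_section_cnt);

    free(map);
    return 0;
}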
+21 -6
drivers/net/macsec.c
··· 617 618 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm, 619 unsigned char **iv, 620 - struct scatterlist **sg) 621 { 622 size_t size, iv_offset, sg_offset; 623 struct aead_request *req; ··· 630 631 size = ALIGN(size, __alignof__(struct scatterlist)); 632 sg_offset = size; 633 - size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1); 634 635 tmp = kmalloc(size, GFP_ATOMIC); 636 if (!tmp) ··· 650 { 651 int ret; 652 struct scatterlist *sg; 653 unsigned char *iv; 654 struct ethhdr *eth; 655 struct macsec_eth_header *hh; ··· 725 return ERR_PTR(-EINVAL); 726 } 727 728 - req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg); 729 if (!req) { 730 macsec_txsa_put(tx_sa); 731 kfree_skb(skb); ··· 741 742 macsec_fill_iv(iv, secy->sci, pn); 743 744 - sg_init_table(sg, MAX_SKB_FRAGS + 1); 745 skb_to_sgvec(skb, sg, 0, skb->len); 746 747 if (tx_sc->encrypt) { ··· 926 { 927 int ret; 928 struct scatterlist *sg; 929 unsigned char *iv; 930 struct aead_request *req; 931 struct macsec_eth_header *hdr; ··· 937 if (!skb) 938 return ERR_PTR(-ENOMEM); 939 940 - req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg); 941 if (!req) { 942 kfree_skb(skb); 943 return ERR_PTR(-ENOMEM); ··· 951 hdr = (struct macsec_eth_header *)skb->data; 952 macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); 953 954 - sg_init_table(sg, MAX_SKB_FRAGS + 1); 955 skb_to_sgvec(skb, sg, 0, skb->len); 956 957 if (hdr->tci_an & MACSEC_TCI_E) {
··· 617 618 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm, 619 unsigned char **iv, 620 + struct scatterlist **sg, 621 + int num_frags) 622 { 623 size_t size, iv_offset, sg_offset; 624 struct aead_request *req; ··· 629 630 size = ALIGN(size, __alignof__(struct scatterlist)); 631 sg_offset = size; 632 + size += sizeof(struct scatterlist) * num_frags; 633 634 tmp = kmalloc(size, GFP_ATOMIC); 635 if (!tmp) ··· 649 { 650 int ret; 651 struct scatterlist *sg; 652 + struct sk_buff *trailer; 653 unsigned char *iv; 654 struct ethhdr *eth; 655 struct macsec_eth_header *hh; ··· 723 return ERR_PTR(-EINVAL); 724 } 725 726 + ret = skb_cow_data(skb, 0, &trailer); 727 + if (unlikely(ret < 0)) { 728 + macsec_txsa_put(tx_sa); 729 + kfree_skb(skb); 730 + return ERR_PTR(ret); 731 + } 732 + 733 + req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret); 734 if (!req) { 735 macsec_txsa_put(tx_sa); 736 kfree_skb(skb); ··· 732 733 macsec_fill_iv(iv, secy->sci, pn); 734 735 + sg_init_table(sg, ret); 736 skb_to_sgvec(skb, sg, 0, skb->len); 737 738 if (tx_sc->encrypt) { ··· 917 { 918 int ret; 919 struct scatterlist *sg; 920 + struct sk_buff *trailer; 921 unsigned char *iv; 922 struct aead_request *req; 923 struct macsec_eth_header *hdr; ··· 927 if (!skb) 928 return ERR_PTR(-ENOMEM); 929 930 + ret = skb_cow_data(skb, 0, &trailer); 931 + if (unlikely(ret < 0)) { 932 + kfree_skb(skb); 933 + return ERR_PTR(ret); 934 + } 935 + req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret); 936 if (!req) { 937 kfree_skb(skb); 938 return ERR_PTR(-ENOMEM); ··· 936 hdr = (struct macsec_eth_header *)skb->data; 937 macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); 938 939 + sg_init_table(sg, ret); 940 skb_to_sgvec(skb, sg, 0, skb->len); 941 942 if (hdr->tci_an & MACSEC_TCI_E) {
+10 -1
drivers/net/macvlan.c
··· 1139 static void macvlan_port_destroy(struct net_device *dev) 1140 { 1141 struct macvlan_port *port = macvlan_port_get_rtnl(dev); 1142 1143 dev->priv_flags &= ~IFF_MACVLAN_PORT; 1144 netdev_rx_handler_unregister(dev); ··· 1148 * but we need to cancel it and purge left skbs if any. 1149 */ 1150 cancel_work_sync(&port->bc_work); 1151 - __skb_queue_purge(&port->bc_queue); 1152 1153 kfree(port); 1154 }
··· 1139 static void macvlan_port_destroy(struct net_device *dev) 1140 { 1141 struct macvlan_port *port = macvlan_port_get_rtnl(dev); 1142 + struct sk_buff *skb; 1143 1144 dev->priv_flags &= ~IFF_MACVLAN_PORT; 1145 netdev_rx_handler_unregister(dev); ··· 1147 * but we need to cancel it and purge left skbs if any. 1148 */ 1149 cancel_work_sync(&port->bc_work); 1150 + 1151 + while ((skb = __skb_dequeue(&port->bc_queue))) { 1152 + const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; 1153 + 1154 + if (src) 1155 + dev_put(src->dev); 1156 + 1157 + kfree_skb(skb); 1158 + } 1159 1160 kfree(port); 1161 }
-11
drivers/net/phy/micrel.c
··· 297 if (priv->led_mode >= 0) 298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 299 300 - if (phy_interrupt_is_valid(phydev)) { 301 - int ctl = phy_read(phydev, MII_BMCR); 302 - 303 - if (ctl < 0) 304 - return ctl; 305 - 306 - ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE); 307 - if (ret < 0) 308 - return ret; 309 - } 310 - 311 return 0; 312 } 313
··· 297 if (priv->led_mode >= 0) 298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 299 300 return 0; 301 } 302
+36 -4
drivers/net/phy/phy.c
··· 591 EXPORT_SYMBOL(phy_mii_ioctl); 592 593 /** 594 - * phy_start_aneg - start auto-negotiation for this PHY device 595 * @phydev: the phy_device struct 596 * 597 * Description: Sanitizes the settings (if we're not autonegotiating 598 * them), and then calls the driver's config_aneg function. 599 * If the PHYCONTROL Layer is operating, we change the state to 600 * reflect the beginning of Auto-negotiation or forcing. 601 */ 602 - int phy_start_aneg(struct phy_device *phydev) 603 { 604 int err; 605 606 if (!phydev->drv) ··· 630 } 631 } 632 633 out_unlock: 634 mutex_unlock(&phydev->lock); 635 return err; 636 } 637 EXPORT_SYMBOL(phy_start_aneg); 638 ··· 691 * state machine runs. 692 */ 693 694 - static void phy_trigger_machine(struct phy_device *phydev, bool sync) 695 { 696 if (sync) 697 cancel_delayed_work_sync(&phydev->state_queue); ··· 1186 mutex_unlock(&phydev->lock); 1187 1188 if (needs_aneg) 1189 - err = phy_start_aneg(phydev); 1190 else if (do_suspend) 1191 phy_suspend(phydev); 1192
··· 591 EXPORT_SYMBOL(phy_mii_ioctl); 592 593 /** 594 + * phy_start_aneg_priv - start auto-negotiation for this PHY device 595 * @phydev: the phy_device struct 596 + * @sync: indicate whether we should wait for the workqueue cancelation 597 * 598 * Description: Sanitizes the settings (if we're not autonegotiating 599 * them), and then calls the driver's config_aneg function. 600 * If the PHYCONTROL Layer is operating, we change the state to 601 * reflect the beginning of Auto-negotiation or forcing. 602 */ 603 + static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) 604 { 605 + bool trigger = 0; 606 int err; 607 608 if (!phydev->drv) ··· 628 } 629 } 630 631 + /* Re-schedule a PHY state machine to check PHY status because 632 + * negotiation may already be done and aneg interrupt may not be 633 + * generated. 634 + */ 635 + if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { 636 + err = phy_aneg_done(phydev); 637 + if (err > 0) { 638 + trigger = true; 639 + err = 0; 640 + } 641 + } 642 + 643 out_unlock: 644 mutex_unlock(&phydev->lock); 645 + 646 + if (trigger) 647 + phy_trigger_machine(phydev, sync); 648 + 649 return err; 650 + } 651 + 652 + /** 653 + * phy_start_aneg - start auto-negotiation for this PHY device 654 + * @phydev: the phy_device struct 655 + * 656 + * Description: Sanitizes the settings (if we're not autonegotiating 657 + * them), and then calls the driver's config_aneg function. 658 + * If the PHYCONTROL Layer is operating, we change the state to 659 + * reflect the beginning of Auto-negotiation or forcing. 660 + */ 661 + int phy_start_aneg(struct phy_device *phydev) 662 + { 663 + return phy_start_aneg_priv(phydev, true); 664 } 665 EXPORT_SYMBOL(phy_start_aneg); 666 ··· 659 * state machine runs. 660 */ 661 662 + void phy_trigger_machine(struct phy_device *phydev, bool sync) 663 { 664 if (sync) 665 cancel_delayed_work_sync(&phydev->state_queue); ··· 1154 mutex_unlock(&phydev->lock); 1155 1156 if (needs_aneg) 1157 + err = phy_start_aneg_priv(phydev, false); 1158 else if (do_suspend) 1159 phy_suspend(phydev); 1160
+6 -2
drivers/net/team/team.c
··· 2361 2362 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, 2363 TEAM_CMD_OPTIONS_GET); 2364 - if (!hdr) 2365 return -EMSGSIZE; 2366 2367 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 2368 goto nla_put_failure; ··· 2636 2637 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, 2638 TEAM_CMD_PORT_LIST_GET); 2639 - if (!hdr) 2640 return -EMSGSIZE; 2641 2642 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 2643 goto nla_put_failure;
··· 2361 2362 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, 2363 TEAM_CMD_OPTIONS_GET); 2364 + if (!hdr) { 2365 + nlmsg_free(skb); 2366 return -EMSGSIZE; 2367 + } 2368 2369 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 2370 goto nla_put_failure; ··· 2634 2635 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, 2636 TEAM_CMD_PORT_LIST_GET); 2637 + if (!hdr) { 2638 + nlmsg_free(skb); 2639 return -EMSGSIZE; 2640 + } 2641 2642 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 2643 goto nla_put_failure;
+1 -1
drivers/net/usb/Kconfig
··· 369 optionally with LEDs that indicate traffic 370 371 config USB_NET_PLUSB 372 - tristate "Prolific PL-2301/2302/25A1 based cables" 373 # if the handshake/init/reset problems, from original 'plusb', 374 # are ever resolved ... then remove "experimental" 375 depends on USB_USBNET
··· 369 optionally with LEDs that indicate traffic 370 371 config USB_NET_PLUSB 372 + tristate "Prolific PL-2301/2302/25A1/27A1 based cables" 373 # if the handshake/init/reset problems, from original 'plusb', 374 # are ever resolved ... then remove "experimental" 375 depends on USB_USBNET
+1 -1
drivers/net/usb/hso.c
··· 3279 pr_info("unloaded\n"); 3280 3281 tty_unregister_driver(tty_drv); 3282 - put_tty_driver(tty_drv); 3283 /* deregister the usb driver */ 3284 usb_deregister(&hso_driver); 3285 } 3286 3287 /* Module definitions */
··· 3279 pr_info("unloaded\n"); 3280 3281 tty_unregister_driver(tty_drv); 3282 /* deregister the usb driver */ 3283 usb_deregister(&hso_driver); 3284 + put_tty_driver(tty_drv); 3285 } 3286 3287 /* Module definitions */
+13 -2
drivers/net/usb/plusb.c
··· 102 } 103 104 static const struct driver_info prolific_info = { 105 - .description = "Prolific PL-2301/PL-2302/PL-25A1", 106 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT, 107 /* some PL-2302 versions seem to fail usb_set_interface() */ 108 .reset = pl_reset, ··· 139 * Host-to-Host Cable 140 */ 141 .driver_info = (unsigned long) &prolific_info, 142 }, 143 144 { }, // END ··· 169 module_usb_driver(plusb_driver); 170 171 MODULE_AUTHOR("David Brownell"); 172 - MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver"); 173 MODULE_LICENSE("GPL");
··· 102 } 103 104 static const struct driver_info prolific_info = { 105 + .description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1", 106 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT, 107 /* some PL-2302 versions seem to fail usb_set_interface() */ 108 .reset = pl_reset, ··· 139 * Host-to-Host Cable 140 */ 141 .driver_info = (unsigned long) &prolific_info, 142 + 143 + }, 144 + 145 + /* super speed cables */ 146 + { 147 + USB_DEVICE(0x067b, 0x27a1), /* PL-27A1, no eeprom 148 + * also: goobay Active USB 3.0 149 + * Data Link, 150 + * Unitek Y-3501 151 + */ 152 + .driver_info = (unsigned long) &prolific_info, 153 }, 154 155 { }, // END ··· 158 module_usb_driver(plusb_driver); 159 160 MODULE_AUTHOR("David Brownell"); 161 + MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver"); 162 MODULE_LICENSE("GPL");
+1
include/linux/phy.h
··· 852 void phy_mac_interrupt(struct phy_device *phydev, int new_link); 853 void phy_start_machine(struct phy_device *phydev); 854 void phy_stop_machine(struct phy_device *phydev); 855 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); 856 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); 857 int phy_ethtool_ksettings_get(struct phy_device *phydev,
··· 852 void phy_mac_interrupt(struct phy_device *phydev, int new_link); 853 void phy_start_machine(struct phy_device *phydev); 854 void phy_stop_machine(struct phy_device *phydev); 855 + void phy_trigger_machine(struct phy_device *phydev, bool sync); 856 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); 857 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); 858 int phy_ethtool_ksettings_get(struct phy_device *phydev,
+1
net/bridge/br_device.c
··· 123 { 124 struct net_bridge *br = netdev_priv(dev); 125 126 br_multicast_uninit_stats(br); 127 br_vlan_flush(br); 128 free_percpu(br->stats);
··· 123 { 124 struct net_bridge *br = netdev_priv(dev); 125 126 + br_multicast_dev_del(br); 127 br_multicast_uninit_stats(br); 128 br_vlan_flush(br); 129 free_percpu(br->stats);
-1
net/bridge/br_if.c
··· 311 312 br_fdb_delete_by_port(br, NULL, 0, 1); 313 314 - br_multicast_dev_del(br); 315 cancel_delayed_work_sync(&br->gc_work); 316 317 br_sysfs_delbr(br->dev);
··· 311 312 br_fdb_delete_by_port(br, NULL, 0, 1); 313 314 cancel_delayed_work_sync(&br->gc_work); 315 316 br_sysfs_delbr(br->dev);
+3
net/core/dev.c
··· 2450 { 2451 unsigned long flags; 2452 2453 if (likely(atomic_read(&skb->users) == 1)) { 2454 smp_rmb(); 2455 atomic_set(&skb->users, 0);
··· 2450 { 2451 unsigned long flags; 2452 2453 + if (unlikely(!skb)) 2454 + return; 2455 + 2456 if (likely(atomic_read(&skb->users) == 1)) { 2457 smp_rmb(); 2458 atomic_set(&skb->users, 0);
+2 -1
net/ipv4/route.c
··· 2359 } 2360 2361 /* L3 master device is the loopback for that domain */ 2362 - dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev; 2363 fl4->flowi4_oif = dev_out->ifindex; 2364 flags |= RTCF_LOCAL; 2365 goto make_route;
··· 2359 } 2360 2361 /* L3 master device is the loopback for that domain */ 2362 + dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? : 2363 + net->loopback_dev; 2364 fl4->flowi4_oif = dev_out->ifindex; 2365 flags |= RTCF_LOCAL; 2366 goto make_route;
+3 -8
net/ipv4/tcp_cong.c
··· 168 } 169 out: 170 rcu_read_unlock(); 171 172 - /* Clear out private data before diag gets it and 173 - * the ca has not been initialized. 174 - */ 175 - if (ca->get_info) 176 - memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 177 if (ca->flags & TCP_CONG_NEEDS_ECN) 178 INET_ECN_xmit(sk); 179 else ··· 196 tcp_cleanup_congestion_control(sk); 197 icsk->icsk_ca_ops = ca; 198 icsk->icsk_ca_setsockopt = 1; 199 200 - if (sk->sk_state != TCP_CLOSE) { 201 - memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 202 tcp_init_congestion_control(sk); 203 - } 204 } 205 206 /* Manage refcounts on socket close. */
··· 168 } 169 out: 170 rcu_read_unlock(); 171 + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 172 173 if (ca->flags & TCP_CONG_NEEDS_ECN) 174 INET_ECN_xmit(sk); 175 else ··· 200 tcp_cleanup_congestion_control(sk); 201 icsk->icsk_ca_ops = ca; 202 icsk->icsk_ca_setsockopt = 1; 203 + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 204 205 + if (sk->sk_state != TCP_CLOSE) 206 tcp_init_congestion_control(sk); 207 } 208 209 /* Manage refcounts on socket close. */
+3
net/ipv4/udp_offload.c
··· 29 u16 mac_len = skb->mac_len; 30 int udp_offset, outer_hlen; 31 __wsum partial; 32 33 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) 34 goto out; ··· 63 64 ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); 65 66 /* Try to offload checksum if possible */ 67 offload_csum = !!(need_csum && 68 (skb->dev->features & 69 (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) : 70 (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
··· 29 u16 mac_len = skb->mac_len; 30 int udp_offset, outer_hlen; 31 __wsum partial; 32 + bool need_ipsec; 33 34 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) 35 goto out; ··· 62 63 ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); 64 65 + need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); 66 /* Try to offload checksum if possible */ 67 offload_csum = !!(need_csum && 68 + !need_ipsec && 69 (skb->dev->features & 70 (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) : 71 (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
+12 -2
net/ipv6/addrconf.c
··· 3271 static int fixup_permanent_addr(struct inet6_dev *idev, 3272 struct inet6_ifaddr *ifp) 3273 { 3274 - if (!ifp->rt) { 3275 - struct rt6_info *rt; 3276 3277 rt = addrconf_dst_alloc(idev, &ifp->addr, false); 3278 if (unlikely(IS_ERR(rt))) 3279 return PTR_ERR(rt); 3280 3281 ifp->rt = rt; 3282 } 3283 3284 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
··· 3271 static int fixup_permanent_addr(struct inet6_dev *idev, 3272 struct inet6_ifaddr *ifp) 3273 { 3274 + /* rt6i_ref == 0 means the host route was removed from the 3275 + * FIB, for example, if 'lo' device is taken down. In that 3276 + * case regenerate the host route. 3277 + */ 3278 + if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) { 3279 + struct rt6_info *rt, *prev; 3280 3281 rt = addrconf_dst_alloc(idev, &ifp->addr, false); 3282 if (unlikely(IS_ERR(rt))) 3283 return PTR_ERR(rt); 3284 3285 + /* ifp->rt can be accessed outside of rtnl */ 3286 + spin_lock(&ifp->lock); 3287 + prev = ifp->rt; 3288 ifp->rt = rt; 3289 + spin_unlock(&ifp->lock); 3290 + 3291 + ip6_rt_put(prev); 3292 } 3293 3294 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
+4 -2
net/ipv6/af_inet6.c
··· 933 if (err) 934 goto igmp_fail; 935 936 - ipv6_stub = &ipv6_stub_impl; 937 - 938 err = ipv6_netfilter_init(); 939 if (err) 940 goto netfilter_fail; ··· 1008 if (err) 1009 goto sysctl_fail; 1010 #endif 1011 out: 1012 return err; 1013
··· 933 if (err) 934 goto igmp_fail; 935 936 err = ipv6_netfilter_init(); 937 if (err) 938 goto netfilter_fail; ··· 1010 if (err) 1011 goto sysctl_fail; 1012 #endif 1013 + 1014 + /* ensure that ipv6 stubs are visible only after ipv6 is ready */ 1015 + wmb(); 1016 + ipv6_stub = &ipv6_stub_impl; 1017 out: 1018 return err; 1019
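The af_inet6.c change above is a publish-after-init ordering fix: ipv6_stub is only assigned once IPv6 setup has completed, with a write barrier so other CPUs cannot observe the pointer before the state it refers to. The same idea in portable C11 atomics (a standalone sketch; names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct ipv6_like_stub {
    int ready_value;                    /* stands in for the real stub contents */
};

static struct ipv6_like_stub impl;
static _Atomic(struct ipv6_like_stub *) stub_ptr;   /* NULL until published */

static void init_and_publish(void)
{
    impl.ready_value = 42;              /* finish all initialization first */

    /* The release store plays the role of the wmb() + assignment in the
     * kernel hunk: the pointer cannot become visible before the writes
     * that precede it.
     */
    atomic_store_explicit(&stub_ptr, &impl, memory_order_release);
}

int main(void)
{
    struct ipv6_like_stub *s;

    init_and_publish();
    s = atomic_load_explicit(&stub_ptr, memory_order_acquire);
    if (s)
        printf("%d\n", s->ready_value);
    return 0;
}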
+4
net/ipv6/exthdrs.c
··· 909 { 910 switch (opt->type) { 911 case IPV6_SRCRT_TYPE_0: 912 ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr); 913 break; 914 case IPV6_SRCRT_TYPE_4: ··· 1165 1166 switch (opt->srcrt->type) { 1167 case IPV6_SRCRT_TYPE_0: 1168 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; 1169 break; 1170 case IPV6_SRCRT_TYPE_4:
··· 909 { 910 switch (opt->type) { 911 case IPV6_SRCRT_TYPE_0: 912 + case IPV6_SRCRT_STRICT: 913 + case IPV6_SRCRT_TYPE_2: 914 ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr); 915 break; 916 case IPV6_SRCRT_TYPE_4: ··· 1163 1164 switch (opt->srcrt->type) { 1165 case IPV6_SRCRT_TYPE_0: 1166 + case IPV6_SRCRT_STRICT: 1167 + case IPV6_SRCRT_TYPE_2: 1168 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; 1169 break; 1170 case IPV6_SRCRT_TYPE_4:
+18 -16
net/ipv6/ip6_tunnel.c
··· 1037 struct ip6_tnl *t = netdev_priv(dev); 1038 struct net *net = t->net; 1039 struct net_device_stats *stats = &t->dev->stats; 1040 - struct ipv6hdr *ipv6h = ipv6_hdr(skb); 1041 struct ipv6_tel_txoption opt; 1042 struct dst_entry *dst = NULL, *ndst = NULL; 1043 struct net_device *tdev; ··· 1057 1058 /* NBMA tunnel */ 1059 if (ipv6_addr_any(&t->parms.raddr)) { 1060 - struct in6_addr *addr6; 1061 - struct neighbour *neigh; 1062 - int addr_type; 1063 1064 - if (!skb_dst(skb)) 1065 - goto tx_err_link_failure; 1066 1067 - neigh = dst_neigh_lookup(skb_dst(skb), 1068 - &ipv6_hdr(skb)->daddr); 1069 - if (!neigh) 1070 - goto tx_err_link_failure; 1071 1072 - addr6 = (struct in6_addr *)&neigh->primary_key; 1073 - addr_type = ipv6_addr_type(addr6); 1074 1075 - if (addr_type == IPV6_ADDR_ANY) 1076 - addr6 = &ipv6_hdr(skb)->daddr; 1077 1078 - memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1079 - neigh_release(neigh); 1080 } else if (!(t->parms.flags & 1081 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1082 /* enable the cache only only if the routing decision does
··· 1037 struct ip6_tnl *t = netdev_priv(dev); 1038 struct net *net = t->net; 1039 struct net_device_stats *stats = &t->dev->stats; 1040 + struct ipv6hdr *ipv6h; 1041 struct ipv6_tel_txoption opt; 1042 struct dst_entry *dst = NULL, *ndst = NULL; 1043 struct net_device *tdev; ··· 1057 1058 /* NBMA tunnel */ 1059 if (ipv6_addr_any(&t->parms.raddr)) { 1060 + if (skb->protocol == htons(ETH_P_IPV6)) { 1061 + struct in6_addr *addr6; 1062 + struct neighbour *neigh; 1063 + int addr_type; 1064 1065 + if (!skb_dst(skb)) 1066 + goto tx_err_link_failure; 1067 1068 + neigh = dst_neigh_lookup(skb_dst(skb), 1069 + &ipv6_hdr(skb)->daddr); 1070 + if (!neigh) 1071 + goto tx_err_link_failure; 1072 1073 + addr6 = (struct in6_addr *)&neigh->primary_key; 1074 + addr_type = ipv6_addr_type(addr6); 1075 1076 + if (addr_type == IPV6_ADDR_ANY) 1077 + addr6 = &ipv6_hdr(skb)->daddr; 1078 1079 + memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1080 + neigh_release(neigh); 1081 + } 1082 } else if (!(t->parms.flags & 1083 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1084 /* enable the cache only only if the routing decision does
+2 -1
net/ipv6/ndisc.c
··· 1749 idev = in6_dev_get(dev); 1750 if (!idev) 1751 break; 1752 - if (idev->cnf.ndisc_notify) 1753 ndisc_send_unsol_na(dev); 1754 in6_dev_put(idev); 1755 break;
··· 1749 idev = in6_dev_get(dev); 1750 if (!idev) 1751 break; 1752 + if (idev->cnf.ndisc_notify || 1753 + net->ipv6.devconf_all->ndisc_notify) 1754 ndisc_send_unsol_na(dev); 1755 in6_dev_put(idev); 1756 break;
+1 -2
net/ipv6/raw.c
··· 1178 spin_lock_bh(&sk->sk_receive_queue.lock); 1179 skb = skb_peek(&sk->sk_receive_queue); 1180 if (skb) 1181 - amount = skb_tail_pointer(skb) - 1182 - skb_transport_header(skb); 1183 spin_unlock_bh(&sk->sk_receive_queue.lock); 1184 return put_user(amount, (int __user *)arg); 1185 }
··· 1178 spin_lock_bh(&sk->sk_receive_queue.lock); 1179 skb = skb_peek(&sk->sk_receive_queue); 1180 if (skb) 1181 + amount = skb->len; 1182 spin_unlock_bh(&sk->sk_receive_queue.lock); 1183 return put_user(amount, (int __user *)arg); 1184 }
+2
net/packet/af_packet.c
··· 3836 case PACKET_HDRLEN: 3837 if (len > sizeof(int)) 3838 len = sizeof(int); 3839 if (copy_from_user(&val, optval, len)) 3840 return -EFAULT; 3841 switch (val) {
··· 3836 case PACKET_HDRLEN: 3837 if (len > sizeof(int)) 3838 len = sizeof(int); 3839 + if (len < sizeof(int)) 3840 + return -EINVAL; 3841 if (copy_from_user(&val, optval, len)) 3842 return -EFAULT; 3843 switch (val) {
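The af_packet.c hunk above guards PACKET_HDRLEN against a short optlen: the option value is read into an int, so anything smaller than sizeof(int) would leave part of it uninitialized and the version switch would act on garbage. A userspace sketch of the same validation (the helper and values are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the length handling: clamp long lengths, reject short ones,
 * and only then copy into the int we are going to switch on.
 */
static int read_int_opt(const void *optval, size_t len, int *out)
{
    int val;

    if (len > sizeof(int))
        len = sizeof(int);
    if (len < sizeof(int))
        return -EINVAL;
    memcpy(&val, optval, len);
    *out = val;
    return 0;
}

int main(void)
{
    int version = 1;                    /* some tpacket version value */
    int out, rc;

    rc = read_int_opt(&version, sizeof(version), &out);
    printf("full length: rc=%d out=%d\n", rc, out);

    rc = read_int_opt(&version, 1, &out);   /* short length is rejected */
    printf("short length: rc=%d\n", rc);
    return 0;
}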
+2 -2
net/tipc/socket.c
··· 1083 } 1084 } while (sent < dlen && !rc); 1085 1086 - return rc ? rc : sent; 1087 } 1088 1089 /** ··· 1484 if (unlikely(flags & MSG_PEEK)) 1485 goto exit; 1486 1487 - tsk->rcv_unacked += tsk_inc(tsk, hlen + sz); 1488 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) 1489 tipc_sk_send_ack(tsk); 1490 tsk_advance_rx_queue(sk);
··· 1083 } 1084 } while (sent < dlen && !rc); 1085 1086 + return sent ? sent : rc; 1087 } 1088 1089 /** ··· 1484 if (unlikely(flags & MSG_PEEK)) 1485 goto exit; 1486 1487 + tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg)); 1488 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) 1489 tipc_sk_send_ack(tsk); 1490 tsk_advance_rx_queue(sk);
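The tipc_send_stream part of the fix above changes the return convention for partial writes: once some bytes have gone out, report the byte count and leave the error for a later call; only surface the error when nothing was sent. A tiny illustration of that convention (the helper is hypothetical):

#include <stdio.h>

/* Report progress when there was any; report the error only when no
 * bytes were transmitted at all.
 */
static long send_result(long sent, long rc)
{
    return sent ? sent : rc;
}

int main(void)
{
    printf("%ld\n", send_result(512, -11));   /* partial send: 512 */
    printf("%ld\n", send_result(0, -11));     /* nothing sent: -EAGAIN (-11) */
    return 0;
}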