Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Clamp timeouts to INT_MAX in conntrack, from Jay Elliot.

2) Fix broken UAPI for BPF_PROG_TYPE_PERF_EVENT, from Hendrik
Brueckner.

3) Fix locking in ieee80211_sta_tear_down_BA_sessions, from Johannes
Berg.

4) Add missing barriers to ptr_ring, from Michael S. Tsirkin.

5) Don't advertise gigabit in sh_eth when not available, from Thomas
Petazzoni.

6) Check network namespace when delivering to netlink taps, from Kevin
Cernekee.

7) Kill a race in raw_sendmsg(), from Mohamed Ghannam.

8) Use correct address in TCP md5 lookups when replying to an incoming
segment, from Christoph Paasch.

9) Add schedule points to BPF map alloc/free, from Eric Dumazet.

10) Don't allow silly mtu values to be used in ipv4/ipv6 multicast, also
from Eric Dumazet.

11) Fix SKB leak in tipc, from Jon Maloy.

12) Disable MAC learning on OVS ports of mlxsw, from Yuval Mintz.

13) SKB leak fix in skb_complete_tx_timestamp(), from Willem de Bruijn.

14) Add some new qmi_wwan device IDs, from Daniele Palmas.

15) Fix static key imbalance in ingress qdisc, from Jiri Pirko.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
net: qcom/emac: Reduce timeout for mdio read/write
net: sched: fix static key imbalance in case of ingress/clsact_init error
net: sched: fix clsact init error path
ip_gre: fix wrong return value of erspan_rcv
net: usb: qmi_wwan: add Telit ME910 PID 0x1101 support
pkt_sched: Remove TC_RED_OFFLOADED from uapi
net: sched: Move to new offload indication in RED
net: sched: Add TCA_HW_OFFLOAD
net: aquantia: Increment driver version
net: aquantia: Fix typo in ethtool statistics names
net: aquantia: Update hw counters on hw init
net: aquantia: Improve link state and statistics check interval callback
net: aquantia: Fill in multicast counter in ndev stats from hardware
net: aquantia: Fill ndev stat couters from hardware
net: aquantia: Extend stat counters to 64bit values
net: aquantia: Fix hardware DMA stream overload on large MRRS
net: aquantia: Fix actual speed capabilities reporting
sock: free skb in skb_complete_tx_timestamp on error
s390/qeth: update takeover IPs after configuration change
s390/qeth: lock IP table while applying takeover changes
...

+800 -385
+1
MAINTAINERS
··· 13117 13117 13118 13118 SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER 13119 13119 M: Jie Deng <jiedeng@synopsys.com> 13120 + M: Jose Abreu <Jose.Abreu@synopsys.com> 13120 13121 L: netdev@vger.kernel.org 13121 13122 S: Supported 13122 13123 F: drivers/net/ethernet/synopsys/
+2 -2
arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
··· 121 121 switch0port10: port@10 { 122 122 reg = <10>; 123 123 label = "dsa"; 124 - phy-mode = "xgmii"; 124 + phy-mode = "xaui"; 125 125 link = <&switch1port10>; 126 126 }; 127 127 }; ··· 208 208 switch1port10: port@10 { 209 209 reg = <10>; 210 210 label = "dsa"; 211 - phy-mode = "xgmii"; 211 + phy-mode = "xaui"; 212 212 link = <&switch0port10>; 213 213 }; 214 214 };
+1
arch/um/include/asm/Kbuild
··· 1 1 generic-y += barrier.h 2 + generic-y += bpf_perf_event.h 2 3 generic-y += bug.h 3 4 generic-y += clkdev.h 4 5 generic-y += current.h
+1
drivers/net/dsa/mv88e6xxx/port.c
··· 338 338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; 339 339 break; 340 340 case PHY_INTERFACE_MODE_XGMII: 341 + case PHY_INTERFACE_MODE_XAUI: 341 342 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; 342 343 break; 343 344 case PHY_INTERFACE_MODE_RXAUI:
+3 -2
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
··· 50 50 #define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U 51 51 #define AQ_CFG_PCI_FUNC_PORTS 2U 52 52 53 - #define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) 53 + #define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ) 54 54 #define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) 55 55 56 56 #define AQ_CFG_SKB_FRAGS_MAX 32U ··· 80 80 #define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ 81 81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\ 82 82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\ 83 - __stringify(NIC_REVISION_DRIVER_VERSION) 83 + __stringify(NIC_REVISION_DRIVER_VERSION) \ 84 + AQ_CFG_DRV_VERSION_SUFFIX 84 85 85 86 #endif /* AQ_CFG_H */
+8 -8
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
··· 66 66 "OutUCast", 67 67 "OutMCast", 68 68 "OutBCast", 69 - "InUCastOctects", 70 - "OutUCastOctects", 71 - "InMCastOctects", 72 - "OutMCastOctects", 73 - "InBCastOctects", 74 - "OutBCastOctects", 75 - "InOctects", 76 - "OutOctects", 69 + "InUCastOctets", 70 + "OutUCastOctets", 71 + "InMCastOctets", 72 + "OutMCastOctets", 73 + "InBCastOctets", 74 + "OutBCastOctets", 75 + "InOctets", 76 + "OutOctets", 77 77 "InPacketsDma", 78 78 "OutPacketsDma", 79 79 "InOctetsDma",
+26 -3
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
··· 46 46 unsigned int mbps; 47 47 }; 48 48 49 + struct aq_stats_s { 50 + u64 uprc; 51 + u64 mprc; 52 + u64 bprc; 53 + u64 erpt; 54 + u64 uptc; 55 + u64 mptc; 56 + u64 bptc; 57 + u64 erpr; 58 + u64 mbtc; 59 + u64 bbtc; 60 + u64 mbrc; 61 + u64 bbrc; 62 + u64 ubrc; 63 + u64 ubtc; 64 + u64 dpc; 65 + u64 dma_pkt_rc; 66 + u64 dma_pkt_tc; 67 + u64 dma_oct_rc; 68 + u64 dma_oct_tc; 69 + }; 70 + 49 71 #define AQ_HW_IRQ_INVALID 0U 50 72 #define AQ_HW_IRQ_LEGACY 1U 51 73 #define AQ_HW_IRQ_MSI 2U ··· 107 85 void (*destroy)(struct aq_hw_s *self); 108 86 109 87 int (*get_hw_caps)(struct aq_hw_s *self, 110 - struct aq_hw_caps_s *aq_hw_caps); 88 + struct aq_hw_caps_s *aq_hw_caps, 89 + unsigned short device, 90 + unsigned short subsystem_device); 111 91 112 92 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 113 93 unsigned int frags); ··· 188 164 189 165 int (*hw_update_stats)(struct aq_hw_s *self); 190 166 191 - int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 192 - unsigned int *p_count); 167 + struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self); 193 168 194 169 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 195 170
+55 -27
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
··· 37 37 module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); 38 38 MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); 39 39 40 + static void aq_nic_update_ndev_stats(struct aq_nic_s *self); 41 + 40 42 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 41 43 { 42 44 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; ··· 168 166 static void aq_nic_service_timer_cb(struct timer_list *t) 169 167 { 170 168 struct aq_nic_s *self = from_timer(self, t, service_timer); 171 - struct net_device *ndev = aq_nic_get_ndev(self); 169 + int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL; 172 170 int err = 0; 173 - unsigned int i = 0U; 174 - struct aq_ring_stats_rx_s stats_rx; 175 - struct aq_ring_stats_tx_s stats_tx; 176 171 177 172 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 178 173 goto err_exit; ··· 181 182 if (self->aq_hw_ops.hw_update_stats) 182 183 self->aq_hw_ops.hw_update_stats(self->aq_hw); 183 184 184 - memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 185 - memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 186 - for (i = AQ_DIMOF(self->aq_vec); i--;) { 187 - if (self->aq_vec[i]) 188 - aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); 189 - } 185 + aq_nic_update_ndev_stats(self); 190 186 191 - ndev->stats.rx_packets = stats_rx.packets; 192 - ndev->stats.rx_bytes = stats_rx.bytes; 193 - ndev->stats.rx_errors = stats_rx.errors; 194 - ndev->stats.tx_packets = stats_tx.packets; 195 - ndev->stats.tx_bytes = stats_tx.bytes; 196 - ndev->stats.tx_errors = stats_tx.errors; 187 + /* If no link - use faster timer rate to detect link up asap */ 188 + if (!netif_carrier_ok(self->ndev)) 189 + ctimer = max(ctimer / 2, 1); 197 190 198 191 err_exit: 199 - mod_timer(&self->service_timer, 200 - jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); 192 + mod_timer(&self->service_timer, jiffies + ctimer); 201 193 } 202 194 203 195 static void aq_nic_polling_timer_cb(struct timer_list *t) ··· 212 222 213 223 struct aq_nic_s 
*aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 214 224 const struct ethtool_ops *et_ops, 215 - struct device *dev, 225 + struct pci_dev *pdev, 216 226 struct aq_pci_func_s *aq_pci_func, 217 227 unsigned int port, 218 228 const struct aq_hw_ops *aq_hw_ops) ··· 232 242 ndev->netdev_ops = ndev_ops; 233 243 ndev->ethtool_ops = et_ops; 234 244 235 - SET_NETDEV_DEV(ndev, dev); 245 + SET_NETDEV_DEV(ndev, &pdev->dev); 236 246 237 247 ndev->if_port = port; 238 248 self->ndev = ndev; ··· 244 254 245 255 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, 246 256 &self->aq_hw_ops); 247 - err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); 257 + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, 258 + pdev->device, pdev->subsystem_device); 248 259 if (err < 0) 249 260 goto err_exit; 250 261 ··· 740 749 741 750 void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) 742 751 { 743 - struct aq_vec_s *aq_vec = NULL; 744 752 unsigned int i = 0U; 745 753 unsigned int count = 0U; 746 - int err = 0; 754 + struct aq_vec_s *aq_vec = NULL; 755 + struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); 747 756 748 - err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); 749 - if (err < 0) 757 + if (!stats) 750 758 goto err_exit; 751 759 752 - data += count; 760 + data[i] = stats->uprc + stats->mprc + stats->bprc; 761 + data[++i] = stats->uprc; 762 + data[++i] = stats->mprc; 763 + data[++i] = stats->bprc; 764 + data[++i] = stats->erpt; 765 + data[++i] = stats->uptc + stats->mptc + stats->bptc; 766 + data[++i] = stats->uptc; 767 + data[++i] = stats->mptc; 768 + data[++i] = stats->bptc; 769 + data[++i] = stats->ubrc; 770 + data[++i] = stats->ubtc; 771 + data[++i] = stats->mbrc; 772 + data[++i] = stats->mbtc; 773 + data[++i] = stats->bbrc; 774 + data[++i] = stats->bbtc; 775 + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; 776 + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; 777 + data[++i] = 
stats->dma_pkt_rc; 778 + data[++i] = stats->dma_pkt_tc; 779 + data[++i] = stats->dma_oct_rc; 780 + data[++i] = stats->dma_oct_tc; 781 + data[++i] = stats->dpc; 782 + 783 + i++; 784 + 785 + data += i; 753 786 count = 0U; 754 787 755 788 for (i = 0U, aq_vec = self->aq_vec[0]; ··· 783 768 } 784 769 785 770 err_exit:; 786 - (void)err; 771 + } 772 + 773 + static void aq_nic_update_ndev_stats(struct aq_nic_s *self) 774 + { 775 + struct net_device *ndev = self->ndev; 776 + struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); 777 + 778 + ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc; 779 + ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc; 780 + ndev->stats.rx_errors = stats->erpr; 781 + ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc; 782 + ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc; 783 + ndev->stats.tx_errors = stats->erpt; 784 + ndev->stats.multicast = stats->mprc; 787 785 } 788 786 789 787 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+1 -1
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
··· 71 71 72 72 struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 73 73 const struct ethtool_ops *et_ops, 74 - struct device *dev, 74 + struct pci_dev *pdev, 75 75 struct aq_pci_func_s *aq_pci_func, 76 76 unsigned int port, 77 77 const struct aq_hw_ops *aq_hw_ops);
+3 -2
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
··· 51 51 pci_set_drvdata(pdev, self); 52 52 self->pdev = pdev; 53 53 54 - err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); 54 + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, 55 + pdev->subsystem_device); 55 56 if (err < 0) 56 57 goto err_exit; 57 58 ··· 60 59 61 60 for (port = 0; port < self->ports; ++port) { 62 61 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, 63 - &pdev->dev, self, 62 + pdev, self, 64 63 port, aq_hw_ops); 65 64 66 65 if (!aq_nic) {
+16 -1
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
··· 18 18 #include "hw_atl_a0_internal.h" 19 19 20 20 static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, 21 - struct aq_hw_caps_s *aq_hw_caps) 21 + struct aq_hw_caps_s *aq_hw_caps, 22 + unsigned short device, 23 + unsigned short subsystem_device) 22 24 { 23 25 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); 26 + 27 + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) 28 + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; 29 + 30 + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { 31 + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; 32 + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; 33 + } 34 + 24 35 return 0; 25 36 } 26 37 ··· 343 332 hw_atl_a0_hw_qos_set(self); 344 333 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 345 334 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 335 + 336 + /* Reset link status and read out initial hardware counters */ 337 + self->aq_link_status.mbps = 0; 338 + hw_atl_utils_update_stats(self); 346 339 347 340 err = aq_hw_err_from_flags(self); 348 341 if (err < 0)
+28 -1
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
··· 16 16 #include "hw_atl_utils.h" 17 17 #include "hw_atl_llh.h" 18 18 #include "hw_atl_b0_internal.h" 19 + #include "hw_atl_llh_internal.h" 19 20 20 21 static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, 21 - struct aq_hw_caps_s *aq_hw_caps) 22 + struct aq_hw_caps_s *aq_hw_caps, 23 + unsigned short device, 24 + unsigned short subsystem_device) 22 25 { 23 26 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); 27 + 28 + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) 29 + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; 30 + 31 + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { 32 + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; 33 + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; 34 + } 35 + 24 36 return 0; 25 37 } 26 38 ··· 369 357 }; 370 358 371 359 int err = 0; 360 + u32 val; 372 361 373 362 self->aq_nic_cfg = aq_nic_cfg; 374 363 ··· 386 373 hw_atl_b0_hw_qos_set(self); 387 374 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 388 375 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 376 + 377 + /* Force limit MRRS on RDM/TDM to 2K */ 378 + val = aq_hw_read_reg(self, pci_reg_control6_adr); 379 + aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); 380 + 381 + /* TX DMA total request limit. B0 hardware is not capable to 382 + * handle more than (8K-MRRS) incoming DMA data. 383 + * Value 24 in 256byte units 384 + */ 385 + aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); 386 + 387 + /* Reset link status and read out initial hardware counters */ 388 + self->aq_link_status.mbps = 0; 389 + hw_atl_utils_update_stats(self); 389 390 390 391 err = aq_hw_err_from_flags(self); 391 392 if (err < 0)
+6
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
··· 2343 2343 #define tx_dma_desc_base_addrmsw_adr(descriptor) \ 2344 2344 (0x00007c04u + (descriptor) * 0x40) 2345 2345 2346 + /* tx dma total request limit */ 2347 + #define tx_dma_total_req_limit_adr 0x00007b20u 2348 + 2346 2349 /* tx interrupt moderation control register definitions 2347 2350 * Preprocessor definitions for TX Interrupt Moderation Control Register 2348 2351 * Base Address: 0x00008980 ··· 2371 2368 #define pci_reg_res_dsbl_width 1 2372 2369 /* default value of bitfield reg_res_dsbl */ 2373 2370 #define pci_reg_res_dsbl_default 0x1 2371 + 2372 + /* PCI core control register */ 2373 + #define pci_reg_control6_adr 0x1014u 2374 2374 2375 2375 /* global microprocessor scratch pad definitions */ 2376 2376 #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+23 -53
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
··· 503 503 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 504 504 struct hw_aq_atl_utils_mbox mbox; 505 505 506 - if (!self->aq_link_status.mbps) 507 - return 0; 508 - 509 506 hw_atl_utils_mpi_read_stats(self, &mbox); 510 507 511 508 #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ 512 509 mbox.stats._N_ - hw_self->last_stats._N_) 510 + if (self->aq_link_status.mbps) { 511 + AQ_SDELTA(uprc); 512 + AQ_SDELTA(mprc); 513 + AQ_SDELTA(bprc); 514 + AQ_SDELTA(erpt); 513 515 514 - AQ_SDELTA(uprc); 515 - AQ_SDELTA(mprc); 516 - AQ_SDELTA(bprc); 517 - AQ_SDELTA(erpt); 516 + AQ_SDELTA(uptc); 517 + AQ_SDELTA(mptc); 518 + AQ_SDELTA(bptc); 519 + AQ_SDELTA(erpr); 518 520 519 - AQ_SDELTA(uptc); 520 - AQ_SDELTA(mptc); 521 - AQ_SDELTA(bptc); 522 - AQ_SDELTA(erpr); 523 - 524 - AQ_SDELTA(ubrc); 525 - AQ_SDELTA(ubtc); 526 - AQ_SDELTA(mbrc); 527 - AQ_SDELTA(mbtc); 528 - AQ_SDELTA(bbrc); 529 - AQ_SDELTA(bbtc); 530 - AQ_SDELTA(dpc); 531 - 521 + AQ_SDELTA(ubrc); 522 + AQ_SDELTA(ubtc); 523 + AQ_SDELTA(mbrc); 524 + AQ_SDELTA(mbtc); 525 + AQ_SDELTA(bbrc); 526 + AQ_SDELTA(bbtc); 527 + AQ_SDELTA(dpc); 528 + } 532 529 #undef AQ_SDELTA 530 + hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self); 531 + hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self); 532 + hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self); 533 + hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self); 533 534 534 535 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); 535 536 536 537 return 0; 537 538 } 538 539 539 - int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 540 - u64 *data, unsigned int *p_count) 540 + struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self) 541 541 { 542 - struct hw_atl_s *hw_self = PHAL_ATLANTIC; 543 - struct hw_atl_stats_s *stats = &hw_self->curr_stats; 544 - int i = 0; 545 - 546 - data[i] = stats->uprc + stats->mprc + stats->bprc; 547 - data[++i] = stats->uprc; 548 - data[++i] = 
stats->mprc; 549 - data[++i] = stats->bprc; 550 - data[++i] = stats->erpt; 551 - data[++i] = stats->uptc + stats->mptc + stats->bptc; 552 - data[++i] = stats->uptc; 553 - data[++i] = stats->mptc; 554 - data[++i] = stats->bptc; 555 - data[++i] = stats->ubrc; 556 - data[++i] = stats->ubtc; 557 - data[++i] = stats->mbrc; 558 - data[++i] = stats->mbtc; 559 - data[++i] = stats->bbrc; 560 - data[++i] = stats->bbtc; 561 - data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; 562 - data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; 563 - data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); 564 - data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); 565 - data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); 566 - data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); 567 - data[++i] = stats->dpc; 568 - 569 - if (p_count) 570 - *p_count = ++i; 571 - 572 - return 0; 542 + return &PHAL_ATLANTIC->curr_stats; 573 543 } 574 544 575 545 static const u32 hw_atl_utils_hw_mac_regs[] = {
+2 -4
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
··· 129 129 struct __packed hw_atl_s { 130 130 struct aq_hw_s base; 131 131 struct hw_atl_stats_s last_stats; 132 - struct hw_atl_stats_s curr_stats; 132 + struct aq_stats_s curr_stats; 133 133 u64 speed; 134 134 unsigned int chip_features; 135 135 u32 fw_ver_actual; ··· 207 207 208 208 int hw_atl_utils_update_stats(struct aq_hw_s *self); 209 209 210 - int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 211 - u64 *data, 212 - unsigned int *p_count); 210 + struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); 213 211 214 212 #endif /* HW_ATL_UTILS_H */
+4 -2
drivers/net/ethernet/aquantia/atlantic/ver.h
··· 11 11 #define VER_H 12 12 13 13 #define NIC_MAJOR_DRIVER_VERSION 1 14 - #define NIC_MINOR_DRIVER_VERSION 5 15 - #define NIC_BUILD_DRIVER_VERSION 345 14 + #define NIC_MINOR_DRIVER_VERSION 6 15 + #define NIC_BUILD_DRIVER_VERSION 13 16 16 #define NIC_REVISION_DRIVER_VERSION 0 17 + 18 + #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" 17 19 18 20 #endif /* VER_H */
+7 -3
drivers/net/ethernet/arc/emac_rockchip.c
··· 199 199 200 200 /* RMII interface needs always a rate of 50MHz */ 201 201 err = clk_set_rate(priv->refclk, 50000000); 202 - if (err) 202 + if (err) { 203 203 dev_err(dev, 204 204 "failed to change reference clock rate (%d)\n", err); 205 + goto out_regulator_disable; 206 + } 205 207 206 208 if (priv->soc_data->need_div_macclk) { 207 209 priv->macclk = devm_clk_get(dev, "macclk"); ··· 232 230 err = arc_emac_probe(ndev, interface); 233 231 if (err) { 234 232 dev_err(dev, "failed to probe arc emac (%d)\n", err); 235 - goto out_regulator_disable; 233 + goto out_clk_disable_macclk; 236 234 } 237 235 238 236 return 0; 237 + 239 238 out_clk_disable_macclk: 240 - clk_disable_unprepare(priv->macclk); 239 + if (priv->soc_data->need_div_macclk) 240 + clk_disable_unprepare(priv->macclk); 241 241 out_regulator_disable: 242 242 if (priv->regulator) 243 243 regulator_disable(priv->regulator);
-1
drivers/net/ethernet/marvell/skge.c
··· 4081 4081 if (hw->ports > 1) { 4082 4082 skge_write32(hw, B0_IMSK, 0); 4083 4083 skge_read32(hw, B0_IMSK); 4084 - free_irq(pdev->irq, hw); 4085 4084 } 4086 4085 spin_unlock_irq(&hw->hw_lock); 4087 4086
+31 -26
drivers/net/ethernet/mellanox/mlx4/en_port.c
··· 188 188 struct net_device *dev = mdev->pndev[port]; 189 189 struct mlx4_en_priv *priv = netdev_priv(dev); 190 190 struct net_device_stats *stats = &dev->stats; 191 - struct mlx4_cmd_mailbox *mailbox; 191 + struct mlx4_cmd_mailbox *mailbox, *mailbox_priority; 192 192 u64 in_mod = reset << 8 | port; 193 193 int err; 194 194 int i, counter_index; ··· 198 198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 199 199 if (IS_ERR(mailbox)) 200 200 return PTR_ERR(mailbox); 201 + 202 + mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev); 203 + if (IS_ERR(mailbox_priority)) { 204 + mlx4_free_cmd_mailbox(mdev->dev, mailbox); 205 + return PTR_ERR(mailbox_priority); 206 + } 207 + 201 208 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, 202 209 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 203 210 MLX4_CMD_NATIVE); ··· 212 205 goto out; 213 206 214 207 mlx4_en_stats = mailbox->buf; 208 + 209 + memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); 210 + counter_index = mlx4_get_default_counter_index(mdev->dev, port); 211 + err = mlx4_get_counter_stats(mdev->dev, counter_index, 212 + &tmp_counter_stats, reset); 213 + 214 + /* 0xffs indicates invalid value */ 215 + memset(mailbox_priority->buf, 0xff, 216 + sizeof(*flowstats) * MLX4_NUM_PRIORITIES); 217 + 218 + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { 219 + memset(mailbox_priority->buf, 0, 220 + sizeof(*flowstats) * MLX4_NUM_PRIORITIES); 221 + err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma, 222 + in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, 223 + 0, MLX4_CMD_DUMP_ETH_STATS, 224 + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 225 + if (err) 226 + goto out; 227 + } 228 + 229 + flowstats = mailbox_priority->buf; 215 230 216 231 spin_lock_bh(&priv->stats_lock); 217 232 ··· 374 345 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); 375 346 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); 376 347 377 - spin_unlock_bh(&priv->stats_lock); 378 - 379 - 
memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); 380 - counter_index = mlx4_get_default_counter_index(mdev->dev, port); 381 - err = mlx4_get_counter_stats(mdev->dev, counter_index, 382 - &tmp_counter_stats, reset); 383 - 384 - /* 0xffs indicates invalid value */ 385 - memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); 386 - 387 - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { 388 - memset(mailbox->buf, 0, 389 - sizeof(*flowstats) * MLX4_NUM_PRIORITIES); 390 - err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, 391 - in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, 392 - 0, MLX4_CMD_DUMP_ETH_STATS, 393 - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 394 - if (err) 395 - goto out; 396 - } 397 - 398 - flowstats = mailbox->buf; 399 - 400 - spin_lock_bh(&priv->stats_lock); 401 - 402 348 if (tmp_counter_stats.counter_mode == 0) { 403 349 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); 404 350 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); ··· 414 410 415 411 out: 416 412 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 413 + mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority); 417 414 return err; 418 415 } 419 416
+1 -1
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
··· 185 185 if (priv->mdev->dev->caps.flags & 186 186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 187 187 buf[3] = mlx4_en_test_registers(priv); 188 - if (priv->port_up) 188 + if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU) 189 189 buf[4] = mlx4_en_test_loopback(priv); 190 190 } 191 191
+3
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 153 153 #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 154 154 #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 155 155 #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) 156 + #define PREAMBLE_LEN 8 157 + #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ 158 + ETH_HLEN + PREAMBLE_LEN) 156 159 157 160 #define MLX4_EN_MIN_MTU 46 158 161 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
-1
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 611 611 MLX4_MAX_PORTS; 612 612 else 613 613 res_alloc->guaranteed[t] = 0; 614 - res_alloc->res_free -= res_alloc->guaranteed[t]; 615 614 break; 616 615 default: 617 616 break;
+18
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 4300 4300 4301 4301 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4302 4302 { 4303 + u16 vid = 1; 4303 4304 int err; 4304 4305 4305 4306 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); ··· 4313 4312 true, false); 4314 4313 if (err) 4315 4314 goto err_port_vlan_set; 4315 + 4316 + for (; vid <= VLAN_N_VID - 1; vid++) { 4317 + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4318 + vid, false); 4319 + if (err) 4320 + goto err_vid_learning_set; 4321 + } 4322 + 4316 4323 return 0; 4317 4324 4325 + err_vid_learning_set: 4326 + for (vid--; vid >= 1; vid--) 4327 + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4318 4328 err_port_vlan_set: 4319 4329 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4320 4330 err_port_stp_set: ··· 4335 4323 4336 4324 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4337 4325 { 4326 + u16 vid; 4327 + 4328 + for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4329 + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4330 + vid, true); 4331 + 4338 4332 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4339 4333 false, false); 4340 4334 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+4 -3
drivers/net/ethernet/qualcomm/emac/emac-phy.c
··· 47 47 #define MDIO_CLK_25_28 7 48 48 49 49 #define MDIO_WAIT_TIMES 1000 50 + #define MDIO_STATUS_DELAY_TIME 1 50 51 51 52 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 52 53 { ··· 66 65 67 66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 68 67 !(reg & (MDIO_START | MDIO_BUSY)), 69 - 100, MDIO_WAIT_TIMES * 100)) 68 + MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100)) 70 69 return -EIO; 71 70 72 71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; ··· 89 88 writel(reg, adpt->base + EMAC_MDIO_CTRL); 90 89 91 90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 92 - !(reg & (MDIO_START | MDIO_BUSY)), 100, 93 - MDIO_WAIT_TIMES * 100)) 91 + !(reg & (MDIO_START | MDIO_BUSY)), 92 + MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100)) 94 93 return -EIO; 95 94 96 95 return 0;
+2 -25
drivers/net/ethernet/renesas/ravb_main.c
··· 2308 2308 struct ravb_private *priv = netdev_priv(ndev); 2309 2309 int ret = 0; 2310 2310 2311 - if (priv->wol_enabled) { 2312 - /* Reduce the usecount of the clock to zero and then 2313 - * restore it to its original value. This is done to force 2314 - * the clock to be re-enabled which is a workaround 2315 - * for renesas-cpg-mssr driver which do not enable clocks 2316 - * when resuming from PSCI suspend/resume. 2317 - * 2318 - * Without this workaround the driver fails to communicate 2319 - * with the hardware if WoL was enabled when the system 2320 - * entered PSCI suspend. This is due to that if WoL is enabled 2321 - * we explicitly keep the clock from being turned off when 2322 - * suspending, but in PSCI sleep power is cut so the clock 2323 - * is disabled anyhow, the clock driver is not aware of this 2324 - * so the clock is not turned back on when resuming. 2325 - * 2326 - * TODO: once the renesas-cpg-mssr suspend/resume is working 2327 - * this clock dance should be removed. 2328 - */ 2329 - clk_disable(priv->clk); 2330 - clk_disable(priv->clk); 2331 - clk_enable(priv->clk); 2332 - clk_enable(priv->clk); 2333 - 2334 - /* Set reset mode to rearm the WoL logic */ 2311 + /* If WoL is enabled set reset mode to rearm the WoL logic */ 2312 + if (priv->wol_enabled) 2335 2313 ravb_write(ndev, CCC_OPC_RESET, CCC); 2336 - } 2337 2314 2338 2315 /* All register have been reset to default values. 2339 2316 * Restore all registers which where setup at probe time and
+10
drivers/net/ethernet/renesas/sh_eth.c
··· 1892 1892 return PTR_ERR(phydev); 1893 1893 } 1894 1894 1895 + /* mask with MAC supported features */ 1896 + if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { 1897 + int err = phy_set_max_speed(phydev, SPEED_100); 1898 + if (err) { 1899 + netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n"); 1900 + phy_disconnect(phydev); 1901 + return err; 1902 + } 1903 + } 1904 + 1895 1905 phy_attached_info(phydev); 1896 1906 1897 1907 return 0;
+1 -1
drivers/net/hippi/rrunner.c
··· 1379 1379 rrpriv->info_dma); 1380 1380 rrpriv->info = NULL; 1381 1381 1382 - free_irq(pdev->irq, dev); 1383 1382 spin_unlock_irqrestore(&rrpriv->lock, flags); 1383 + free_irq(pdev->irq, dev); 1384 1384 1385 1385 return 0; 1386 1386 }
-4
drivers/net/phy/at803x.c
··· 239 239 { 240 240 int value; 241 241 242 - mutex_lock(&phydev->lock); 243 - 244 242 value = phy_read(phydev, MII_BMCR); 245 243 value &= ~(BMCR_PDOWN | BMCR_ISOLATE); 246 244 phy_write(phydev, MII_BMCR, value); 247 - 248 - mutex_unlock(&phydev->lock); 249 245 250 246 return 0; 251 247 }
+4
drivers/net/phy/marvell.c
··· 637 637 if (err < 0) 638 638 goto error; 639 639 640 + /* Do not touch the fiber page if we're in copper->sgmii mode */ 641 + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) 642 + return 0; 643 + 640 644 /* Then the fiber link */ 641 645 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); 642 646 if (err < 0)
+1
drivers/net/phy/mdio_bus.c
··· 270 270 271 271 if (addr == mdiodev->addr) { 272 272 dev->of_node = child; 273 + dev->fwnode = of_fwnode_handle(child); 273 274 return; 274 275 } 275 276 }
+73 -1
drivers/net/phy/meson-gxl.c
··· 22 22 #include <linux/ethtool.h> 23 23 #include <linux/phy.h> 24 24 #include <linux/netdevice.h> 25 + #include <linux/bitfield.h> 25 26 26 27 static int meson_gxl_config_init(struct phy_device *phydev) 27 28 { ··· 51 50 return 0; 52 51 } 53 52 53 + /* This function is provided to cope with the possible failures of this phy 54 + * during aneg process. When aneg fails, the PHY reports that aneg is done 55 + * but the value found in MII_LPA is wrong: 56 + * - Early failures: MII_LPA is just 0x0001. if MII_EXPANSION reports that 57 + * the link partner (LP) supports aneg but the LP never acked our base 58 + * code word, it is likely that we never sent it to begin with. 59 + * - Late failures: MII_LPA is filled with a value which seems to make sense 60 + * but it actually is not what the LP is advertising. It seems that we 61 + * can detect this using a magic bit in the WOL bank (reg 12 - bit 12). 62 + * If this particular bit is not set when aneg is reported being done, 63 + * it means MII_LPA is likely to be wrong. 64 + * 65 + * In both case, forcing a restart of the aneg process solve the problem. 
66 + * When this failure happens, the first retry is usually successful but, 67 + * in some cases, it may take up to 6 retries to get a decent result 68 + */ 69 + static int meson_gxl_read_status(struct phy_device *phydev) 70 + { 71 + int ret, wol, lpa, exp; 72 + 73 + if (phydev->autoneg == AUTONEG_ENABLE) { 74 + ret = genphy_aneg_done(phydev); 75 + if (ret < 0) 76 + return ret; 77 + else if (!ret) 78 + goto read_status_continue; 79 + 80 + /* Need to access WOL bank, make sure the access is open */ 81 + ret = phy_write(phydev, 0x14, 0x0000); 82 + if (ret) 83 + return ret; 84 + ret = phy_write(phydev, 0x14, 0x0400); 85 + if (ret) 86 + return ret; 87 + ret = phy_write(phydev, 0x14, 0x0000); 88 + if (ret) 89 + return ret; 90 + ret = phy_write(phydev, 0x14, 0x0400); 91 + if (ret) 92 + return ret; 93 + 94 + /* Request LPI_STATUS WOL register */ 95 + ret = phy_write(phydev, 0x14, 0x8D80); 96 + if (ret) 97 + return ret; 98 + 99 + /* Read LPI_STATUS value */ 100 + wol = phy_read(phydev, 0x15); 101 + if (wol < 0) 102 + return wol; 103 + 104 + lpa = phy_read(phydev, MII_LPA); 105 + if (lpa < 0) 106 + return lpa; 107 + 108 + exp = phy_read(phydev, MII_EXPANSION); 109 + if (exp < 0) 110 + return exp; 111 + 112 + if (!(wol & BIT(12)) || 113 + ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) { 114 + /* Looks like aneg failed after all */ 115 + phydev_dbg(phydev, "LPA corruption - aneg restart\n"); 116 + return genphy_restart_aneg(phydev); 117 + } 118 + } 119 + 120 + read_status_continue: 121 + return genphy_read_status(phydev); 122 + } 123 + 54 124 static struct phy_driver meson_gxl_phy[] = { 55 125 { 56 126 .phy_id = 0x01814400, ··· 132 60 .config_init = meson_gxl_config_init, 133 61 .config_aneg = genphy_config_aneg, 134 62 .aneg_done = genphy_aneg_done, 135 - .read_status = genphy_read_status, 63 + .read_status = meson_gxl_read_status, 136 64 .suspend = genphy_suspend, 137 65 .resume = genphy_resume, 138 66 },
+3 -6
drivers/net/phy/phy.c
··· 828 828 */ 829 829 void phy_start(struct phy_device *phydev) 830 830 { 831 - bool do_resume = false; 832 831 int err = 0; 833 832 834 833 mutex_lock(&phydev->lock); ··· 840 841 phydev->state = PHY_UP; 841 842 break; 842 843 case PHY_HALTED: 844 + /* if phy was suspended, bring the physical link up again */ 845 + phy_resume(phydev); 846 + 843 847 /* make sure interrupts are re-enabled for the PHY */ 844 848 if (phydev->irq != PHY_POLL) { 845 849 err = phy_enable_interrupts(phydev); ··· 851 849 } 852 850 853 851 phydev->state = PHY_RESUMING; 854 - do_resume = true; 855 852 break; 856 853 default: 857 854 break; 858 855 } 859 856 mutex_unlock(&phydev->lock); 860 - 861 - /* if phy was suspended, bring the physical link up again */ 862 - if (do_resume) 863 - phy_resume(phydev); 864 857 865 858 phy_trigger_machine(phydev, true); 866 859 }
+6 -4
drivers/net/phy/phy_device.c
··· 135 135 if (!mdio_bus_phy_may_suspend(phydev)) 136 136 goto no_resume; 137 137 138 + mutex_lock(&phydev->lock); 138 139 ret = phy_resume(phydev); 140 + mutex_unlock(&phydev->lock); 139 141 if (ret < 0) 140 142 return ret; 141 143 ··· 1028 1026 if (err) 1029 1027 goto error; 1030 1028 1029 + mutex_lock(&phydev->lock); 1031 1030 phy_resume(phydev); 1031 + mutex_unlock(&phydev->lock); 1032 1032 phy_led_triggers_register(phydev); 1033 1033 1034 1034 return err; ··· 1160 1156 { 1161 1157 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); 1162 1158 int ret = 0; 1159 + 1160 + WARN_ON(!mutex_is_locked(&phydev->lock)); 1163 1161 1164 1162 if (phydev->drv && phydrv->resume) 1165 1163 ret = phydrv->resume(phydev); ··· 1645 1639 { 1646 1640 int value; 1647 1641 1648 - mutex_lock(&phydev->lock); 1649 - 1650 1642 value = phy_read(phydev, MII_BMCR); 1651 1643 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); 1652 - 1653 - mutex_unlock(&phydev->lock); 1654 1644 1655 1645 return 0; 1656 1646 }
+2
drivers/net/usb/qmi_wwan.c
··· 1204 1204 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1205 1205 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1206 1206 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1207 + {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ 1207 1208 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1208 1209 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1209 1210 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1210 1211 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1211 1212 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1212 1213 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1214 + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ 1213 1215 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1214 1216 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1215 1217 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+3
drivers/of/of_mdio.c
··· 81 81 * can be looked up later */ 82 82 of_node_get(child); 83 83 phy->mdio.dev.of_node = child; 84 + phy->mdio.dev.fwnode = of_fwnode_handle(child); 84 85 85 86 /* All data is now stored in the phy struct; 86 87 * register it */ ··· 112 111 */ 113 112 of_node_get(child); 114 113 mdiodev->dev.of_node = child; 114 + mdiodev->dev.fwnode = of_fwnode_handle(child); 115 115 116 116 /* All data is now stored in the mdiodev struct; register it. */ 117 117 rc = mdio_device_register(mdiodev); ··· 208 206 mdio->phy_mask = ~0; 209 207 210 208 mdio->dev.of_node = np; 209 + mdio->dev.fwnode = of_fwnode_handle(np); 211 210 212 211 /* Get bus level PHY reset GPIO details */ 213 212 mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
+3 -3
drivers/s390/net/qeth_core.h
··· 565 565 }; 566 566 567 567 struct qeth_ipato { 568 - int enabled; 569 - int invert4; 570 - int invert6; 568 + bool enabled; 569 + bool invert4; 570 + bool invert6; 571 571 struct list_head entries; 572 572 }; 573 573
+3 -3
drivers/s390/net/qeth_core_main.c
··· 1480 1480 qeth_set_intial_options(card); 1481 1481 /* IP address takeover */ 1482 1482 INIT_LIST_HEAD(&card->ipato.entries); 1483 - card->ipato.enabled = 0; 1484 - card->ipato.invert4 = 0; 1485 - card->ipato.invert6 = 0; 1483 + card->ipato.enabled = false; 1484 + card->ipato.invert4 = false; 1485 + card->ipato.invert6 = false; 1486 1486 /* init QDIO stuff */ 1487 1487 qeth_init_qdio_info(card); 1488 1488 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
+1 -1
drivers/s390/net/qeth_l3.h
··· 82 82 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 83 83 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 84 84 const u8 *); 85 - int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); 85 + void qeth_l3_update_ipato(struct qeth_card *card); 86 86 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); 87 87 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); 88 88 int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
+31 -5
drivers/s390/net/qeth_l3_main.c
··· 164 164 } 165 165 } 166 166 167 - int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 168 - struct qeth_ipaddr *addr) 167 + static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 168 + struct qeth_ipaddr *addr) 169 169 { 170 170 struct qeth_ipato_entry *ipatoe; 171 171 u8 addr_bits[128] = {0, }; ··· 173 173 int rc = 0; 174 174 175 175 if (!card->ipato.enabled) 176 + return 0; 177 + if (addr->type != QETH_IP_TYPE_NORMAL) 176 178 return 0; 177 179 178 180 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, ··· 292 290 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); 293 291 addr->ref_counter = 1; 294 292 295 - if (addr->type == QETH_IP_TYPE_NORMAL && 296 - qeth_l3_is_addr_covered_by_ipato(card, addr)) { 293 + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { 297 294 QETH_CARD_TEXT(card, 2, "tkovaddr"); 298 295 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; 299 296 } ··· 606 605 /* 607 606 * IP address takeover related functions 608 607 */ 608 + 609 + /** 610 + * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. 611 + * 612 + * Caller must hold ip_lock. 
613 + */ 614 + void qeth_l3_update_ipato(struct qeth_card *card) 615 + { 616 + struct qeth_ipaddr *addr; 617 + unsigned int i; 618 + 619 + hash_for_each(card->ip_htable, i, addr, hnode) { 620 + if (addr->type != QETH_IP_TYPE_NORMAL) 621 + continue; 622 + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) 623 + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; 624 + else 625 + addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; 626 + } 627 + } 628 + 609 629 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 610 630 { 611 631 struct qeth_ipato_entry *ipatoe, *tmp; ··· 638 616 kfree(ipatoe); 639 617 } 640 618 619 + qeth_l3_update_ipato(card); 641 620 spin_unlock_bh(&card->ip_lock); 642 621 } 643 622 ··· 663 640 } 664 641 } 665 642 666 - if (!rc) 643 + if (!rc) { 667 644 list_add_tail(&new->entry, &card->ipato.entries); 645 + qeth_l3_update_ipato(card); 646 + } 668 647 669 648 spin_unlock_bh(&card->ip_lock); 670 649 ··· 689 664 (proto == QETH_PROT_IPV4)? 4:16) && 690 665 (ipatoe->mask_bits == mask_bits)) { 691 666 list_del(&ipatoe->entry); 667 + qeth_l3_update_ipato(card); 692 668 kfree(ipatoe); 693 669 } 694 670 }
+41 -34
drivers/s390/net/qeth_l3_sys.c
··· 370 370 struct device_attribute *attr, const char *buf, size_t count) 371 371 { 372 372 struct qeth_card *card = dev_get_drvdata(dev); 373 - struct qeth_ipaddr *addr; 374 - int i, rc = 0; 373 + bool enable; 374 + int rc = 0; 375 375 376 376 if (!card) 377 377 return -EINVAL; ··· 384 384 } 385 385 386 386 if (sysfs_streq(buf, "toggle")) { 387 - card->ipato.enabled = (card->ipato.enabled)? 0 : 1; 388 - } else if (sysfs_streq(buf, "1")) { 389 - card->ipato.enabled = 1; 390 - hash_for_each(card->ip_htable, i, addr, hnode) { 391 - if ((addr->type == QETH_IP_TYPE_NORMAL) && 392 - qeth_l3_is_addr_covered_by_ipato(card, addr)) 393 - addr->set_flags |= 394 - QETH_IPA_SETIP_TAKEOVER_FLAG; 395 - } 396 - } else if (sysfs_streq(buf, "0")) { 397 - card->ipato.enabled = 0; 398 - hash_for_each(card->ip_htable, i, addr, hnode) { 399 - if (addr->set_flags & 400 - QETH_IPA_SETIP_TAKEOVER_FLAG) 401 - addr->set_flags &= 402 - ~QETH_IPA_SETIP_TAKEOVER_FLAG; 403 - } 404 - } else 387 + enable = !card->ipato.enabled; 388 + } else if (kstrtobool(buf, &enable)) { 405 389 rc = -EINVAL; 390 + goto out; 391 + } 392 + 393 + if (card->ipato.enabled != enable) { 394 + card->ipato.enabled = enable; 395 + spin_lock_bh(&card->ip_lock); 396 + qeth_l3_update_ipato(card); 397 + spin_unlock_bh(&card->ip_lock); 398 + } 406 399 out: 407 400 mutex_unlock(&card->conf_mutex); 408 401 return rc ? rc : count; ··· 421 428 const char *buf, size_t count) 422 429 { 423 430 struct qeth_card *card = dev_get_drvdata(dev); 431 + bool invert; 424 432 int rc = 0; 425 433 426 434 if (!card) 427 435 return -EINVAL; 428 436 429 437 mutex_lock(&card->conf_mutex); 430 - if (sysfs_streq(buf, "toggle")) 431 - card->ipato.invert4 = (card->ipato.invert4)? 
0 : 1; 432 - else if (sysfs_streq(buf, "1")) 433 - card->ipato.invert4 = 1; 434 - else if (sysfs_streq(buf, "0")) 435 - card->ipato.invert4 = 0; 436 - else 438 + if (sysfs_streq(buf, "toggle")) { 439 + invert = !card->ipato.invert4; 440 + } else if (kstrtobool(buf, &invert)) { 437 441 rc = -EINVAL; 442 + goto out; 443 + } 444 + 445 + if (card->ipato.invert4 != invert) { 446 + card->ipato.invert4 = invert; 447 + spin_lock_bh(&card->ip_lock); 448 + qeth_l3_update_ipato(card); 449 + spin_unlock_bh(&card->ip_lock); 450 + } 451 + out: 438 452 mutex_unlock(&card->conf_mutex); 439 453 return rc ? rc : count; 440 454 } ··· 607 607 struct device_attribute *attr, const char *buf, size_t count) 608 608 { 609 609 struct qeth_card *card = dev_get_drvdata(dev); 610 + bool invert; 610 611 int rc = 0; 611 612 612 613 if (!card) 613 614 return -EINVAL; 614 615 615 616 mutex_lock(&card->conf_mutex); 616 - if (sysfs_streq(buf, "toggle")) 617 - card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; 618 - else if (sysfs_streq(buf, "1")) 619 - card->ipato.invert6 = 1; 620 - else if (sysfs_streq(buf, "0")) 621 - card->ipato.invert6 = 0; 622 - else 617 + if (sysfs_streq(buf, "toggle")) { 618 + invert = !card->ipato.invert6; 619 + } else if (kstrtobool(buf, &invert)) { 623 620 rc = -EINVAL; 621 + goto out; 622 + } 623 + 624 + if (card->ipato.invert6 != invert) { 625 + card->ipato.invert6 = invert; 626 + spin_lock_bh(&card->ip_lock); 627 + qeth_l3_update_ipato(card); 628 + spin_unlock_bh(&card->ip_lock); 629 + } 630 + out: 624 631 mutex_unlock(&card->conf_mutex); 625 632 return rc ? rc : count; 626 633 }
+9
include/linux/ptr_ring.h
··· 101 101 102 102 /* Note: callers invoking this in a loop must use a compiler barrier, 103 103 * for example cpu_relax(). Callers must hold producer_lock. 104 + * Callers are responsible for making sure pointer that is being queued 105 + * points to a valid data. 104 106 */ 105 107 static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) 106 108 { 107 109 if (unlikely(!r->size) || r->queue[r->producer]) 108 110 return -ENOSPC; 111 + 112 + /* Make sure the pointer we are storing points to a valid data. */ 113 + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ 114 + smp_wmb(); 109 115 110 116 r->queue[r->producer++] = ptr; 111 117 if (unlikely(r->producer >= r->size)) ··· 281 275 if (ptr) 282 276 __ptr_ring_discard_one(r); 283 277 278 + /* Make sure anyone accessing data through the pointer is up to date. */ 279 + /* Pairs with smp_wmb in __ptr_ring_produce. */ 280 + smp_read_barrier_depends(); 284 281 return ptr; 285 282 } 286 283
+9 -9
include/net/gue.h
··· 44 44 #else 45 45 #error "Please fix <asm/byteorder.h>" 46 46 #endif 47 - __u8 proto_ctype; 48 - __u16 flags; 47 + __u8 proto_ctype; 48 + __be16 flags; 49 49 }; 50 - __u32 word; 50 + __be32 word; 51 51 }; 52 52 }; 53 53 ··· 84 84 * if there is an unknown standard or private flags, or the options length for 85 85 * the flags exceeds the options length specific in hlen of the GUE header. 86 86 */ 87 - static inline int validate_gue_flags(struct guehdr *guehdr, 88 - size_t optlen) 87 + static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen) 89 88 { 89 + __be16 flags = guehdr->flags; 90 90 size_t len; 91 - __be32 flags = guehdr->flags; 92 91 93 92 if (flags & ~GUE_FLAGS_ALL) 94 93 return 1; ··· 100 101 /* Private flags are last four bytes accounted in 101 102 * guehdr_flags_len 102 103 */ 103 - flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); 104 + __be32 pflags = *(__be32 *)((void *)&guehdr[1] + 105 + len - GUE_LEN_PRIV); 104 106 105 - if (flags & ~GUE_PFLAGS_ALL) 107 + if (pflags & ~GUE_PFLAGS_ALL) 106 108 return 1; 107 109 108 - len += guehdr_priv_flags_len(flags); 110 + len += guehdr_priv_flags_len(pflags); 109 111 if (len > optlen) 110 112 return 1; 111 113 }
+1
include/net/ip.h
··· 34 34 #include <net/flow_dissector.h> 35 35 36 36 #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ 37 + #define IPV4_MIN_MTU 68 /* RFC 791 */ 37 38 38 39 struct sock; 39 40
+1
include/net/sch_generic.h
··· 71 71 * qdisc_tree_decrease_qlen() should stop. 72 72 */ 73 73 #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ 74 + #define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ 74 75 u32 limit; 75 76 const struct Qdisc_ops *ops; 76 77 struct qdisc_size_table __rcu *stab;
-1
include/uapi/linux/pkt_sched.h
··· 256 256 #define TC_RED_ECN 1 257 257 #define TC_RED_HARDDROP 2 258 258 #define TC_RED_ADAPTATIVE 4 259 - #define TC_RED_OFFLOADED 8 260 259 }; 261 260 262 261 struct tc_red_xstats {
+1
include/uapi/linux/rtnetlink.h
··· 557 557 TCA_PAD, 558 558 TCA_DUMP_INVISIBLE, 559 559 TCA_CHAIN, 560 + TCA_HW_OFFLOAD, 560 561 __TCA_MAX 561 562 }; 562 563
+2
kernel/bpf/hashtab.c
··· 114 114 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), 115 115 htab->map.key_size); 116 116 free_percpu(pptr); 117 + cond_resched(); 117 118 } 118 119 free_elems: 119 120 bpf_map_area_free(htab->elems); ··· 160 159 goto free_elems; 161 160 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, 162 161 pptr); 162 + cond_resched(); 163 163 } 164 164 165 165 skip_percpu_elems:
+12 -7
kernel/trace/bpf_trace.c
··· 343 343 .arg4_type = ARG_CONST_SIZE, 344 344 }; 345 345 346 - static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); 346 + static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd); 347 347 348 348 static __always_inline u64 349 349 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 350 - u64 flags, struct perf_raw_record *raw) 350 + u64 flags, struct perf_sample_data *sd) 351 351 { 352 352 struct bpf_array *array = container_of(map, struct bpf_array, map); 353 - struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd); 354 353 unsigned int cpu = smp_processor_id(); 355 354 u64 index = flags & BPF_F_INDEX_MASK; 356 355 struct bpf_event_entry *ee; ··· 372 373 if (unlikely(event->oncpu != cpu)) 373 374 return -EOPNOTSUPP; 374 375 375 - perf_sample_data_init(sd, 0, 0); 376 - sd->raw = raw; 377 376 perf_event_output(event, sd, regs); 378 377 return 0; 379 378 } ··· 379 382 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 380 383 u64, flags, void *, data, u64, size) 381 384 { 385 + struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd); 382 386 struct perf_raw_record raw = { 383 387 .frag = { 384 388 .size = size, ··· 390 392 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 391 393 return -EINVAL; 392 394 393 - return __bpf_perf_event_output(regs, map, flags, &raw); 395 + perf_sample_data_init(sd, 0, 0); 396 + sd->raw = &raw; 397 + 398 + return __bpf_perf_event_output(regs, map, flags, sd); 394 399 } 395 400 396 401 static const struct bpf_func_proto bpf_perf_event_output_proto = { ··· 408 407 }; 409 408 410 409 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); 410 + static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd); 411 411 412 412 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 413 413 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 414 414 { 415 + struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd); 415 416 struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); 416 417 
struct perf_raw_frag frag = { 417 418 .copy = ctx_copy, ··· 431 428 }; 432 429 433 430 perf_fetch_caller_regs(regs); 431 + perf_sample_data_init(sd, 0, 0); 432 + sd->raw = &raw; 434 433 435 - return __bpf_perf_event_output(regs, map, flags, &raw); 434 + return __bpf_perf_event_output(regs, map, flags, sd); 436 435 } 437 436 438 437 BPF_CALL_0(bpf_get_current_task)
+2 -2
net/batman-adv/bat_iv_ogm.c
··· 1214 1214 orig_node->last_seen = jiffies; 1215 1215 1216 1216 /* find packet count of corresponding one hop neighbor */ 1217 - spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); 1217 + spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); 1218 1218 if_num = if_incoming->if_num; 1219 1219 orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num]; 1220 1220 neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); ··· 1224 1224 } else { 1225 1225 neigh_rq_count = 0; 1226 1226 } 1227 - spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); 1227 + spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); 1228 1228 1229 1229 /* pay attention to not get a value bigger than 100 % */ 1230 1230 if (orig_eq_count > neigh_rq_count)
+1 -1
net/batman-adv/bat_v.c
··· 814 814 } 815 815 816 816 orig_gw = batadv_gw_node_get(bat_priv, orig_node); 817 - if (!orig_node) 817 + if (!orig_gw) 818 818 goto out; 819 819 820 820 if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
+2
net/batman-adv/fragmentation.c
··· 499 499 */ 500 500 if (skb->priority >= 256 && skb->priority <= 263) 501 501 frag_header.priority = skb->priority - 256; 502 + else 503 + frag_header.priority = 0; 502 504 503 505 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); 504 506 ether_addr_copy(frag_header.dest, orig_node->orig);
+2 -2
net/batman-adv/tp_meter.c
··· 482 482 483 483 /** 484 484 * batadv_tp_sender_timeout - timer that fires in case of packet loss 485 - * @arg: address of the related tp_vars 485 + * @t: address to timer_list inside tp_vars 486 486 * 487 487 * If fired it means that there was packet loss. 488 488 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and ··· 1106 1106 /** 1107 1107 * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is 1108 1108 * reached without received ack 1109 - * @arg: address of the related tp_vars 1109 + * @t: address to timer_list inside tp_vars 1110 1110 */ 1111 1111 static void batadv_tp_receiver_shutdown(struct timer_list *t) 1112 1112 {
-1
net/core/netprio_cgroup.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/slab.h> 16 16 #include <linux/types.h> 17 - #include <linux/module.h> 18 17 #include <linux/string.h> 19 18 #include <linux/errno.h> 20 19 #include <linux/skbuff.h>
+5 -1
net/core/skbuff.c
··· 4293 4293 struct sock *sk = skb->sk; 4294 4294 4295 4295 if (!skb_may_tx_timestamp(sk, false)) 4296 - return; 4296 + goto err; 4297 4297 4298 4298 /* Take a reference to prevent skb_orphan() from freeing the socket, 4299 4299 * but only if the socket refcount is not zero. ··· 4302 4302 *skb_hwtstamps(skb) = *hwtstamps; 4303 4303 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 4304 4304 sock_put(sk); 4305 + return; 4305 4306 } 4307 + 4308 + err: 4309 + kfree_skb(skb); 4306 4310 } 4307 4311 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 4308 4312
-1
net/dsa/slave.c
··· 16 16 #include <linux/of_net.h> 17 17 #include <linux/of_mdio.h> 18 18 #include <linux/mdio.h> 19 - #include <linux/list.h> 20 19 #include <net/rtnetlink.h> 21 20 #include <net/pkt_cls.h> 22 21 #include <net/tc_act/tc_mirred.h>
+1 -1
net/ipv4/devinet.c
··· 1428 1428 1429 1429 static bool inetdev_valid_mtu(unsigned int mtu) 1430 1430 { 1431 - return mtu >= 68; 1431 + return mtu >= IPV4_MIN_MTU; 1432 1432 } 1433 1433 1434 1434 static void inetdev_send_gratuitous_arp(struct net_device *dev,
+34 -10
net/ipv4/igmp.c
··· 89 89 #include <linux/rtnetlink.h> 90 90 #include <linux/times.h> 91 91 #include <linux/pkt_sched.h> 92 + #include <linux/byteorder/generic.h> 92 93 93 94 #include <net/net_namespace.h> 94 95 #include <net/arp.h> ··· 322 321 return scount; 323 322 } 324 323 324 + /* source address selection per RFC 3376 section 4.2.13 */ 325 + static __be32 igmpv3_get_srcaddr(struct net_device *dev, 326 + const struct flowi4 *fl4) 327 + { 328 + struct in_device *in_dev = __in_dev_get_rcu(dev); 329 + 330 + if (!in_dev) 331 + return htonl(INADDR_ANY); 332 + 333 + for_ifa(in_dev) { 334 + if (inet_ifa_match(fl4->saddr, ifa)) 335 + return fl4->saddr; 336 + } endfor_ifa(in_dev); 337 + 338 + return htonl(INADDR_ANY); 339 + } 340 + 325 341 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) 326 342 { 327 343 struct sk_buff *skb; ··· 386 368 pip->frag_off = htons(IP_DF); 387 369 pip->ttl = 1; 388 370 pip->daddr = fl4.daddr; 389 - pip->saddr = fl4.saddr; 371 + pip->saddr = igmpv3_get_srcaddr(dev, &fl4); 390 372 pip->protocol = IPPROTO_IGMP; 391 373 pip->tot_len = 0; /* filled in later */ 392 374 ip_select_ident(net, skb, NULL); ··· 422 404 } 423 405 424 406 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, 425 - int type, struct igmpv3_grec **ppgr) 407 + int type, struct igmpv3_grec **ppgr, unsigned int mtu) 426 408 { 427 409 struct net_device *dev = pmc->interface->dev; 428 410 struct igmpv3_report *pih; 429 411 struct igmpv3_grec *pgr; 430 412 431 - if (!skb) 432 - skb = igmpv3_newpack(dev, dev->mtu); 433 - if (!skb) 434 - return NULL; 413 + if (!skb) { 414 + skb = igmpv3_newpack(dev, mtu); 415 + if (!skb) 416 + return NULL; 417 + } 435 418 pgr = skb_put(skb, sizeof(struct igmpv3_grec)); 436 419 pgr->grec_type = type; 437 420 pgr->grec_auxwords = 0; ··· 455 436 struct igmpv3_grec *pgr = NULL; 456 437 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; 457 438 int scount, stotal, first, isquery, truncate; 439 + unsigned int 
mtu; 458 440 459 441 if (pmc->multiaddr == IGMP_ALL_HOSTS) 460 442 return skb; 461 443 if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) 444 + return skb; 445 + 446 + mtu = READ_ONCE(dev->mtu); 447 + if (mtu < IPV4_MIN_MTU) 462 448 return skb; 463 449 464 450 isquery = type == IGMPV3_MODE_IS_INCLUDE || ··· 486 462 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 487 463 if (skb) 488 464 igmpv3_sendpack(skb); 489 - skb = igmpv3_newpack(dev, dev->mtu); 465 + skb = igmpv3_newpack(dev, mtu); 490 466 } 491 467 } 492 468 first = 1; ··· 522 498 pgr->grec_nsrcs = htons(scount); 523 499 if (skb) 524 500 igmpv3_sendpack(skb); 525 - skb = igmpv3_newpack(dev, dev->mtu); 501 + skb = igmpv3_newpack(dev, mtu); 526 502 first = 1; 527 503 scount = 0; 528 504 } 529 505 if (first) { 530 - skb = add_grhead(skb, pmc, type, &pgr); 506 + skb = add_grhead(skb, pmc, type, &pgr, mtu); 531 507 first = 0; 532 508 } 533 509 if (!skb) ··· 562 538 igmpv3_sendpack(skb); 563 539 skb = NULL; /* add_grhead will get a new one */ 564 540 } 565 - skb = add_grhead(skb, pmc, type, &pgr); 541 + skb = add_grhead(skb, pmc, type, &pgr, mtu); 566 542 } 567 543 } 568 544 if (pgr)
+1 -1
net/ipv4/ip_gre.c
··· 266 266 len = gre_hdr_len + sizeof(*ershdr); 267 267 268 268 if (unlikely(!pskb_may_pull(skb, len))) 269 - return -ENOMEM; 269 + return PACKET_REJECT; 270 270 271 271 iph = ip_hdr(skb); 272 272 ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
+2 -2
net/ipv4/ip_tunnel.c
··· 349 349 dev->needed_headroom = t_hlen + hlen; 350 350 mtu -= (dev->hard_header_len + t_hlen); 351 351 352 - if (mtu < 68) 353 - mtu = 68; 352 + if (mtu < IPV4_MIN_MTU) 353 + mtu = IPV4_MIN_MTU; 354 354 355 355 return mtu; 356 356 }
-1
net/ipv4/netfilter/arp_tables.c
··· 373 373 if (!xt_find_jump_offset(offsets, newpos, 374 374 newinfo->number)) 375 375 return 0; 376 - e = entry0 + newpos; 377 376 } else { 378 377 /* ... this is a fallthru */ 379 378 newpos = pos + e->next_offset;
-1
net/ipv4/netfilter/ip_tables.c
··· 439 439 if (!xt_find_jump_offset(offsets, newpos, 440 440 newinfo->number)) 441 441 return 0; 442 - e = entry0 + newpos; 443 442 } else { 444 443 /* ... this is a fallthru */ 445 444 newpos = pos + e->next_offset;
+2 -1
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 813 813 814 814 static void clusterip_net_exit(struct net *net) 815 815 { 816 - #ifdef CONFIG_PROC_FS 817 816 struct clusterip_net *cn = net_generic(net, clusterip_net_id); 817 + #ifdef CONFIG_PROC_FS 818 818 proc_remove(cn->procdir); 819 819 cn->procdir = NULL; 820 820 #endif 821 821 nf_unregister_net_hook(net, &cip_arp_ops); 822 + WARN_ON_ONCE(!list_empty(&cn->configs)); 822 823 } 823 824 824 825 static struct pernet_operations clusterip_net_ops = {
+10 -5
net/ipv4/raw.c
··· 513 513 int err; 514 514 struct ip_options_data opt_copy; 515 515 struct raw_frag_vec rfv; 516 + int hdrincl; 516 517 517 518 err = -EMSGSIZE; 518 519 if (len > 0xFFFF) 519 520 goto out; 520 521 522 + /* hdrincl should be READ_ONCE(inet->hdrincl) 523 + * but READ_ONCE() doesn't work with bit fields 524 + */ 525 + hdrincl = inet->hdrincl; 521 526 /* 522 527 * Check the flags. 523 528 */ ··· 598 593 /* Linux does not mangle headers on raw sockets, 599 594 * so that IP options + IP_HDRINCL is non-sense. 600 595 */ 601 - if (inet->hdrincl) 596 + if (hdrincl) 602 597 goto done; 603 598 if (ipc.opt->opt.srr) { 604 599 if (!daddr) ··· 620 615 621 616 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 622 617 RT_SCOPE_UNIVERSE, 623 - inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 618 + hdrincl ? IPPROTO_RAW : sk->sk_protocol, 624 619 inet_sk_flowi_flags(sk) | 625 - (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), 620 + (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), 626 621 daddr, saddr, 0, 0, sk->sk_uid); 627 622 628 - if (!inet->hdrincl) { 623 + if (!hdrincl) { 629 624 rfv.msg = msg; 630 625 rfv.hlen = 0; 631 626 ··· 650 645 goto do_confirm; 651 646 back_from_confirm: 652 647 653 - if (inet->hdrincl) 648 + if (hdrincl) 654 649 err = raw_send_hdrinc(sk, &fl4, msg, len, 655 650 &rt, msg->msg_flags, &ipc.sockc); 656 651
+6 -4
net/ipv4/tcp_input.c
··· 508 508 u32 new_sample = tp->rcv_rtt_est.rtt_us; 509 509 long m = sample; 510 510 511 - if (m == 0) 512 - m = 1; 513 - 514 511 if (new_sample != 0) { 515 512 /* If we sample in larger samples in the non-timestamp 516 513 * case, we could grossly overestimate the RTT especially ··· 544 547 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 545 548 return; 546 549 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); 550 + if (!delta_us) 551 + delta_us = 1; 547 552 tcp_rcv_rtt_update(tp, delta_us, 1); 548 553 549 554 new_measure: ··· 562 563 (TCP_SKB_CB(skb)->end_seq - 563 564 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { 564 565 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 565 - u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 566 + u32 delta_us; 566 567 568 + if (!delta) 569 + delta = 1; 570 + delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 567 571 tcp_rcv_rtt_update(tp, delta_us, 0); 568 572 } 569 573 }
+1 -1
net/ipv4/tcp_ipv4.c
··· 848 848 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 849 849 req->ts_recent, 850 850 0, 851 - tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 851 + tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr, 852 852 AF_INET), 853 853 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 854 854 ip_hdr(skb)->tos);
+2
net/ipv4/tcp_timer.c
··· 264 264 icsk->icsk_ack.pingpong = 0; 265 265 icsk->icsk_ack.ato = TCP_ATO_MIN; 266 266 } 267 + tcp_mstamp_refresh(tcp_sk(sk)); 267 268 tcp_send_ack(sk); 268 269 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); 269 270 } ··· 633 632 goto out; 634 633 } 635 634 635 + tcp_mstamp_refresh(tp); 636 636 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 637 637 if (tp->linger2 >= 0) { 638 638 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
+15 -10
net/ipv6/mcast.c
··· 1682 1682 } 1683 1683 1684 1684 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, 1685 - int type, struct mld2_grec **ppgr) 1685 + int type, struct mld2_grec **ppgr, unsigned int mtu) 1686 1686 { 1687 - struct net_device *dev = pmc->idev->dev; 1688 1687 struct mld2_report *pmr; 1689 1688 struct mld2_grec *pgr; 1690 1689 1691 - if (!skb) 1692 - skb = mld_newpack(pmc->idev, dev->mtu); 1693 - if (!skb) 1694 - return NULL; 1690 + if (!skb) { 1691 + skb = mld_newpack(pmc->idev, mtu); 1692 + if (!skb) 1693 + return NULL; 1694 + } 1695 1695 pgr = skb_put(skb, sizeof(struct mld2_grec)); 1696 1696 pgr->grec_type = type; 1697 1697 pgr->grec_auxwords = 0; ··· 1714 1714 struct mld2_grec *pgr = NULL; 1715 1715 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; 1716 1716 int scount, stotal, first, isquery, truncate; 1717 + unsigned int mtu; 1717 1718 1718 1719 if (pmc->mca_flags & MAF_NOREPORT) 1720 + return skb; 1721 + 1722 + mtu = READ_ONCE(dev->mtu); 1723 + if (mtu < IPV6_MIN_MTU) 1719 1724 return skb; 1720 1725 1721 1726 isquery = type == MLD2_MODE_IS_INCLUDE || ··· 1743 1738 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 1744 1739 if (skb) 1745 1740 mld_sendpack(skb); 1746 - skb = mld_newpack(idev, dev->mtu); 1741 + skb = mld_newpack(idev, mtu); 1747 1742 } 1748 1743 } 1749 1744 first = 1; ··· 1779 1774 pgr->grec_nsrcs = htons(scount); 1780 1775 if (skb) 1781 1776 mld_sendpack(skb); 1782 - skb = mld_newpack(idev, dev->mtu); 1777 + skb = mld_newpack(idev, mtu); 1783 1778 first = 1; 1784 1779 scount = 0; 1785 1780 } 1786 1781 if (first) { 1787 - skb = add_grhead(skb, pmc, type, &pgr); 1782 + skb = add_grhead(skb, pmc, type, &pgr, mtu); 1788 1783 first = 0; 1789 1784 } 1790 1785 if (!skb) ··· 1819 1814 mld_sendpack(skb); 1820 1815 skb = NULL; /* add_grhead will get a new one */ 1821 1816 } 1822 - skb = add_grhead(skb, pmc, type, &pgr); 1817 + skb = add_grhead(skb, pmc, type, &pgr, mtu); 1823 1818 } 1824 1819 } 1825 1820 if 
(pgr)
-1
net/ipv6/netfilter/ip6_tables.c
··· 458 458 if (!xt_find_jump_offset(offsets, newpos, 459 459 newinfo->number)) 460 460 return 0; 461 - e = entry0 + newpos; 462 461 } else { 463 462 /* ... this is a fallthru */ 464 463 newpos = pos + e->next_offset;
+7 -1
net/ipv6/netfilter/ip6t_MASQUERADE.c
··· 33 33 34 34 if (range->flags & NF_NAT_RANGE_MAP_IPS) 35 35 return -EINVAL; 36 - return 0; 36 + return nf_ct_netns_get(par->net, par->family); 37 + } 38 + 39 + static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par) 40 + { 41 + nf_ct_netns_put(par->net, par->family); 37 42 } 38 43 39 44 static struct xt_target masquerade_tg6_reg __read_mostly = { 40 45 .name = "MASQUERADE", 41 46 .family = NFPROTO_IPV6, 42 47 .checkentry = masquerade_tg6_checkentry, 48 + .destroy = masquerade_tg6_destroy, 43 49 .target = masquerade_tg6, 44 50 .targetsize = sizeof(struct nf_nat_range), 45 51 .table = "nat",
+1 -1
net/ipv6/tcp_ipv6.c
··· 994 994 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, 995 995 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 996 996 req->ts_recent, sk->sk_bound_dev_if, 997 - tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 997 + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr), 998 998 0, 0); 999 999 } 1000 1000
+2 -3
net/mac80211/ht.c
··· 291 291 int i; 292 292 293 293 mutex_lock(&sta->ampdu_mlme.mtx); 294 - for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 294 + for (i = 0; i < IEEE80211_NUM_TIDS; i++) 295 295 ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 296 296 WLAN_REASON_QSTA_LEAVE_QBSS, 297 297 reason != AGG_STOP_DESTROY_STA && 298 298 reason != AGG_STOP_PEER_REQUEST); 299 - } 300 - mutex_unlock(&sta->ampdu_mlme.mtx); 301 299 302 300 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 303 301 ___ieee80211_stop_tx_ba_session(sta, i, reason); 302 + mutex_unlock(&sta->ampdu_mlme.mtx); 304 303 305 304 /* stopping might queue the work again - so cancel only afterwards */ 306 305 cancel_work_sync(&sta->ampdu_mlme.work);
+98 -30
net/netfilter/nf_conntrack_h323_asn1.c
··· 103 103 #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;} 104 104 #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} 105 105 #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} 106 - #define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND) 107 106 static unsigned int get_len(struct bitstr *bs); 108 107 static unsigned int get_bit(struct bitstr *bs); 109 108 static unsigned int get_bits(struct bitstr *bs, unsigned int b); ··· 162 163 } 163 164 164 165 return v; 166 + } 167 + 168 + static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits) 169 + { 170 + bits += bs->bit; 171 + bytes += bits / BITS_PER_BYTE; 172 + if (bits % BITS_PER_BYTE > 0) 173 + bytes++; 174 + 175 + if (*bs->cur + bytes > *bs->end) 176 + return 1; 177 + 178 + return 0; 165 179 } 166 180 167 181 /****************************************************************************/ ··· 291 279 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 292 280 293 281 INC_BIT(bs); 294 - 295 - CHECK_BOUND(bs, 0); 282 + if (nf_h323_error_boundary(bs, 0, 0)) 283 + return H323_ERROR_BOUND; 296 284 return H323_ERROR_NONE; 297 285 } 298 286 ··· 305 293 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 306 294 307 295 BYTE_ALIGN(bs); 308 - CHECK_BOUND(bs, 1); 296 + if (nf_h323_error_boundary(bs, 1, 0)) 297 + return H323_ERROR_BOUND; 298 + 309 299 len = *bs->cur++; 310 300 bs->cur += len; 301 + if (nf_h323_error_boundary(bs, 0, 0)) 302 + return H323_ERROR_BOUND; 311 303 312 - CHECK_BOUND(bs, 0); 313 304 return H323_ERROR_NONE; 314 305 } 315 306 ··· 334 319 bs->cur += 2; 335 320 break; 336 321 case CONS: /* 64K < Range < 4G */ 322 + if (nf_h323_error_boundary(bs, 0, 2)) 323 + return H323_ERROR_BOUND; 337 324 len = get_bits(bs, 2) + 1; 338 325 BYTE_ALIGN(bs); 339 326 if (base && (f->attr & DECODE)) { /* timeToLive */ ··· 347 330 break; 348 331 case UNCO: 349 332 BYTE_ALIGN(bs); 350 - CHECK_BOUND(bs, 2); 333 + if 
(nf_h323_error_boundary(bs, 2, 0)) 334 + return H323_ERROR_BOUND; 351 335 len = get_len(bs); 352 336 bs->cur += len; 353 337 break; ··· 359 341 360 342 PRINT("\n"); 361 343 362 - CHECK_BOUND(bs, 0); 344 + if (nf_h323_error_boundary(bs, 0, 0)) 345 + return H323_ERROR_BOUND; 363 346 return H323_ERROR_NONE; 364 347 } 365 348 ··· 376 357 INC_BITS(bs, f->sz); 377 358 } 378 359 379 - CHECK_BOUND(bs, 0); 360 + if (nf_h323_error_boundary(bs, 0, 0)) 361 + return H323_ERROR_BOUND; 380 362 return H323_ERROR_NONE; 381 363 } 382 364 ··· 395 375 len = f->lb; 396 376 break; 397 377 case WORD: /* 2-byte length */ 398 - CHECK_BOUND(bs, 2); 378 + if (nf_h323_error_boundary(bs, 2, 0)) 379 + return H323_ERROR_BOUND; 399 380 len = (*bs->cur++) << 8; 400 381 len += (*bs->cur++) + f->lb; 401 382 break; 402 383 case SEMI: 403 - CHECK_BOUND(bs, 2); 384 + if (nf_h323_error_boundary(bs, 2, 0)) 385 + return H323_ERROR_BOUND; 404 386 len = get_len(bs); 405 387 break; 406 388 default: ··· 413 391 bs->cur += len >> 3; 414 392 bs->bit = len & 7; 415 393 416 - CHECK_BOUND(bs, 0); 394 + if (nf_h323_error_boundary(bs, 0, 0)) 395 + return H323_ERROR_BOUND; 417 396 return H323_ERROR_NONE; 418 397 } 419 398 ··· 427 404 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 428 405 429 406 /* 2 <= Range <= 255 */ 407 + if (nf_h323_error_boundary(bs, 0, f->sz)) 408 + return H323_ERROR_BOUND; 430 409 len = get_bits(bs, f->sz) + f->lb; 431 410 432 411 BYTE_ALIGN(bs); 433 412 INC_BITS(bs, (len << 2)); 434 413 435 - CHECK_BOUND(bs, 0); 414 + if (nf_h323_error_boundary(bs, 0, 0)) 415 + return H323_ERROR_BOUND; 436 416 return H323_ERROR_NONE; 437 417 } 438 418 ··· 466 440 break; 467 441 case BYTE: /* Range == 256 */ 468 442 BYTE_ALIGN(bs); 469 - CHECK_BOUND(bs, 1); 443 + if (nf_h323_error_boundary(bs, 1, 0)) 444 + return H323_ERROR_BOUND; 470 445 len = (*bs->cur++) + f->lb; 471 446 break; 472 447 case SEMI: 473 448 BYTE_ALIGN(bs); 474 - CHECK_BOUND(bs, 2); 449 + if (nf_h323_error_boundary(bs, 2, 0)) 450 + return 
H323_ERROR_BOUND; 475 451 len = get_len(bs) + f->lb; 476 452 break; 477 453 default: /* 2 <= Range <= 255 */ 454 + if (nf_h323_error_boundary(bs, 0, f->sz)) 455 + return H323_ERROR_BOUND; 478 456 len = get_bits(bs, f->sz) + f->lb; 479 457 BYTE_ALIGN(bs); 480 458 break; ··· 488 458 489 459 PRINT("\n"); 490 460 491 - CHECK_BOUND(bs, 0); 461 + if (nf_h323_error_boundary(bs, 0, 0)) 462 + return H323_ERROR_BOUND; 492 463 return H323_ERROR_NONE; 493 464 } 494 465 ··· 504 473 switch (f->sz) { 505 474 case BYTE: /* Range == 256 */ 506 475 BYTE_ALIGN(bs); 507 - CHECK_BOUND(bs, 1); 476 + if (nf_h323_error_boundary(bs, 1, 0)) 477 + return H323_ERROR_BOUND; 508 478 len = (*bs->cur++) + f->lb; 509 479 break; 510 480 default: /* 2 <= Range <= 255 */ 481 + if (nf_h323_error_boundary(bs, 0, f->sz)) 482 + return H323_ERROR_BOUND; 511 483 len = get_bits(bs, f->sz) + f->lb; 512 484 BYTE_ALIGN(bs); 513 485 break; ··· 518 484 519 485 bs->cur += len << 1; 520 486 521 - CHECK_BOUND(bs, 0); 487 + if (nf_h323_error_boundary(bs, 0, 0)) 488 + return H323_ERROR_BOUND; 522 489 return H323_ERROR_NONE; 523 490 } 524 491 ··· 538 503 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; 539 504 540 505 /* Extensible? */ 506 + if (nf_h323_error_boundary(bs, 0, 1)) 507 + return H323_ERROR_BOUND; 541 508 ext = (f->attr & EXT) ? 
get_bit(bs) : 0; 542 509 543 510 /* Get fields bitmap */ 511 + if (nf_h323_error_boundary(bs, 0, f->sz)) 512 + return H323_ERROR_BOUND; 544 513 bmp = get_bitmap(bs, f->sz); 545 514 if (base) 546 515 *(unsigned int *)base = bmp; ··· 564 525 565 526 /* Decode */ 566 527 if (son->attr & OPEN) { /* Open field */ 567 - CHECK_BOUND(bs, 2); 528 + if (nf_h323_error_boundary(bs, 2, 0)) 529 + return H323_ERROR_BOUND; 568 530 len = get_len(bs); 569 - CHECK_BOUND(bs, len); 531 + if (nf_h323_error_boundary(bs, len, 0)) 532 + return H323_ERROR_BOUND; 570 533 if (!base || !(son->attr & DECODE)) { 571 534 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 572 535 " ", son->name); ··· 596 555 return H323_ERROR_NONE; 597 556 598 557 /* Get the extension bitmap */ 558 + if (nf_h323_error_boundary(bs, 0, 7)) 559 + return H323_ERROR_BOUND; 599 560 bmp2_len = get_bits(bs, 7) + 1; 600 - CHECK_BOUND(bs, (bmp2_len + 7) >> 3); 561 + if (nf_h323_error_boundary(bs, 0, bmp2_len)) 562 + return H323_ERROR_BOUND; 601 563 bmp2 = get_bitmap(bs, bmp2_len); 602 564 bmp |= bmp2 >> f->sz; 603 565 if (base) ··· 611 567 for (opt = 0; opt < bmp2_len; opt++, i++, son++) { 612 568 /* Check Range */ 613 569 if (i >= f->ub) { /* Newer Version? 
*/ 614 - CHECK_BOUND(bs, 2); 570 + if (nf_h323_error_boundary(bs, 2, 0)) 571 + return H323_ERROR_BOUND; 615 572 len = get_len(bs); 616 - CHECK_BOUND(bs, len); 573 + if (nf_h323_error_boundary(bs, len, 0)) 574 + return H323_ERROR_BOUND; 617 575 bs->cur += len; 618 576 continue; 619 577 } ··· 629 583 if (!((0x80000000 >> opt) & bmp2)) /* Not present */ 630 584 continue; 631 585 632 - CHECK_BOUND(bs, 2); 586 + if (nf_h323_error_boundary(bs, 2, 0)) 587 + return H323_ERROR_BOUND; 633 588 len = get_len(bs); 634 - CHECK_BOUND(bs, len); 589 + if (nf_h323_error_boundary(bs, len, 0)) 590 + return H323_ERROR_BOUND; 635 591 if (!base || !(son->attr & DECODE)) { 636 592 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 637 593 son->name); ··· 671 623 switch (f->sz) { 672 624 case BYTE: 673 625 BYTE_ALIGN(bs); 674 - CHECK_BOUND(bs, 1); 626 + if (nf_h323_error_boundary(bs, 1, 0)) 627 + return H323_ERROR_BOUND; 675 628 count = *bs->cur++; 676 629 break; 677 630 case WORD: 678 631 BYTE_ALIGN(bs); 679 - CHECK_BOUND(bs, 2); 632 + if (nf_h323_error_boundary(bs, 2, 0)) 633 + return H323_ERROR_BOUND; 680 634 count = *bs->cur++; 681 635 count <<= 8; 682 636 count += *bs->cur++; 683 637 break; 684 638 case SEMI: 685 639 BYTE_ALIGN(bs); 686 - CHECK_BOUND(bs, 2); 640 + if (nf_h323_error_boundary(bs, 2, 0)) 641 + return H323_ERROR_BOUND; 687 642 count = get_len(bs); 688 643 break; 689 644 default: 645 + if (nf_h323_error_boundary(bs, 0, f->sz)) 646 + return H323_ERROR_BOUND; 690 647 count = get_bits(bs, f->sz); 691 648 break; 692 649 } ··· 711 658 for (i = 0; i < count; i++) { 712 659 if (son->attr & OPEN) { 713 660 BYTE_ALIGN(bs); 661 + if (nf_h323_error_boundary(bs, 2, 0)) 662 + return H323_ERROR_BOUND; 714 663 len = get_len(bs); 715 - CHECK_BOUND(bs, len); 664 + if (nf_h323_error_boundary(bs, len, 0)) 665 + return H323_ERROR_BOUND; 716 666 if (!base || !(son->attr & DECODE)) { 717 667 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 718 668 " ", son->name); ··· 766 710 base = (base && (f->attr & 
DECODE)) ? base + f->offset : NULL; 767 711 768 712 /* Decode the choice index number */ 713 + if (nf_h323_error_boundary(bs, 0, 1)) 714 + return H323_ERROR_BOUND; 769 715 if ((f->attr & EXT) && get_bit(bs)) { 770 716 ext = 1; 717 + if (nf_h323_error_boundary(bs, 0, 7)) 718 + return H323_ERROR_BOUND; 771 719 type = get_bits(bs, 7) + f->lb; 772 720 } else { 773 721 ext = 0; 722 + if (nf_h323_error_boundary(bs, 0, f->sz)) 723 + return H323_ERROR_BOUND; 774 724 type = get_bits(bs, f->sz); 775 725 if (type >= f->lb) 776 726 return H323_ERROR_RANGE; ··· 789 727 /* Check Range */ 790 728 if (type >= f->ub) { /* Newer version? */ 791 729 BYTE_ALIGN(bs); 730 + if (nf_h323_error_boundary(bs, 2, 0)) 731 + return H323_ERROR_BOUND; 792 732 len = get_len(bs); 793 - CHECK_BOUND(bs, len); 733 + if (nf_h323_error_boundary(bs, len, 0)) 734 + return H323_ERROR_BOUND; 794 735 bs->cur += len; 795 736 return H323_ERROR_NONE; 796 737 } ··· 807 742 808 743 if (ext || (son->attr & OPEN)) { 809 744 BYTE_ALIGN(bs); 745 + if (nf_h323_error_boundary(bs, len, 0)) 746 + return H323_ERROR_BOUND; 810 747 len = get_len(bs); 811 - CHECK_BOUND(bs, len); 748 + if (nf_h323_error_boundary(bs, len, 0)) 749 + return H323_ERROR_BOUND; 812 750 if (!base || !(son->attr & DECODE)) { 813 751 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 814 752 son->name);
+9 -4
net/netfilter/nf_conntrack_netlink.c
··· 45 45 #include <net/netfilter/nf_conntrack_zones.h> 46 46 #include <net/netfilter/nf_conntrack_timestamp.h> 47 47 #include <net/netfilter/nf_conntrack_labels.h> 48 - #include <net/netfilter/nf_conntrack_seqadj.h> 49 48 #include <net/netfilter/nf_conntrack_synproxy.h> 50 49 #ifdef CONFIG_NF_NAT_NEEDED 51 50 #include <net/netfilter/nf_nat_core.h> ··· 1565 1566 static int ctnetlink_change_timeout(struct nf_conn *ct, 1566 1567 const struct nlattr * const cda[]) 1567 1568 { 1568 - u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1569 + u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 1569 1570 1570 - ct->timeout = nfct_time_stamp + timeout * HZ; 1571 + if (timeout > INT_MAX) 1572 + timeout = INT_MAX; 1573 + ct->timeout = nfct_time_stamp + (u32)timeout; 1571 1574 1572 1575 if (test_bit(IPS_DYING_BIT, &ct->status)) 1573 1576 return -ETIME; ··· 1769 1768 int err = -EINVAL; 1770 1769 struct nf_conntrack_helper *helper; 1771 1770 struct nf_conn_tstamp *tstamp; 1771 + u64 timeout; 1772 1772 1773 1773 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); 1774 1774 if (IS_ERR(ct)) ··· 1778 1776 if (!cda[CTA_TIMEOUT]) 1779 1777 goto err1; 1780 1778 1781 - ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 1779 + timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 1780 + if (timeout > INT_MAX) 1781 + timeout = INT_MAX; 1782 + ct->timeout = (u32)timeout + nfct_time_stamp; 1782 1783 1783 1784 rcu_read_lock(); 1784 1785 if (cda[CTA_HELP]) {
+3
net/netfilter/nf_conntrack_proto_tcp.c
··· 1039 1039 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && 1040 1040 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) 1041 1041 timeout = timeouts[TCP_CONNTRACK_UNACK]; 1042 + else if (ct->proto.tcp.last_win == 0 && 1043 + timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) 1044 + timeout = timeouts[TCP_CONNTRACK_RETRANS]; 1042 1045 else 1043 1046 timeout = timeouts[new_state]; 1044 1047 spin_unlock_bh(&ct->lock);
+7
net/netfilter/nf_tables_api.c
··· 5847 5847 return 0; 5848 5848 } 5849 5849 5850 + static void __net_exit nf_tables_exit_net(struct net *net) 5851 + { 5852 + WARN_ON_ONCE(!list_empty(&net->nft.af_info)); 5853 + WARN_ON_ONCE(!list_empty(&net->nft.commit_list)); 5854 + } 5855 + 5850 5856 int __nft_release_basechain(struct nft_ctx *ctx) 5851 5857 { 5852 5858 struct nft_rule *rule, *nr; ··· 5923 5917 5924 5918 static struct pernet_operations nf_tables_net_ops = { 5925 5919 .init = nf_tables_init_net, 5920 + .exit = nf_tables_exit_net, 5926 5921 }; 5927 5922 5928 5923 static int __init nf_tables_module_init(void)
+2
net/netfilter/nft_exthdr.c
··· 214 214 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, 215 215 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 }, 216 216 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, 217 + [NFTA_EXTHDR_OP] = { .type = NLA_U32 }, 218 + [NFTA_EXTHDR_SREG] = { .type = NLA_U32 }, 217 219 }; 218 220 219 221 static int nft_exthdr_init(const struct nft_ctx *ctx,
+9
net/netfilter/x_tables.c
··· 1729 1729 return 0; 1730 1730 } 1731 1731 1732 + static void __net_exit xt_net_exit(struct net *net) 1733 + { 1734 + int i; 1735 + 1736 + for (i = 0; i < NFPROTO_NUMPROTO; i++) 1737 + WARN_ON_ONCE(!list_empty(&net->xt.tables[i])); 1738 + } 1739 + 1732 1740 static struct pernet_operations xt_net_ops = { 1733 1741 .init = xt_net_init, 1742 + .exit = xt_net_exit, 1734 1743 }; 1735 1744 1736 1745 static int __init xt_init(void)
+6
net/netfilter/xt_bpf.c
··· 27 27 { 28 28 struct sock_fprog_kern program; 29 29 30 + if (len > XT_BPF_MAX_NUM_INSTR) 31 + return -EINVAL; 32 + 30 33 program.len = len; 31 34 program.filter = insns; 32 35 ··· 57 54 { 58 55 mm_segment_t oldfs = get_fs(); 59 56 int retval, fd; 57 + 58 + if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) 59 + return -EINVAL; 60 60 61 61 set_fs(KERNEL_DS); 62 62 fd = bpf_obj_get_user(path, 0);
+7
net/netfilter/xt_osf.c
··· 19 19 #include <linux/module.h> 20 20 #include <linux/kernel.h> 21 21 22 + #include <linux/capability.h> 22 23 #include <linux/if.h> 23 24 #include <linux/inetdevice.h> 24 25 #include <linux/ip.h> ··· 71 70 struct xt_osf_finger *kf = NULL, *sf; 72 71 int err = 0; 73 72 73 + if (!capable(CAP_NET_ADMIN)) 74 + return -EPERM; 75 + 74 76 if (!osf_attrs[OSF_ATTR_FINGER]) 75 77 return -EINVAL; 76 78 ··· 118 114 struct xt_osf_user_finger *f; 119 115 struct xt_osf_finger *sf; 120 116 int err = -ENOENT; 117 + 118 + if (!capable(CAP_NET_ADMIN)) 119 + return -EPERM; 121 120 122 121 if (!osf_attrs[OSF_ATTR_FINGER]) 123 122 return -EINVAL;
+3
net/netlink/af_netlink.c
··· 253 253 struct sock *sk = skb->sk; 254 254 int ret = -ENOMEM; 255 255 256 + if (!net_eq(dev_net(dev), sock_net(sk))) 257 + return 0; 258 + 256 259 dev_hold(dev); 257 260 258 261 if (is_vmalloc_addr(skb->head))
-1
net/sched/act_meta_mark.c
··· 22 22 #include <net/pkt_sched.h> 23 23 #include <uapi/linux/tc_act/tc_ife.h> 24 24 #include <net/tc_act/tc_ife.h> 25 - #include <linux/rtnetlink.h> 26 25 27 26 static int skbmark_encode(struct sk_buff *skb, void *skbdata, 28 27 struct tcf_meta_info *e)
-1
net/sched/act_meta_skbtcindex.c
··· 22 22 #include <net/pkt_sched.h> 23 23 #include <uapi/linux/tc_act/tc_ife.h> 24 24 #include <net/tc_act/tc_ife.h> 25 - #include <linux/rtnetlink.h> 26 25 27 26 static int skbtcindex_encode(struct sk_buff *skb, void *skbdata, 28 27 struct tcf_meta_info *e)
+2 -3
net/sched/cls_api.c
··· 23 23 #include <linux/skbuff.h> 24 24 #include <linux/init.h> 25 25 #include <linux/kmod.h> 26 - #include <linux/err.h> 27 26 #include <linux/slab.h> 28 27 #include <net/net_namespace.h> 29 28 #include <net/sock.h> ··· 351 352 { 352 353 struct tcf_chain *chain; 353 354 355 + if (!block) 356 + return; 354 357 /* Hold a refcnt for all chains, except 0, so that they don't disappear 355 358 * while we are iterating. 356 359 */ ··· 379 378 { 380 379 struct tcf_block_ext_info ei = {0, }; 381 380 382 - if (!block) 383 - return; 384 381 tcf_block_put_ext(block, block->q, &ei); 385 382 } 386 383
-1
net/sched/cls_u32.c
··· 45 45 #include <net/netlink.h> 46 46 #include <net/act_api.h> 47 47 #include <net/pkt_cls.h> 48 - #include <linux/netdevice.h> 49 48 #include <linux/idr.h> 50 49 51 50 struct tc_u_knode {
+2
net/sched/sch_api.c
··· 795 795 tcm->tcm_info = refcount_read(&q->refcnt); 796 796 if (nla_put_string(skb, TCA_KIND, q->ops->id)) 797 797 goto nla_put_failure; 798 + if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED))) 799 + goto nla_put_failure; 798 800 if (q->ops->dump && q->ops->dump(q, skb) < 0) 799 801 goto nla_put_failure; 800 802 qlen = q->q.qlen;
+6 -9
net/sched/sch_ingress.c
··· 68 68 struct net_device *dev = qdisc_dev(sch); 69 69 int err; 70 70 71 + net_inc_ingress_queue(); 72 + 71 73 mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); 72 74 73 75 q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; ··· 80 78 if (err) 81 79 return err; 82 80 83 - net_inc_ingress_queue(); 84 81 sch->flags |= TCQ_F_CPUSTATS; 85 82 86 83 return 0; ··· 173 172 struct net_device *dev = qdisc_dev(sch); 174 173 int err; 175 174 175 + net_inc_ingress_queue(); 176 + net_inc_egress_queue(); 177 + 176 178 mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); 177 179 178 180 q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; ··· 194 190 195 191 err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info); 196 192 if (err) 197 - goto err_egress_block_get; 198 - 199 - net_inc_ingress_queue(); 200 - net_inc_egress_queue(); 193 + return err; 201 194 202 195 sch->flags |= TCQ_F_CPUSTATS; 203 196 204 197 return 0; 205 - 206 - err_egress_block_get: 207 - tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); 208 - return err; 209 198 } 210 199 211 200 static void clsact_destroy(struct Qdisc *sch)
+15 -16
net/sched/sch_red.c
··· 157 157 .handle = sch->handle, 158 158 .parent = sch->parent, 159 159 }; 160 + int err; 160 161 161 162 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 162 163 return -EOPNOTSUPP; ··· 172 171 opt.command = TC_RED_DESTROY; 173 172 } 174 173 175 - return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); 174 + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); 175 + 176 + if (!err && enable) 177 + sch->flags |= TCQ_F_OFFLOADED; 178 + else 179 + sch->flags &= ~TCQ_F_OFFLOADED; 180 + 181 + return err; 176 182 } 177 183 178 184 static void red_destroy(struct Qdisc *sch) ··· 282 274 return red_change(sch, opt); 283 275 } 284 276 285 - static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt) 277 + static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt) 286 278 { 287 279 struct net_device *dev = qdisc_dev(sch); 288 280 struct tc_red_qopt_offload hw_stats = { ··· 294 286 .stats.qstats = &sch->qstats, 295 287 }, 296 288 }; 297 - int err; 298 289 299 - opt->flags &= ~TC_RED_OFFLOADED; 300 - if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 290 + if (!(sch->flags & TCQ_F_OFFLOADED)) 301 291 return 0; 302 292 303 - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, 304 - &hw_stats); 305 - if (err == -EOPNOTSUPP) 306 - return 0; 307 - 308 - if (!err) 309 - opt->flags |= TC_RED_OFFLOADED; 310 - 311 - return err; 293 + return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, 294 + &hw_stats); 312 295 } 313 296 314 297 static int red_dump(struct Qdisc *sch, struct sk_buff *skb) ··· 318 319 int err; 319 320 320 321 sch->qstats.backlog = q->qdisc->qstats.backlog; 321 - err = red_dump_offload(sch, &opt); 322 + err = red_dump_offload_stats(sch, &opt); 322 323 if (err) 323 324 goto nla_put_failure; 324 325 ··· 346 347 .marked = q->stats.prob_mark + q->stats.forced_mark, 347 348 }; 348 349 349 - if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc) { 350 + if (sch->flags & 
TCQ_F_OFFLOADED) { 350 351 struct red_stats hw_stats = {0}; 351 352 struct tc_red_qopt_offload hw_stats_request = { 352 353 .command = TC_RED_XSTATS,
+5 -1
net/sctp/socket.c
··· 3891 3891 struct sctp_association *asoc; 3892 3892 int retval = -EINVAL; 3893 3893 3894 - if (optlen < sizeof(struct sctp_reset_streams)) 3894 + if (optlen < sizeof(*params)) 3895 3895 return -EINVAL; 3896 3896 3897 3897 params = memdup_user(optval, optlen); 3898 3898 if (IS_ERR(params)) 3899 3899 return PTR_ERR(params); 3900 + 3901 + if (params->srs_number_streams * sizeof(__u16) > 3902 + optlen - sizeof(*params)) 3903 + goto out; 3900 3904 3901 3905 asoc = sctp_id2assoc(sk, params->srs_assoc_id); 3902 3906 if (!asoc)
+1 -1
net/tipc/socket.c
··· 1140 1140 __skb_dequeue(arrvq); 1141 1141 __skb_queue_tail(inputq, skb); 1142 1142 } 1143 - refcount_dec(&skb->users); 1143 + kfree_skb(skb); 1144 1144 spin_unlock_bh(&inputq->lock); 1145 1145 continue; 1146 1146 }
+38 -10
net/wireless/Makefile
··· 25 25 26 26 $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509) 27 27 @$(kecho) " GEN $@" 28 - @echo '#include "reg.h"' > $@ 29 - @echo 'const u8 shipped_regdb_certs[] = {' >> $@ 30 - @for f in $^ ; do hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ ; done 31 - @echo '};' >> $@ 32 - @echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);' >> $@ 28 + @(set -e; \ 29 + allf=""; \ 30 + for f in $^ ; do \ 31 + # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \ 32 + thisf=$$(od -An -v -tx1 < $$f | \ 33 + sed -e 's/ /\n/g' | \ 34 + sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \ 35 + sed -e 's/^/0x/;s/$$/,/'); \ 36 + # file should not be empty - maybe command substitution failed? \ 37 + test ! -z "$$thisf";\ 38 + allf=$$allf$$thisf;\ 39 + done; \ 40 + ( \ 41 + echo '#include "reg.h"'; \ 42 + echo 'const u8 shipped_regdb_certs[] = {'; \ 43 + echo "$$allf"; \ 44 + echo '};'; \ 45 + echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \ 46 + ) >> $@) 33 47 34 48 $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ 35 49 $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) 36 50 @$(kecho) " GEN $@" 37 - @echo '#include "reg.h"' > $@ 38 - @echo 'const u8 extra_regdb_certs[] = {' >> $@ 39 - @for f in $^ ; do test -f $$f && hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ || true ; done 40 - @echo '};' >> $@ 41 - @echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);' >> $@ 51 + @(set -e; \ 52 + allf=""; \ 53 + for f in $^ ; do \ 54 + # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \ 55 + thisf=$$(od -An -v -tx1 < $$f | \ 56 + sed -e 's/ /\n/g' | \ 57 + sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \ 58 + sed -e 's/^/0x/;s/$$/,/'); \ 59 + # file should not be empty - maybe command substitution failed? \ 60 + test ! 
-z "$$thisf";\ 61 + allf=$$allf$$thisf;\ 62 + done; \ 63 + ( \ 64 + echo '#include "reg.h"'; \ 65 + echo 'const u8 extra_regdb_certs[] = {'; \ 66 + echo "$$allf"; \ 67 + echo '};'; \ 68 + echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \ 69 + ) >> $@)
+7
tools/include/uapi/asm/bpf_perf_event.h
··· 1 + #if defined(__aarch64__) 2 + #include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h" 3 + #elif defined(__s390__) 4 + #include "../../arch/s390/include/uapi/asm/bpf_perf_event.h" 5 + #else 6 + #include <uapi/asm-generic/bpf_perf_event.h> 7 + #endif
+1 -12
tools/testing/selftests/bpf/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 - ifeq ($(srctree),) 4 - srctree := $(patsubst %/,%,$(dir $(CURDIR))) 5 - srctree := $(patsubst %/,%,$(dir $(srctree))) 6 - srctree := $(patsubst %/,%,$(dir $(srctree))) 7 - srctree := $(patsubst %/,%,$(dir $(srctree))) 8 - endif 9 - include $(srctree)/tools/scripts/Makefile.arch 10 - 11 - $(call detected_var,SRCARCH) 12 - 13 3 LIBDIR := ../../../lib 14 4 BPFDIR := $(LIBDIR)/bpf 15 5 APIDIR := ../../../include/uapi 16 - ASMDIR:= ../../../arch/$(ARCH)/include/uapi 17 6 GENDIR := ../../../../include/generated 18 7 GENHDR := $(GENDIR)/autoconf.h 19 8 ··· 10 21 GENFLAGS := -DHAVE_GENHDR 11 22 endif 12 23 13 - CFLAGS += -Wall -O2 -I$(APIDIR) -I$(ASMDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 24 + CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 14 25 LDLIBS += -lcap -lelf 15 26 16 27 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \