Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"Hopefully the last round of fixes this release, fingers crossed :)

1) Initialize static nf_conntrack_locks_all_lock properly, from
Florian Westphal.

2) Need to cancel pending work when destroying IDLETIMER entries,
from Liping Zhang.

3) Fix TX param usage when sending TSO over iwlwifi devices, from
Emmanuel Grumbach.

4) NFACCT quota params not validated properly, from Phil Turnbull.

5) Resolve more glibc vs. kernel header conflicts, from Mikko
Rapeli.

6) Missing IRQ free in ravb_close(), from Geert Uytterhoeven.

7) Fix infoleak in x25, from Kangjie Lu.

8) Similarly in thunderx driver, from Heinrich Schuchardt.

9) tc_ife.h uapi header not exported properly, from Jamal Hadi Salim.

10) Don't reenable PHY interrupts if device is in polling mode, from
Shaohui Xie.

11) Packet scheduler actions late binding was not being handled
properly at all, from Jamal Hadi Salim.

12) Fix binding of conntrack entries to helpers in openvswitch, from
Joe Stringer"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (21 commits)
gre: do not keep the GRE header around in collect metadata mode
openvswitch: Fix cached ct with helper.
net sched: ife action fix late binding
net sched: skbedit action fix late binding
net sched: simple action fix late binding
net sched: mirred action fix late binding
net sched: ipt action fix late binding
net sched: vlan action fix late binding
net: phylib: fix interrupts re-enablement in phy_start
tcp: refresh skb timestamp at retransmit time
net: nps_enet: bug fix - handle lost tx interrupts
net: nps_enet: Tx handler synchronization
export tc ife uapi header
net: thunderx: avoid exposing kernel stack
net: fix a kernel infoleak in x25 module
ravb: Add missing free_irq() call to ravb_close()
uapi glibc compat: fix compile errors when glibc net/if.h included before linux/if.h
netfilter: nfnetlink_acct: validate NFACCT_QUOTA parameter
iwlwifi: mvm: don't override the rate with the AMSDU len
netfilter: IDLETIMER: fix race condition when destroying the target
...

Changed files
+258 -86
drivers
net
ethernet
cavium
thunder
ezchip
renesas
phy
wireless
intel
iwlwifi
mvm
include
uapi
linux
net
+4
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 533 533 nicvf_config_vlan_stripping(nic, nic->netdev->features); 534 534 535 535 /* Enable Receive queue */ 536 + memset(&rq_cfg, 0, sizeof(struct rq_cfg)); 536 537 rq_cfg.ena = 1; 537 538 rq_cfg.tcp_ena = 0; 538 539 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); ··· 566 565 qidx, (u64)(cq->dmem.phys_base)); 567 566 568 567 /* Enable Completion queue */ 568 + memset(&cq_cfg, 0, sizeof(struct cq_cfg)); 569 569 cq_cfg.ena = 1; 570 570 cq_cfg.reset = 0; 571 571 cq_cfg.caching = 0; ··· 615 613 qidx, (u64)(sq->dmem.phys_base)); 616 614 617 615 /* Enable send queue & set queue size */ 616 + memset(&sq_cfg, 0, sizeof(struct sq_cfg)); 618 617 sq_cfg.ena = 1; 619 618 sq_cfg.reset = 0; 620 619 sq_cfg.ldwb = 0; ··· 652 649 653 650 /* Enable RBDR & set queue size */ 654 651 /* Buffer size should be in multiples of 128 bytes */ 652 + memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg)); 655 653 rbdr_cfg.ena = 1; 656 654 rbdr_cfg.reset = 0; 657 655 rbdr_cfg.ldwb = 0;
+24 -6
drivers/net/ethernet/ezchip/nps_enet.c
··· 145 145 u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; 146 146 147 147 /* Check if we got TX */ 148 - if (!priv->tx_packet_sent || tx_ctrl_ct) 148 + if (!priv->tx_skb || tx_ctrl_ct) 149 149 return; 150 150 151 151 /* Ack Tx ctrl register */ ··· 160 160 } 161 161 162 162 dev_kfree_skb(priv->tx_skb); 163 - priv->tx_packet_sent = false; 163 + priv->tx_skb = NULL; 164 164 165 165 if (netif_queue_stopped(ndev)) 166 166 netif_wake_queue(ndev); ··· 183 183 work_done = nps_enet_rx_handler(ndev); 184 184 if (work_done < budget) { 185 185 u32 buf_int_enable_value = 0; 186 + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); 187 + u32 tx_ctrl_ct = 188 + (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; 186 189 187 190 napi_complete(napi); 188 191 ··· 195 192 196 193 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 197 194 buf_int_enable_value); 195 + 196 + /* in case we will get a tx interrupt while interrupts 197 + * are masked, we will lose it since the tx is edge interrupt. 198 + * specifically, while executing the code section above, 199 + * between nps_enet_tx_handler and the interrupts enable, all 200 + * tx requests will be stuck until we will get an rx interrupt. 201 + * the two code lines below will solve this situation by 202 + * re-adding ourselves to the poll list. 
203 + */ 204 + 205 + if (priv->tx_skb && !tx_ctrl_ct) 206 + napi_reschedule(napi); 198 207 } 199 208 200 209 return work_done; ··· 232 217 u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; 233 218 u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; 234 219 235 - if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr) 220 + if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr) 236 221 if (likely(napi_schedule_prep(&priv->napi))) { 237 222 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); 238 223 __napi_schedule(&priv->napi); ··· 402 387 /* Write the length of the Frame */ 403 388 tx_ctrl_value |= length << TX_CTL_NT_SHIFT; 404 389 405 - /* Indicate SW is done */ 406 - priv->tx_packet_sent = true; 407 390 tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT; 408 391 /* Send Frame */ 409 392 nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value); ··· 478 465 s32 err; 479 466 480 467 /* Reset private variables */ 481 - priv->tx_packet_sent = false; 468 + priv->tx_skb = NULL; 482 469 priv->ge_mac_cfg_2_value = 0; 483 470 priv->ge_mac_cfg_3_value = 0; 484 471 ··· 546 533 netif_stop_queue(ndev); 547 534 548 535 priv->tx_skb = skb; 536 + 537 + /* make sure tx_skb is actually written to the memory 538 + * before the HW is informed and the IRQ is fired. 539 + */ 540 + wmb(); 549 541 550 542 nps_enet_send_frame(ndev, skb); 551 543
-2
drivers/net/ethernet/ezchip/nps_enet.h
··· 165 165 * struct nps_enet_priv - Storage of ENET's private information. 166 166 * @regs_base: Base address of ENET memory-mapped control registers. 167 167 * @irq: For RX/TX IRQ number. 168 - * @tx_packet_sent: SW indication if frame is being sent. 169 168 * @tx_skb: socket buffer of sent frame. 170 169 * @napi: Structure for NAPI. 171 170 */ 172 171 struct nps_enet_priv { 173 172 void __iomem *regs_base; 174 173 s32 irq; 175 - bool tx_packet_sent; 176 174 struct sk_buff *tx_skb; 177 175 struct napi_struct napi; 178 176 u32 ge_mac_cfg_2_value;
+2
drivers/net/ethernet/renesas/ravb_main.c
··· 1506 1506 priv->phydev = NULL; 1507 1507 } 1508 1508 1509 + if (priv->chip_id == RCAR_GEN3) 1510 + free_irq(priv->emac_irq, ndev); 1509 1511 free_irq(ndev->irq, ndev); 1510 1512 1511 1513 napi_disable(&priv->napi[RAVB_NC]);
+5 -3
drivers/net/phy/phy.c
··· 790 790 break; 791 791 case PHY_HALTED: 792 792 /* make sure interrupts are re-enabled for the PHY */ 793 - err = phy_enable_interrupts(phydev); 794 - if (err < 0) 795 - break; 793 + if (phydev->irq != PHY_POLL) { 794 + err = phy_enable_interrupts(phydev); 795 + if (err < 0) 796 + break; 797 + } 796 798 797 799 phydev->state = PHY_RESUMING; 798 800 do_resume = true;
+48 -35
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 105 105 struct iwl_tx_cmd *tx_cmd, 106 106 struct ieee80211_tx_info *info, u8 sta_id) 107 107 { 108 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 108 109 struct ieee80211_hdr *hdr = (void *)skb->data; 109 110 __le16 fc = hdr->frame_control; 110 111 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); ··· 186 185 tx_cmd->tx_flags = cpu_to_le32(tx_flags); 187 186 /* Total # bytes to be transmitted */ 188 187 tx_cmd->len = cpu_to_le16((u16)skb->len + 189 - (uintptr_t)info->driver_data[0]); 188 + (uintptr_t)skb_info->driver_data[0]); 190 189 tx_cmd->next_frame_len = 0; 191 190 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 192 191 tx_cmd->sta_id = sta_id; ··· 328 327 */ 329 328 static struct iwl_device_cmd * 330 329 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, 331 - int hdrlen, struct ieee80211_sta *sta, u8 sta_id) 330 + struct ieee80211_tx_info *info, int hdrlen, 331 + struct ieee80211_sta *sta, u8 sta_id) 332 332 { 333 333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 334 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 334 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 335 335 struct iwl_device_cmd *dev_cmd; 336 336 struct iwl_tx_cmd *tx_cmd; 337 337 ··· 352 350 353 351 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); 354 352 355 - memset(&info->status, 0, sizeof(info->status)); 356 - memset(info->driver_data, 0, sizeof(info->driver_data)); 353 + memset(&skb_info->status, 0, sizeof(skb_info->status)); 354 + memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); 357 355 358 - info->driver_data[1] = dev_cmd; 356 + skb_info->driver_data[1] = dev_cmd; 359 357 360 358 return dev_cmd; 361 359 } ··· 363 361 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) 364 362 { 365 363 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 366 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 364 + struct ieee80211_tx_info *skb_info = 
IEEE80211_SKB_CB(skb); 365 + struct ieee80211_tx_info info; 367 366 struct iwl_device_cmd *dev_cmd; 368 367 struct iwl_tx_cmd *tx_cmd; 369 368 u8 sta_id; 370 369 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 371 370 372 - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) 371 + memcpy(&info, skb->cb, sizeof(info)); 372 + 373 + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) 373 374 return -1; 374 375 375 - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 376 - (!info->control.vif || 377 - info->hw_queue != info->control.vif->cab_queue))) 376 + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 377 + (!info.control.vif || 378 + info.hw_queue != info.control.vif->cab_queue))) 378 379 return -1; 379 380 380 381 /* This holds the amsdu headers length */ 381 - info->driver_data[0] = (void *)(uintptr_t)0; 382 + skb_info->driver_data[0] = (void *)(uintptr_t)0; 382 383 383 384 /* 384 385 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used ··· 390 385 * and hence needs to be sent on the aux queue 391 386 */ 392 387 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && 393 - info->control.vif->type == NL80211_IFTYPE_STATION) 388 + info.control.vif->type == NL80211_IFTYPE_STATION) 394 389 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; 395 390 396 391 /* ··· 403 398 * AUX station. 
404 399 */ 405 400 sta_id = mvm->aux_sta.sta_id; 406 - if (info->control.vif) { 401 + if (info.control.vif) { 407 402 struct iwl_mvm_vif *mvmvif = 408 - iwl_mvm_vif_from_mac80211(info->control.vif); 403 + iwl_mvm_vif_from_mac80211(info.control.vif); 409 404 410 - if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 411 - info->control.vif->type == NL80211_IFTYPE_AP) 405 + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 406 + info.control.vif->type == NL80211_IFTYPE_AP) 412 407 sta_id = mvmvif->bcast_sta.sta_id; 413 - else if (info->control.vif->type == NL80211_IFTYPE_STATION && 408 + else if (info.control.vif->type == NL80211_IFTYPE_STATION && 414 409 is_multicast_ether_addr(hdr->addr1)) { 415 410 u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); 416 411 ··· 419 414 } 420 415 } 421 416 422 - IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); 417 + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue); 423 418 424 - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); 419 + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); 425 420 if (!dev_cmd) 426 421 return -1; 427 422 428 - /* From now on, we cannot access info->control */ 429 423 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 430 424 431 425 /* Copy MAC header from skb into command buffer */ 432 426 memcpy(tx_cmd->hdr, hdr, hdrlen); 433 427 434 - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { 428 + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) { 435 429 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); 436 430 return -1; 437 431 } ··· 449 445 450 446 #ifdef CONFIG_INET 451 447 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, 448 + struct ieee80211_tx_info *info, 452 449 struct ieee80211_sta *sta, 453 450 struct sk_buff_head *mpdus_skb) 454 451 { 455 452 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 456 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 457 453 
struct ieee80211_hdr *hdr = (void *)skb->data; 458 454 unsigned int mss = skb_shinfo(skb)->gso_size; 459 455 struct sk_buff *tmp, *next; ··· 548 544 549 545 /* This skb fits in one single A-MSDU */ 550 546 if (num_subframes * mss >= tcp_payload_len) { 547 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 548 + 551 549 /* 552 550 * Compute the length of all the data added for the A-MSDU. 553 551 * This will be used to compute the length to write in the TX ··· 558 552 * already had one set of SNAP / IP / TCP headers. 559 553 */ 560 554 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); 561 - info = IEEE80211_SKB_CB(skb); 562 555 amsdu_add = num_subframes * sizeof(struct ethhdr) + 563 556 (num_subframes - 1) * (snap_ip_tcp + pad); 564 557 /* This holds the amsdu headers length */ 565 - info->driver_data[0] = (void *)(uintptr_t)amsdu_add; 558 + skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add; 566 559 567 560 __skb_queue_tail(mpdus_skb, skb); 568 561 return 0; ··· 601 596 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); 602 597 603 598 if (tcp_payload_len > mss) { 599 + struct ieee80211_tx_info *skb_info = 600 + IEEE80211_SKB_CB(tmp); 601 + 604 602 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); 605 - info = IEEE80211_SKB_CB(tmp); 606 603 amsdu_add = num_subframes * sizeof(struct ethhdr) + 607 604 (num_subframes - 1) * (snap_ip_tcp + pad); 608 - info->driver_data[0] = (void *)(uintptr_t)amsdu_add; 605 + skb_info->driver_data[0] = 606 + (void *)(uintptr_t)amsdu_add; 609 607 skb_shinfo(tmp)->gso_size = mss; 610 608 } else { 611 609 qc = ieee80211_get_qos_ctl((void *)tmp->data); ··· 630 622 } 631 623 #else /* CONFIG_INET */ 632 624 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, 625 + struct ieee80211_tx_info *info, 633 626 struct ieee80211_sta *sta, 634 627 struct sk_buff_head *mpdus_skb) 635 628 { ··· 645 636 * Sets the fields in the Tx cmd that are crypto related 646 637 */ 647 638 static int iwl_mvm_tx_mpdu(struct 
iwl_mvm *mvm, struct sk_buff *skb, 639 + struct ieee80211_tx_info *info, 648 640 struct ieee80211_sta *sta) 649 641 { 650 642 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 651 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 652 643 struct iwl_mvm_sta *mvmsta; 653 644 struct iwl_device_cmd *dev_cmd; 654 645 struct iwl_tx_cmd *tx_cmd; ··· 669 660 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 670 661 return -1; 671 662 672 - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); 663 + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, 664 + sta, mvmsta->sta_id); 673 665 if (!dev_cmd) 674 666 goto drop; 675 667 ··· 746 736 struct ieee80211_sta *sta) 747 737 { 748 738 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 749 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 739 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 740 + struct ieee80211_tx_info info; 750 741 struct sk_buff_head mpdus_skbs; 751 742 unsigned int payload_len; 752 743 int ret; ··· 758 747 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 759 748 return -1; 760 749 750 + memcpy(&info, skb->cb, sizeof(info)); 751 + 761 752 /* This holds the amsdu headers length */ 762 - info->driver_data[0] = (void *)(uintptr_t)0; 753 + skb_info->driver_data[0] = (void *)(uintptr_t)0; 763 754 764 755 if (!skb_is_gso(skb)) 765 - return iwl_mvm_tx_mpdu(mvm, skb, sta); 756 + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 766 757 767 758 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - 768 759 tcp_hdrlen(skb) + skb->data_len; 769 760 770 761 if (payload_len <= skb_shinfo(skb)->gso_size) 771 - return iwl_mvm_tx_mpdu(mvm, skb, sta); 762 + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 772 763 773 764 __skb_queue_head_init(&mpdus_skbs); 774 765 775 - ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs); 766 + ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); 776 767 if (ret) 777 768 return ret; 778 769 ··· 
784 771 while (!skb_queue_empty(&mpdus_skbs)) { 785 772 skb = __skb_dequeue(&mpdus_skbs); 786 773 787 - ret = iwl_mvm_tx_mpdu(mvm, skb, sta); 774 + ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 788 775 if (ret) { 789 776 __skb_queue_purge(&mpdus_skbs); 790 777 return ret;
+28
include/uapi/linux/if.h
··· 19 19 #ifndef _LINUX_IF_H 20 20 #define _LINUX_IF_H 21 21 22 + #include <linux/libc-compat.h> /* for compatibility with glibc */ 22 23 #include <linux/types.h> /* for "__kernel_caddr_t" et al */ 23 24 #include <linux/socket.h> /* for "struct sockaddr" et al */ 24 25 #include <linux/compiler.h> /* for "__user" et al */ 25 26 27 + #if __UAPI_DEF_IF_IFNAMSIZ 26 28 #define IFNAMSIZ 16 29 + #endif /* __UAPI_DEF_IF_IFNAMSIZ */ 27 30 #define IFALIASZ 256 28 31 #include <linux/hdlc/ioctl.h> 29 32 33 + /* For glibc compatibility. An empty enum does not compile. */ 34 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \ 35 + __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 30 36 /** 31 37 * enum net_device_flags - &struct net_device flags 32 38 * ··· 74 68 * @IFF_ECHO: echo sent packets. Volatile. 75 69 */ 76 70 enum net_device_flags { 71 + /* for compatibility with glibc net/if.h */ 72 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS 77 73 IFF_UP = 1<<0, /* sysfs */ 78 74 IFF_BROADCAST = 1<<1, /* volatile */ 79 75 IFF_DEBUG = 1<<2, /* sysfs */ ··· 92 84 IFF_PORTSEL = 1<<13, /* sysfs */ 93 85 IFF_AUTOMEDIA = 1<<14, /* sysfs */ 94 86 IFF_DYNAMIC = 1<<15, /* sysfs */ 87 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */ 88 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 95 89 IFF_LOWER_UP = 1<<16, /* volatile */ 96 90 IFF_DORMANT = 1<<17, /* volatile */ 97 91 IFF_ECHO = 1<<18, /* volatile */ 92 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ 98 93 }; 94 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */ 99 95 96 + /* for compatibility with glibc net/if.h */ 97 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS 100 98 #define IFF_UP IFF_UP 101 99 #define IFF_BROADCAST IFF_BROADCAST 102 100 #define IFF_DEBUG IFF_DEBUG ··· 119 105 #define IFF_PORTSEL IFF_PORTSEL 120 106 #define IFF_AUTOMEDIA IFF_AUTOMEDIA 121 107 #define IFF_DYNAMIC IFF_DYNAMIC 108 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */ 109 + 110 + #if 
__UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 122 111 #define IFF_LOWER_UP IFF_LOWER_UP 123 112 #define IFF_DORMANT IFF_DORMANT 124 113 #define IFF_ECHO IFF_ECHO 114 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ 125 115 126 116 #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ 127 117 IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) ··· 184 166 * being very small might be worth keeping for clean configuration. 185 167 */ 186 168 169 + /* for compatibility with glibc net/if.h */ 170 + #if __UAPI_DEF_IF_IFMAP 187 171 struct ifmap { 188 172 unsigned long mem_start; 189 173 unsigned long mem_end; ··· 195 175 unsigned char port; 196 176 /* 3 bytes spare */ 197 177 }; 178 + #endif /* __UAPI_DEF_IF_IFMAP */ 198 179 199 180 struct if_settings { 200 181 unsigned int type; /* Type of physical device or protocol */ ··· 221 200 * remainder may be interface specific. 222 201 */ 223 202 203 + /* for compatibility with glibc net/if.h */ 204 + #if __UAPI_DEF_IF_IFREQ 224 205 struct ifreq { 225 206 #define IFHWADDRLEN 6 226 207 union ··· 246 223 struct if_settings ifru_settings; 247 224 } ifr_ifru; 248 225 }; 226 + #endif /* __UAPI_DEF_IF_IFREQ */ 249 227 250 228 #define ifr_name ifr_ifrn.ifrn_name /* interface name */ 251 229 #define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */ ··· 273 249 * must know all networks accessible). 274 250 */ 275 251 252 + /* for compatibility with glibc net/if.h */ 253 + #if __UAPI_DEF_IF_IFCONF 276 254 struct ifconf { 277 255 int ifc_len; /* size of buffer */ 278 256 union { ··· 282 256 struct ifreq __user *ifcu_req; 283 257 } ifc_ifcu; 284 258 }; 259 + #endif /* __UAPI_DEF_IF_IFCONF */ 260 + 285 261 #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ 286 262 #define ifc_req ifc_ifcu.ifcu_req /* array of structures */ 287 263
+44
include/uapi/linux/libc-compat.h
··· 51 51 /* We have included glibc headers... */ 52 52 #if defined(__GLIBC__) 53 53 54 + /* Coordinate with glibc net/if.h header. */ 55 + #if defined(_NET_IF_H) 56 + 57 + /* GLIBC headers included first so don't define anything 58 + * that would already be defined. */ 59 + 60 + #define __UAPI_DEF_IF_IFCONF 0 61 + #define __UAPI_DEF_IF_IFMAP 0 62 + #define __UAPI_DEF_IF_IFNAMSIZ 0 63 + #define __UAPI_DEF_IF_IFREQ 0 64 + /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 65 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0 66 + /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 67 + #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 68 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 69 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ 70 + 71 + #else /* _NET_IF_H */ 72 + 73 + /* Linux headers included first, and we must define everything 74 + * we need. The expectation is that glibc will check the 75 + * __UAPI_DEF_* defines and adjust appropriately. */ 76 + 77 + #define __UAPI_DEF_IF_IFCONF 1 78 + #define __UAPI_DEF_IF_IFMAP 1 79 + #define __UAPI_DEF_IF_IFNAMSIZ 1 80 + #define __UAPI_DEF_IF_IFREQ 1 81 + /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 82 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 83 + /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 84 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 85 + 86 + #endif /* _NET_IF_H */ 87 + 54 88 /* Coordinate with glibc netinet/in.h header. */ 55 89 #if defined(_NETINET_IN_H) 56 90 ··· 150 116 * or we are being included in the kernel, then define everything 151 117 * that we need. 
*/ 152 118 #else /* !defined(__GLIBC__) */ 119 + 120 + /* Definitions for if.h */ 121 + #define __UAPI_DEF_IF_IFCONF 1 122 + #define __UAPI_DEF_IF_IFMAP 1 123 + #define __UAPI_DEF_IF_IFNAMSIZ 1 124 + #define __UAPI_DEF_IF_IFREQ 1 125 + /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 126 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 127 + /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 128 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 153 129 154 130 /* Definitions for in.h */ 155 131 #define __UAPI_DEF_IN_ADDR 1
+1
include/uapi/linux/tc_act/Kbuild
··· 10 10 header-y += tc_vlan.h 11 11 header-y += tc_bpf.h 12 12 header-y += tc_connmark.h 13 + header-y += tc_ife.h
+6 -1
net/ipv4/ip_gre.c
··· 398 398 iph->saddr, iph->daddr, tpi->key); 399 399 400 400 if (tunnel) { 401 - skb_pop_mac_header(skb); 401 + if (tunnel->dev->type != ARPHRD_NONE) 402 + skb_pop_mac_header(skb); 403 + else 404 + skb_reset_mac_header(skb); 402 405 if (tunnel->collect_md) { 403 406 __be16 flags; 404 407 __be64 tun_id; ··· 1034 1031 struct ip_tunnel *t = netdev_priv(dev); 1035 1032 1036 1033 t->collect_md = true; 1034 + if (dev->type == ARPHRD_IPGRE) 1035 + dev->type = ARPHRD_NONE; 1037 1036 } 1038 1037 } 1039 1038
+4 -2
net/ipv4/tcp_output.c
··· 2640 2640 */ 2641 2641 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2642 2642 skb_headroom(skb) >= 0xFFFF)) { 2643 - struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2644 - GFP_ATOMIC); 2643 + struct sk_buff *nskb; 2644 + 2645 + skb_mstamp_get(&skb->skb_mstamp); 2646 + nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 2645 2647 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2646 2648 -ENOBUFS; 2647 2649 } else {
+1 -1
net/netfilter/nf_conntrack_core.c
··· 66 66 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); 67 67 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); 68 68 69 - static __read_mostly spinlock_t nf_conntrack_locks_all_lock; 69 + static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 70 70 static __read_mostly bool nf_conntrack_locks_all; 71 71 72 72 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+1
net/netfilter/xt_IDLETIMER.c
··· 236 236 237 237 list_del(&info->timer->entry); 238 238 del_timer_sync(&info->timer->timer); 239 + cancel_work_sync(&info->timer->work); 239 240 sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr); 240 241 kfree(info->timer->attr.attr.name); 241 242 kfree(info->timer);
+13
net/openvswitch/conntrack.c
··· 776 776 return -EINVAL; 777 777 } 778 778 779 + /* Userspace may decide to perform a ct lookup without a helper 780 + * specified followed by a (recirculate and) commit with one. 781 + * Therefore, for unconfirmed connections which we will commit, 782 + * we need to attach the helper here. 783 + */ 784 + if (!nf_ct_is_confirmed(ct) && info->commit && 785 + info->helper && !nfct_help(ct)) { 786 + int err = __nf_ct_try_assign_helper(ct, info->ct, 787 + GFP_ATOMIC); 788 + if (err) 789 + return err; 790 + } 791 + 779 792 /* Call the helper only if: 780 793 * - nf_conntrack_in() was executed above ("!cached") for a 781 794 * confirmed connection, or
+10 -4
net/sched/act_ife.c
··· 423 423 u16 ife_type = 0; 424 424 u8 *daddr = NULL; 425 425 u8 *saddr = NULL; 426 - int ret = 0; 426 + int ret = 0, exists = 0; 427 427 int err; 428 428 429 429 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); ··· 435 435 436 436 parm = nla_data(tb[TCA_IFE_PARMS]); 437 437 438 + exists = tcf_hash_check(tn, parm->index, a, bind); 439 + if (exists && bind) 440 + return 0; 441 + 438 442 if (parm->flags & IFE_ENCODE) { 439 443 /* Until we get issued the ethertype, we cant have 440 444 * a default.. 441 445 **/ 442 446 if (!tb[TCA_IFE_TYPE]) { 447 + if (exists) 448 + tcf_hash_release(a, bind); 443 449 pr_info("You MUST pass etherype for encoding\n"); 444 450 return -EINVAL; 445 451 } 446 452 } 447 453 448 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 454 + if (!exists) { 449 455 ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife), 450 456 bind, false); 451 457 if (ret) 452 458 return ret; 453 459 ret = ACT_P_CREATED; 454 460 } else { 455 - if (bind) /* dont override defaults */ 456 - return 0; 457 461 tcf_hash_release(a, bind); 458 462 if (!ovr) 459 463 return -EEXIST; ··· 499 495 NULL); 500 496 if (err) { 501 497 metadata_parse_err: 498 + if (exists) 499 + tcf_hash_release(a, bind); 502 500 if (ret == ACT_P_CREATED) 503 501 _tcf_ife_cleanup(a, bind); 504 502
+12 -7
net/sched/act_ipt.c
··· 96 96 struct tcf_ipt *ipt; 97 97 struct xt_entry_target *td, *t; 98 98 char *tname; 99 - int ret = 0, err; 99 + int ret = 0, err, exists = 0; 100 100 u32 hook = 0; 101 101 u32 index = 0; 102 102 ··· 107 107 if (err < 0) 108 108 return err; 109 109 110 - if (tb[TCA_IPT_HOOK] == NULL) 110 + if (tb[TCA_IPT_INDEX] != NULL) 111 + index = nla_get_u32(tb[TCA_IPT_INDEX]); 112 + 113 + exists = tcf_hash_check(tn, index, a, bind); 114 + if (exists && bind) 115 + return 0; 116 + 117 + if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) { 118 + if (exists) 119 + tcf_hash_release(a, bind); 111 120 return -EINVAL; 112 - if (tb[TCA_IPT_TARG] == NULL) 113 - return -EINVAL; 121 + } 114 122 115 123 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); 116 124 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) 117 125 return -EINVAL; 118 - 119 - if (tb[TCA_IPT_INDEX] != NULL) 120 - index = nla_get_u32(tb[TCA_IPT_INDEX]); 121 126 122 127 if (!tcf_hash_check(tn, index, a, bind)) { 123 128 ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
+13 -6
net/sched/act_mirred.c
··· 61 61 struct tc_mirred *parm; 62 62 struct tcf_mirred *m; 63 63 struct net_device *dev; 64 - int ret, ok_push = 0; 64 + int ret, ok_push = 0, exists = 0; 65 65 66 66 if (nla == NULL) 67 67 return -EINVAL; ··· 71 71 if (tb[TCA_MIRRED_PARMS] == NULL) 72 72 return -EINVAL; 73 73 parm = nla_data(tb[TCA_MIRRED_PARMS]); 74 + 75 + exists = tcf_hash_check(tn, parm->index, a, bind); 76 + if (exists && bind) 77 + return 0; 78 + 74 79 switch (parm->eaction) { 75 80 case TCA_EGRESS_MIRROR: 76 81 case TCA_EGRESS_REDIR: 77 82 break; 78 83 default: 84 + if (exists) 85 + tcf_hash_release(a, bind); 79 86 return -EINVAL; 80 87 } 81 88 if (parm->ifindex) { 82 89 dev = __dev_get_by_index(net, parm->ifindex); 83 - if (dev == NULL) 90 + if (dev == NULL) { 91 + if (exists) 92 + tcf_hash_release(a, bind); 84 93 return -ENODEV; 94 + } 85 95 switch (dev->type) { 86 96 case ARPHRD_TUNNEL: 87 97 case ARPHRD_TUNNEL6: ··· 109 99 dev = NULL; 110 100 } 111 101 112 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 102 + if (!exists) { 113 103 if (dev == NULL) 114 104 return -EINVAL; 115 105 ret = tcf_hash_create(tn, parm->index, est, a, ··· 118 108 return ret; 119 109 ret = ACT_P_CREATED; 120 110 } else { 121 - if (bind) 122 - return 0; 123 - 124 111 tcf_hash_release(a, bind); 125 112 if (!ovr) 126 113 return -EEXIST;
+12 -6
net/sched/act_simple.c
··· 87 87 struct tc_defact *parm; 88 88 struct tcf_defact *d; 89 89 char *defdata; 90 - int ret = 0, err; 90 + int ret = 0, err, exists = 0; 91 91 92 92 if (nla == NULL) 93 93 return -EINVAL; ··· 99 99 if (tb[TCA_DEF_PARMS] == NULL) 100 100 return -EINVAL; 101 101 102 - if (tb[TCA_DEF_DATA] == NULL) 103 - return -EINVAL; 104 102 105 103 parm = nla_data(tb[TCA_DEF_PARMS]); 104 + exists = tcf_hash_check(tn, parm->index, a, bind); 105 + if (exists && bind) 106 + return 0; 107 + 108 + if (tb[TCA_DEF_DATA] == NULL) { 109 + if (exists) 110 + tcf_hash_release(a, bind); 111 + return -EINVAL; 112 + } 113 + 106 114 defdata = nla_data(tb[TCA_DEF_DATA]); 107 115 108 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 116 + if (!exists) { 109 117 ret = tcf_hash_create(tn, parm->index, est, a, 110 118 sizeof(*d), bind, false); 111 119 if (ret) ··· 130 122 } else { 131 123 d = to_defact(a); 132 124 133 - if (bind) 134 - return 0; 135 125 tcf_hash_release(a, bind); 136 126 if (!ovr) 137 127 return -EEXIST;
+11 -7
net/sched/act_skbedit.c
··· 69 69 struct tcf_skbedit *d; 70 70 u32 flags = 0, *priority = NULL, *mark = NULL; 71 71 u16 *queue_mapping = NULL; 72 - int ret = 0, err; 72 + int ret = 0, err, exists = 0; 73 73 74 74 if (nla == NULL) 75 75 return -EINVAL; ··· 96 96 mark = nla_data(tb[TCA_SKBEDIT_MARK]); 97 97 } 98 98 99 - if (!flags) 100 - return -EINVAL; 101 - 102 99 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); 103 100 104 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 101 + exists = tcf_hash_check(tn, parm->index, a, bind); 102 + if (exists && bind) 103 + return 0; 104 + 105 + if (!flags) { 106 + tcf_hash_release(a, bind); 107 + return -EINVAL; 108 + } 109 + 110 + if (!exists) { 105 111 ret = tcf_hash_create(tn, parm->index, est, a, 106 112 sizeof(*d), bind, false); 107 113 if (ret) ··· 117 111 ret = ACT_P_CREATED; 118 112 } else { 119 113 d = to_skbedit(a); 120 - if (bind) 121 - return 0; 122 114 tcf_hash_release(a, bind); 123 115 if (!ovr) 124 116 return -EEXIST;
+16 -6
net/sched/act_vlan.c
··· 77 77 int action; 78 78 __be16 push_vid = 0; 79 79 __be16 push_proto = 0; 80 - int ret = 0; 80 + int ret = 0, exists = 0; 81 81 int err; 82 82 83 83 if (!nla) ··· 90 90 if (!tb[TCA_VLAN_PARMS]) 91 91 return -EINVAL; 92 92 parm = nla_data(tb[TCA_VLAN_PARMS]); 93 + exists = tcf_hash_check(tn, parm->index, a, bind); 94 + if (exists && bind) 95 + return 0; 96 + 93 97 switch (parm->v_action) { 94 98 case TCA_VLAN_ACT_POP: 95 99 break; 96 100 case TCA_VLAN_ACT_PUSH: 97 - if (!tb[TCA_VLAN_PUSH_VLAN_ID]) 101 + if (!tb[TCA_VLAN_PUSH_VLAN_ID]) { 102 + if (exists) 103 + tcf_hash_release(a, bind); 98 104 return -EINVAL; 105 + } 99 106 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); 100 - if (push_vid >= VLAN_VID_MASK) 107 + if (push_vid >= VLAN_VID_MASK) { 108 + if (exists) 109 + tcf_hash_release(a, bind); 101 110 return -ERANGE; 111 + } 102 112 103 113 if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) { 104 114 push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]); ··· 124 114 } 125 115 break; 126 116 default: 117 + if (exists) 118 + tcf_hash_release(a, bind); 127 119 return -EINVAL; 128 120 } 129 121 action = parm->v_action; 130 122 131 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 123 + if (!exists) { 132 124 ret = tcf_hash_create(tn, parm->index, est, a, 133 125 sizeof(*v), bind, false); 134 126 if (ret) ··· 138 126 139 127 ret = ACT_P_CREATED; 140 128 } else { 141 - if (bind) 142 - return 0; 143 129 tcf_hash_release(a, bind); 144 130 if (!ovr) 145 131 return -EEXIST;
+1
net/x25/x25_facilities.c
··· 277 277 278 278 memset(&theirs, 0, sizeof(theirs)); 279 279 memcpy(new, ours, sizeof(*new)); 280 + memset(dte, 0, sizeof(*dte)); 280 281 281 282 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 282 283 if (len < 0)