Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"Hopefully the last round of fixes this release, fingers crossed :)

1) Initialize static nf_conntrack_locks_all_lock properly, from
Florian Westphal.

2) Need to cancel pending work when destroying IDLETIMER entries,
from Liping Zhang.

3) Fix TX param usage when sending TSO over iwlwifi devices, from
Emmanuel Grumbach.

4) NFACCT quota params not validated properly, from Phil Turnbull.

5) Resolve more glibc vs. kernel header conflicts, from Mikko
Rapeli.

6) Missing IRQ free in ravb_close(), from Geert Uytterhoeven.

7) Fix infoleak in x25, from Kangjie Lu.

8) Similarly in thunderx driver, from Heinrich Schuchardt.

9) tc_ife.h uapi header not exported properly, from Jamal Hadi Salim.

10) Don't reenable PHY interrupts if device is in polling mode, from
Shaohui Xie.

11) Packet scheduler actions late binding was not being handled
properly at all, from Jamal Hadi Salim.

12) Fix binding of conntrack entries to helpers in openvswitch, from
Joe Stringer"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (21 commits)
gre: do not keep the GRE header around in collect metadata mode
openvswitch: Fix cached ct with helper.
net sched: ife action fix late binding
net sched: skbedit action fix late binding
net sched: simple action fix late binding
net sched: mirred action fix late binding
net sched: ipt action fix late binding
net sched: vlan action fix late binding
net: phylib: fix interrupts re-enablement in phy_start
tcp: refresh skb timestamp at retransmit time
net: nps_enet: bug fix - handle lost tx interrupts
net: nps_enet: Tx handler synchronization
export tc ife uapi header
net: thunderx: avoid exposing kernel stack
net: fix a kernel infoleak in x25 module
ravb: Add missing free_irq() call to ravb_close()
uapi glibc compat: fix compile errors when glibc net/if.h included before linux/if.h
netfilter: nfnetlink_acct: validate NFACCT_QUOTA parameter
iwlwifi: mvm: don't override the rate with the AMSDU len
netfilter: IDLETIMER: fix race condition when destroy the target
...

+258 -86
+4
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 533 nicvf_config_vlan_stripping(nic, nic->netdev->features); 534 535 /* Enable Receive queue */ 536 rq_cfg.ena = 1; 537 rq_cfg.tcp_ena = 0; 538 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); ··· 566 qidx, (u64)(cq->dmem.phys_base)); 567 568 /* Enable Completion queue */ 569 cq_cfg.ena = 1; 570 cq_cfg.reset = 0; 571 cq_cfg.caching = 0; ··· 615 qidx, (u64)(sq->dmem.phys_base)); 616 617 /* Enable send queue & set queue size */ 618 sq_cfg.ena = 1; 619 sq_cfg.reset = 0; 620 sq_cfg.ldwb = 0; ··· 652 653 /* Enable RBDR & set queue size */ 654 /* Buffer size should be in multiples of 128 bytes */ 655 rbdr_cfg.ena = 1; 656 rbdr_cfg.reset = 0; 657 rbdr_cfg.ldwb = 0;
··· 533 nicvf_config_vlan_stripping(nic, nic->netdev->features); 534 535 /* Enable Receive queue */ 536 + memset(&rq_cfg, 0, sizeof(struct rq_cfg)); 537 rq_cfg.ena = 1; 538 rq_cfg.tcp_ena = 0; 539 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); ··· 565 qidx, (u64)(cq->dmem.phys_base)); 566 567 /* Enable Completion queue */ 568 + memset(&cq_cfg, 0, sizeof(struct cq_cfg)); 569 cq_cfg.ena = 1; 570 cq_cfg.reset = 0; 571 cq_cfg.caching = 0; ··· 613 qidx, (u64)(sq->dmem.phys_base)); 614 615 /* Enable send queue & set queue size */ 616 + memset(&sq_cfg, 0, sizeof(struct sq_cfg)); 617 sq_cfg.ena = 1; 618 sq_cfg.reset = 0; 619 sq_cfg.ldwb = 0; ··· 649 650 /* Enable RBDR & set queue size */ 651 /* Buffer size should be in multiples of 128 bytes */ 652 + memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg)); 653 rbdr_cfg.ena = 1; 654 rbdr_cfg.reset = 0; 655 rbdr_cfg.ldwb = 0;
+24 -6
drivers/net/ethernet/ezchip/nps_enet.c
··· 145 u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; 146 147 /* Check if we got TX */ 148 - if (!priv->tx_packet_sent || tx_ctrl_ct) 149 return; 150 151 /* Ack Tx ctrl register */ ··· 160 } 161 162 dev_kfree_skb(priv->tx_skb); 163 - priv->tx_packet_sent = false; 164 165 if (netif_queue_stopped(ndev)) 166 netif_wake_queue(ndev); ··· 183 work_done = nps_enet_rx_handler(ndev); 184 if (work_done < budget) { 185 u32 buf_int_enable_value = 0; 186 187 napi_complete(napi); 188 ··· 195 196 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 197 buf_int_enable_value); 198 } 199 200 return work_done; ··· 232 u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; 233 u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; 234 235 - if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr) 236 if (likely(napi_schedule_prep(&priv->napi))) { 237 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); 238 __napi_schedule(&priv->napi); ··· 402 /* Write the length of the Frame */ 403 tx_ctrl_value |= length << TX_CTL_NT_SHIFT; 404 405 - /* Indicate SW is done */ 406 - priv->tx_packet_sent = true; 407 tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT; 408 /* Send Frame */ 409 nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value); ··· 478 s32 err; 479 480 /* Reset private variables */ 481 - priv->tx_packet_sent = false; 482 priv->ge_mac_cfg_2_value = 0; 483 priv->ge_mac_cfg_3_value = 0; 484 ··· 546 netif_stop_queue(ndev); 547 548 priv->tx_skb = skb; 549 550 nps_enet_send_frame(ndev, skb); 551
··· 145 u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; 146 147 /* Check if we got TX */ 148 + if (!priv->tx_skb || tx_ctrl_ct) 149 return; 150 151 /* Ack Tx ctrl register */ ··· 160 } 161 162 dev_kfree_skb(priv->tx_skb); 163 + priv->tx_skb = NULL; 164 165 if (netif_queue_stopped(ndev)) 166 netif_wake_queue(ndev); ··· 183 work_done = nps_enet_rx_handler(ndev); 184 if (work_done < budget) { 185 u32 buf_int_enable_value = 0; 186 + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); 187 + u32 tx_ctrl_ct = 188 + (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; 189 190 napi_complete(napi); 191 ··· 192 193 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 194 buf_int_enable_value); 195 + 196 + /* in case we will get a tx interrupt while interrupts 197 + * are masked, we will lose it since the tx is edge interrupt. 198 + * specifically, while executing the code section above, 199 + * between nps_enet_tx_handler and the interrupts enable, all 200 + * tx requests will be stuck until we will get an rx interrupt. 201 + * the two code lines below will solve this situation by 202 + * re-adding ourselves to the poll list. 
203 + */ 204 + 205 + if (priv->tx_skb && !tx_ctrl_ct) 206 + napi_reschedule(napi); 207 } 208 209 return work_done; ··· 217 u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; 218 u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; 219 220 + if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr) 221 if (likely(napi_schedule_prep(&priv->napi))) { 222 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); 223 __napi_schedule(&priv->napi); ··· 387 /* Write the length of the Frame */ 388 tx_ctrl_value |= length << TX_CTL_NT_SHIFT; 389 390 tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT; 391 /* Send Frame */ 392 nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value); ··· 465 s32 err; 466 467 /* Reset private variables */ 468 + priv->tx_skb = NULL; 469 priv->ge_mac_cfg_2_value = 0; 470 priv->ge_mac_cfg_3_value = 0; 471 ··· 533 netif_stop_queue(ndev); 534 535 priv->tx_skb = skb; 536 + 537 + /* make sure tx_skb is actually written to the memory 538 + * before the HW is informed and the IRQ is fired. 539 + */ 540 + wmb(); 541 542 nps_enet_send_frame(ndev, skb); 543
-2
drivers/net/ethernet/ezchip/nps_enet.h
··· 165 * struct nps_enet_priv - Storage of ENET's private information. 166 * @regs_base: Base address of ENET memory-mapped control registers. 167 * @irq: For RX/TX IRQ number. 168 - * @tx_packet_sent: SW indication if frame is being sent. 169 * @tx_skb: socket buffer of sent frame. 170 * @napi: Structure for NAPI. 171 */ 172 struct nps_enet_priv { 173 void __iomem *regs_base; 174 s32 irq; 175 - bool tx_packet_sent; 176 struct sk_buff *tx_skb; 177 struct napi_struct napi; 178 u32 ge_mac_cfg_2_value;
··· 165 * struct nps_enet_priv - Storage of ENET's private information. 166 * @regs_base: Base address of ENET memory-mapped control registers. 167 * @irq: For RX/TX IRQ number. 168 * @tx_skb: socket buffer of sent frame. 169 * @napi: Structure for NAPI. 170 */ 171 struct nps_enet_priv { 172 void __iomem *regs_base; 173 s32 irq; 174 struct sk_buff *tx_skb; 175 struct napi_struct napi; 176 u32 ge_mac_cfg_2_value;
+2
drivers/net/ethernet/renesas/ravb_main.c
··· 1506 priv->phydev = NULL; 1507 } 1508 1509 free_irq(ndev->irq, ndev); 1510 1511 napi_disable(&priv->napi[RAVB_NC]);
··· 1506 priv->phydev = NULL; 1507 } 1508 1509 + if (priv->chip_id == RCAR_GEN3) 1510 + free_irq(priv->emac_irq, ndev); 1511 free_irq(ndev->irq, ndev); 1512 1513 napi_disable(&priv->napi[RAVB_NC]);
+5 -3
drivers/net/phy/phy.c
··· 790 break; 791 case PHY_HALTED: 792 /* make sure interrupts are re-enabled for the PHY */ 793 - err = phy_enable_interrupts(phydev); 794 - if (err < 0) 795 - break; 796 797 phydev->state = PHY_RESUMING; 798 do_resume = true;
··· 790 break; 791 case PHY_HALTED: 792 /* make sure interrupts are re-enabled for the PHY */ 793 + if (phydev->irq != PHY_POLL) { 794 + err = phy_enable_interrupts(phydev); 795 + if (err < 0) 796 + break; 797 + } 798 799 phydev->state = PHY_RESUMING; 800 do_resume = true;
+48 -35
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 105 struct iwl_tx_cmd *tx_cmd, 106 struct ieee80211_tx_info *info, u8 sta_id) 107 { 108 struct ieee80211_hdr *hdr = (void *)skb->data; 109 __le16 fc = hdr->frame_control; 110 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); ··· 186 tx_cmd->tx_flags = cpu_to_le32(tx_flags); 187 /* Total # bytes to be transmitted */ 188 tx_cmd->len = cpu_to_le16((u16)skb->len + 189 - (uintptr_t)info->driver_data[0]); 190 tx_cmd->next_frame_len = 0; 191 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 192 tx_cmd->sta_id = sta_id; ··· 328 */ 329 static struct iwl_device_cmd * 330 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, 331 - int hdrlen, struct ieee80211_sta *sta, u8 sta_id) 332 { 333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 334 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 335 struct iwl_device_cmd *dev_cmd; 336 struct iwl_tx_cmd *tx_cmd; 337 ··· 352 353 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); 354 355 - memset(&info->status, 0, sizeof(info->status)); 356 - memset(info->driver_data, 0, sizeof(info->driver_data)); 357 358 - info->driver_data[1] = dev_cmd; 359 360 return dev_cmd; 361 } ··· 363 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) 364 { 365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 366 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 367 struct iwl_device_cmd *dev_cmd; 368 struct iwl_tx_cmd *tx_cmd; 369 u8 sta_id; 370 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 371 372 - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) 373 return -1; 374 375 - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 376 - (!info->control.vif || 377 - info->hw_queue != info->control.vif->cab_queue))) 378 return -1; 379 380 /* This holds the amsdu headers length */ 381 - info->driver_data[0] = (void *)(uintptr_t)0; 382 383 /* 384 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used ··· 390 * and hence needs to 
be sent on the aux queue 391 */ 392 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && 393 - info->control.vif->type == NL80211_IFTYPE_STATION) 394 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; 395 396 /* ··· 403 * AUX station. 404 */ 405 sta_id = mvm->aux_sta.sta_id; 406 - if (info->control.vif) { 407 struct iwl_mvm_vif *mvmvif = 408 - iwl_mvm_vif_from_mac80211(info->control.vif); 409 410 - if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 411 - info->control.vif->type == NL80211_IFTYPE_AP) 412 sta_id = mvmvif->bcast_sta.sta_id; 413 - else if (info->control.vif->type == NL80211_IFTYPE_STATION && 414 is_multicast_ether_addr(hdr->addr1)) { 415 u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); 416 ··· 419 } 420 } 421 422 - IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); 423 424 - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); 425 if (!dev_cmd) 426 return -1; 427 428 - /* From now on, we cannot access info->control */ 429 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 430 431 /* Copy MAC header from skb into command buffer */ 432 memcpy(tx_cmd->hdr, hdr, hdrlen); 433 434 - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { 435 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); 436 return -1; 437 } ··· 449 450 #ifdef CONFIG_INET 451 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, 452 struct ieee80211_sta *sta, 453 struct sk_buff_head *mpdus_skb) 454 { 455 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 456 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 457 struct ieee80211_hdr *hdr = (void *)skb->data; 458 unsigned int mss = skb_shinfo(skb)->gso_size; 459 struct sk_buff *tmp, *next; ··· 548 549 /* This skb fits in one single A-MSDU */ 550 if (num_subframes * mss >= tcp_payload_len) { 551 /* 552 * Compute the length of all the data added for the A-MSDU. 
553 * This will be used to compute the length to write in the TX ··· 558 * already had one set of SNAP / IP / TCP headers. 559 */ 560 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); 561 - info = IEEE80211_SKB_CB(skb); 562 amsdu_add = num_subframes * sizeof(struct ethhdr) + 563 (num_subframes - 1) * (snap_ip_tcp + pad); 564 /* This holds the amsdu headers length */ 565 - info->driver_data[0] = (void *)(uintptr_t)amsdu_add; 566 567 __skb_queue_tail(mpdus_skb, skb); 568 return 0; ··· 601 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); 602 603 if (tcp_payload_len > mss) { 604 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); 605 - info = IEEE80211_SKB_CB(tmp); 606 amsdu_add = num_subframes * sizeof(struct ethhdr) + 607 (num_subframes - 1) * (snap_ip_tcp + pad); 608 - info->driver_data[0] = (void *)(uintptr_t)amsdu_add; 609 skb_shinfo(tmp)->gso_size = mss; 610 } else { 611 qc = ieee80211_get_qos_ctl((void *)tmp->data); ··· 630 } 631 #else /* CONFIG_INET */ 632 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, 633 struct ieee80211_sta *sta, 634 struct sk_buff_head *mpdus_skb) 635 { ··· 645 * Sets the fields in the Tx cmd that are crypto related 646 */ 647 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, 648 struct ieee80211_sta *sta) 649 { 650 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 651 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 652 struct iwl_mvm_sta *mvmsta; 653 struct iwl_device_cmd *dev_cmd; 654 struct iwl_tx_cmd *tx_cmd; ··· 669 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 670 return -1; 671 672 - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); 673 if (!dev_cmd) 674 goto drop; 675 ··· 746 struct ieee80211_sta *sta) 747 { 748 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 749 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 750 struct sk_buff_head mpdus_skbs; 751 unsigned int payload_len; 752 int ret; ··· 758 if 
(WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 759 return -1; 760 761 /* This holds the amsdu headers length */ 762 - info->driver_data[0] = (void *)(uintptr_t)0; 763 764 if (!skb_is_gso(skb)) 765 - return iwl_mvm_tx_mpdu(mvm, skb, sta); 766 767 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - 768 tcp_hdrlen(skb) + skb->data_len; 769 770 if (payload_len <= skb_shinfo(skb)->gso_size) 771 - return iwl_mvm_tx_mpdu(mvm, skb, sta); 772 773 __skb_queue_head_init(&mpdus_skbs); 774 775 - ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs); 776 if (ret) 777 return ret; 778 ··· 784 while (!skb_queue_empty(&mpdus_skbs)) { 785 skb = __skb_dequeue(&mpdus_skbs); 786 787 - ret = iwl_mvm_tx_mpdu(mvm, skb, sta); 788 if (ret) { 789 __skb_queue_purge(&mpdus_skbs); 790 return ret;
··· 105 struct iwl_tx_cmd *tx_cmd, 106 struct ieee80211_tx_info *info, u8 sta_id) 107 { 108 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 109 struct ieee80211_hdr *hdr = (void *)skb->data; 110 __le16 fc = hdr->frame_control; 111 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); ··· 185 tx_cmd->tx_flags = cpu_to_le32(tx_flags); 186 /* Total # bytes to be transmitted */ 187 tx_cmd->len = cpu_to_le16((u16)skb->len + 188 + (uintptr_t)skb_info->driver_data[0]); 189 tx_cmd->next_frame_len = 0; 190 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 191 tx_cmd->sta_id = sta_id; ··· 327 */ 328 static struct iwl_device_cmd * 329 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, 330 + struct ieee80211_tx_info *info, int hdrlen, 331 + struct ieee80211_sta *sta, u8 sta_id) 332 { 333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 334 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 335 struct iwl_device_cmd *dev_cmd; 336 struct iwl_tx_cmd *tx_cmd; 337 ··· 350 351 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); 352 353 + memset(&skb_info->status, 0, sizeof(skb_info->status)); 354 + memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); 355 356 + skb_info->driver_data[1] = dev_cmd; 357 358 return dev_cmd; 359 } ··· 361 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) 362 { 363 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 364 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 365 + struct ieee80211_tx_info info; 366 struct iwl_device_cmd *dev_cmd; 367 struct iwl_tx_cmd *tx_cmd; 368 u8 sta_id; 369 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 370 371 + memcpy(&info, skb->cb, sizeof(info)); 372 + 373 + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) 374 return -1; 375 376 + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 377 + (!info.control.vif || 378 + info.hw_queue != info.control.vif->cab_queue))) 379 return 
-1; 380 381 /* This holds the amsdu headers length */ 382 + skb_info->driver_data[0] = (void *)(uintptr_t)0; 383 384 /* 385 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used ··· 385 * and hence needs to be sent on the aux queue 386 */ 387 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && 388 + info.control.vif->type == NL80211_IFTYPE_STATION) 389 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; 390 391 /* ··· 398 * AUX station. 399 */ 400 sta_id = mvm->aux_sta.sta_id; 401 + if (info.control.vif) { 402 struct iwl_mvm_vif *mvmvif = 403 + iwl_mvm_vif_from_mac80211(info.control.vif); 404 405 + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 406 + info.control.vif->type == NL80211_IFTYPE_AP) 407 sta_id = mvmvif->bcast_sta.sta_id; 408 + else if (info.control.vif->type == NL80211_IFTYPE_STATION && 409 is_multicast_ether_addr(hdr->addr1)) { 410 u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); 411 ··· 414 } 415 } 416 417 + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue); 418 419 + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); 420 if (!dev_cmd) 421 return -1; 422 423 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 424 425 /* Copy MAC header from skb into command buffer */ 426 memcpy(tx_cmd->hdr, hdr, hdrlen); 427 428 + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) { 429 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); 430 return -1; 431 } ··· 445 446 #ifdef CONFIG_INET 447 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, 448 + struct ieee80211_tx_info *info, 449 struct ieee80211_sta *sta, 450 struct sk_buff_head *mpdus_skb) 451 { 452 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 453 struct ieee80211_hdr *hdr = (void *)skb->data; 454 unsigned int mss = skb_shinfo(skb)->gso_size; 455 struct sk_buff *tmp, *next; ··· 544 545 /* This skb fits in one single A-MSDU */ 546 if (num_subframes * mss >= tcp_payload_len) { 547 + struct ieee80211_tx_info 
*skb_info = IEEE80211_SKB_CB(skb); 548 + 549 /* 550 * Compute the length of all the data added for the A-MSDU. 551 * This will be used to compute the length to write in the TX ··· 552 * already had one set of SNAP / IP / TCP headers. 553 */ 554 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); 555 amsdu_add = num_subframes * sizeof(struct ethhdr) + 556 (num_subframes - 1) * (snap_ip_tcp + pad); 557 /* This holds the amsdu headers length */ 558 + skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add; 559 560 __skb_queue_tail(mpdus_skb, skb); 561 return 0; ··· 596 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); 597 598 if (tcp_payload_len > mss) { 599 + struct ieee80211_tx_info *skb_info = 600 + IEEE80211_SKB_CB(tmp); 601 + 602 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); 603 amsdu_add = num_subframes * sizeof(struct ethhdr) + 604 (num_subframes - 1) * (snap_ip_tcp + pad); 605 + skb_info->driver_data[0] = 606 + (void *)(uintptr_t)amsdu_add; 607 skb_shinfo(tmp)->gso_size = mss; 608 } else { 609 qc = ieee80211_get_qos_ctl((void *)tmp->data); ··· 622 } 623 #else /* CONFIG_INET */ 624 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, 625 + struct ieee80211_tx_info *info, 626 struct ieee80211_sta *sta, 627 struct sk_buff_head *mpdus_skb) 628 { ··· 636 * Sets the fields in the Tx cmd that are crypto related 637 */ 638 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, 639 + struct ieee80211_tx_info *info, 640 struct ieee80211_sta *sta) 641 { 642 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 643 struct iwl_mvm_sta *mvmsta; 644 struct iwl_device_cmd *dev_cmd; 645 struct iwl_tx_cmd *tx_cmd; ··· 660 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 661 return -1; 662 663 + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, 664 + sta, mvmsta->sta_id); 665 if (!dev_cmd) 666 goto drop; 667 ··· 736 struct ieee80211_sta *sta) 737 { 738 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 
739 + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); 740 + struct ieee80211_tx_info info; 741 struct sk_buff_head mpdus_skbs; 742 unsigned int payload_len; 743 int ret; ··· 747 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 748 return -1; 749 750 + memcpy(&info, skb->cb, sizeof(info)); 751 + 752 /* This holds the amsdu headers length */ 753 + skb_info->driver_data[0] = (void *)(uintptr_t)0; 754 755 if (!skb_is_gso(skb)) 756 + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 757 758 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - 759 tcp_hdrlen(skb) + skb->data_len; 760 761 if (payload_len <= skb_shinfo(skb)->gso_size) 762 + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 763 764 __skb_queue_head_init(&mpdus_skbs); 765 766 + ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); 767 if (ret) 768 return ret; 769 ··· 771 while (!skb_queue_empty(&mpdus_skbs)) { 772 skb = __skb_dequeue(&mpdus_skbs); 773 774 + ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 775 if (ret) { 776 __skb_queue_purge(&mpdus_skbs); 777 return ret;
+28
include/uapi/linux/if.h
··· 19 #ifndef _LINUX_IF_H 20 #define _LINUX_IF_H 21 22 #include <linux/types.h> /* for "__kernel_caddr_t" et al */ 23 #include <linux/socket.h> /* for "struct sockaddr" et al */ 24 #include <linux/compiler.h> /* for "__user" et al */ 25 26 #define IFNAMSIZ 16 27 #define IFALIASZ 256 28 #include <linux/hdlc/ioctl.h> 29 30 /** 31 * enum net_device_flags - &struct net_device flags 32 * ··· 74 * @IFF_ECHO: echo sent packets. Volatile. 75 */ 76 enum net_device_flags { 77 IFF_UP = 1<<0, /* sysfs */ 78 IFF_BROADCAST = 1<<1, /* volatile */ 79 IFF_DEBUG = 1<<2, /* sysfs */ ··· 92 IFF_PORTSEL = 1<<13, /* sysfs */ 93 IFF_AUTOMEDIA = 1<<14, /* sysfs */ 94 IFF_DYNAMIC = 1<<15, /* sysfs */ 95 IFF_LOWER_UP = 1<<16, /* volatile */ 96 IFF_DORMANT = 1<<17, /* volatile */ 97 IFF_ECHO = 1<<18, /* volatile */ 98 }; 99 100 #define IFF_UP IFF_UP 101 #define IFF_BROADCAST IFF_BROADCAST 102 #define IFF_DEBUG IFF_DEBUG ··· 119 #define IFF_PORTSEL IFF_PORTSEL 120 #define IFF_AUTOMEDIA IFF_AUTOMEDIA 121 #define IFF_DYNAMIC IFF_DYNAMIC 122 #define IFF_LOWER_UP IFF_LOWER_UP 123 #define IFF_DORMANT IFF_DORMANT 124 #define IFF_ECHO IFF_ECHO 125 126 #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ 127 IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) ··· 184 * being very small might be worth keeping for clean configuration. 185 */ 186 187 struct ifmap { 188 unsigned long mem_start; 189 unsigned long mem_end; ··· 195 unsigned char port; 196 /* 3 bytes spare */ 197 }; 198 199 struct if_settings { 200 unsigned int type; /* Type of physical device or protocol */ ··· 221 * remainder may be interface specific. 222 */ 223 224 struct ifreq { 225 #define IFHWADDRLEN 6 226 union ··· 246 struct if_settings ifru_settings; 247 } ifr_ifru; 248 }; 249 250 #define ifr_name ifr_ifrn.ifrn_name /* interface name */ 251 #define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */ ··· 273 * must know all networks accessible). 
274 */ 275 276 struct ifconf { 277 int ifc_len; /* size of buffer */ 278 union { ··· 282 struct ifreq __user *ifcu_req; 283 } ifc_ifcu; 284 }; 285 #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ 286 #define ifc_req ifc_ifcu.ifcu_req /* array of structures */ 287
··· 19 #ifndef _LINUX_IF_H 20 #define _LINUX_IF_H 21 22 + #include <linux/libc-compat.h> /* for compatibility with glibc */ 23 #include <linux/types.h> /* for "__kernel_caddr_t" et al */ 24 #include <linux/socket.h> /* for "struct sockaddr" et al */ 25 #include <linux/compiler.h> /* for "__user" et al */ 26 27 + #if __UAPI_DEF_IF_IFNAMSIZ 28 #define IFNAMSIZ 16 29 + #endif /* __UAPI_DEF_IF_IFNAMSIZ */ 30 #define IFALIASZ 256 31 #include <linux/hdlc/ioctl.h> 32 33 + /* For glibc compatibility. An empty enum does not compile. */ 34 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \ 35 + __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 36 /** 37 * enum net_device_flags - &struct net_device flags 38 * ··· 68 * @IFF_ECHO: echo sent packets. Volatile. 69 */ 70 enum net_device_flags { 71 + /* for compatibility with glibc net/if.h */ 72 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS 73 IFF_UP = 1<<0, /* sysfs */ 74 IFF_BROADCAST = 1<<1, /* volatile */ 75 IFF_DEBUG = 1<<2, /* sysfs */ ··· 84 IFF_PORTSEL = 1<<13, /* sysfs */ 85 IFF_AUTOMEDIA = 1<<14, /* sysfs */ 86 IFF_DYNAMIC = 1<<15, /* sysfs */ 87 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */ 88 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 89 IFF_LOWER_UP = 1<<16, /* volatile */ 90 IFF_DORMANT = 1<<17, /* volatile */ 91 IFF_ECHO = 1<<18, /* volatile */ 92 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ 93 }; 94 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */ 95 96 + /* for compatibility with glibc net/if.h */ 97 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS 98 #define IFF_UP IFF_UP 99 #define IFF_BROADCAST IFF_BROADCAST 100 #define IFF_DEBUG IFF_DEBUG ··· 105 #define IFF_PORTSEL IFF_PORTSEL 106 #define IFF_AUTOMEDIA IFF_AUTOMEDIA 107 #define IFF_DYNAMIC IFF_DYNAMIC 108 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */ 109 + 110 + #if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 111 #define IFF_LOWER_UP IFF_LOWER_UP 112 #define 
IFF_DORMANT IFF_DORMANT 113 #define IFF_ECHO IFF_ECHO 114 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ 115 116 #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ 117 IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) ··· 166 * being very small might be worth keeping for clean configuration. 167 */ 168 169 + /* for compatibility with glibc net/if.h */ 170 + #if __UAPI_DEF_IF_IFMAP 171 struct ifmap { 172 unsigned long mem_start; 173 unsigned long mem_end; ··· 175 unsigned char port; 176 /* 3 bytes spare */ 177 }; 178 + #endif /* __UAPI_DEF_IF_IFMAP */ 179 180 struct if_settings { 181 unsigned int type; /* Type of physical device or protocol */ ··· 200 * remainder may be interface specific. 201 */ 202 203 + /* for compatibility with glibc net/if.h */ 204 + #if __UAPI_DEF_IF_IFREQ 205 struct ifreq { 206 #define IFHWADDRLEN 6 207 union ··· 223 struct if_settings ifru_settings; 224 } ifr_ifru; 225 }; 226 + #endif /* __UAPI_DEF_IF_IFREQ */ 227 228 #define ifr_name ifr_ifrn.ifrn_name /* interface name */ 229 #define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */ ··· 249 * must know all networks accessible). 250 */ 251 252 + /* for compatibility with glibc net/if.h */ 253 + #if __UAPI_DEF_IF_IFCONF 254 struct ifconf { 255 int ifc_len; /* size of buffer */ 256 union { ··· 256 struct ifreq __user *ifcu_req; 257 } ifc_ifcu; 258 }; 259 + #endif /* __UAPI_DEF_IF_IFCONF */ 260 + 261 #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ 262 #define ifc_req ifc_ifcu.ifcu_req /* array of structures */ 263
+44
include/uapi/linux/libc-compat.h
··· 51 /* We have included glibc headers... */ 52 #if defined(__GLIBC__) 53 54 /* Coordinate with glibc netinet/in.h header. */ 55 #if defined(_NETINET_IN_H) 56 ··· 150 * or we are being included in the kernel, then define everything 151 * that we need. */ 152 #else /* !defined(__GLIBC__) */ 153 154 /* Definitions for in.h */ 155 #define __UAPI_DEF_IN_ADDR 1
··· 51 /* We have included glibc headers... */ 52 #if defined(__GLIBC__) 53 54 + /* Coordinate with glibc net/if.h header. */ 55 + #if defined(_NET_IF_H) 56 + 57 + /* GLIBC headers included first so don't define anything 58 + * that would already be defined. */ 59 + 60 + #define __UAPI_DEF_IF_IFCONF 0 61 + #define __UAPI_DEF_IF_IFMAP 0 62 + #define __UAPI_DEF_IF_IFNAMSIZ 0 63 + #define __UAPI_DEF_IF_IFREQ 0 64 + /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 65 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0 66 + /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 67 + #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 68 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 69 + #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ 70 + 71 + #else /* _NET_IF_H */ 72 + 73 + /* Linux headers included first, and we must define everything 74 + * we need. The expectation is that glibc will check the 75 + * __UAPI_DEF_* defines and adjust appropriately. */ 76 + 77 + #define __UAPI_DEF_IF_IFCONF 1 78 + #define __UAPI_DEF_IF_IFMAP 1 79 + #define __UAPI_DEF_IF_IFNAMSIZ 1 80 + #define __UAPI_DEF_IF_IFREQ 1 81 + /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 82 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 83 + /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 84 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 85 + 86 + #endif /* _NET_IF_H */ 87 + 88 /* Coordinate with glibc netinet/in.h header. */ 89 #if defined(_NETINET_IN_H) 90 ··· 116 * or we are being included in the kernel, then define everything 117 * that we need. 
*/ 118 #else /* !defined(__GLIBC__) */ 119 + 120 + /* Definitions for if.h */ 121 + #define __UAPI_DEF_IF_IFCONF 1 122 + #define __UAPI_DEF_IF_IFMAP 1 123 + #define __UAPI_DEF_IF_IFNAMSIZ 1 124 + #define __UAPI_DEF_IF_IFREQ 1 125 + /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 126 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 127 + /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 128 + #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 129 130 /* Definitions for in.h */ 131 #define __UAPI_DEF_IN_ADDR 1
+1
include/uapi/linux/tc_act/Kbuild
··· 10 header-y += tc_vlan.h 11 header-y += tc_bpf.h 12 header-y += tc_connmark.h
··· 10 header-y += tc_vlan.h 11 header-y += tc_bpf.h 12 header-y += tc_connmark.h 13 + header-y += tc_ife.h
+6 -1
net/ipv4/ip_gre.c
··· 398 iph->saddr, iph->daddr, tpi->key); 399 400 if (tunnel) { 401 - skb_pop_mac_header(skb); 402 if (tunnel->collect_md) { 403 __be16 flags; 404 __be64 tun_id; ··· 1034 struct ip_tunnel *t = netdev_priv(dev); 1035 1036 t->collect_md = true; 1037 } 1038 } 1039
··· 398 iph->saddr, iph->daddr, tpi->key); 399 400 if (tunnel) { 401 + if (tunnel->dev->type != ARPHRD_NONE) 402 + skb_pop_mac_header(skb); 403 + else 404 + skb_reset_mac_header(skb); 405 if (tunnel->collect_md) { 406 __be16 flags; 407 __be64 tun_id; ··· 1031 struct ip_tunnel *t = netdev_priv(dev); 1032 1033 t->collect_md = true; 1034 + if (dev->type == ARPHRD_IPGRE) 1035 + dev->type = ARPHRD_NONE; 1036 } 1037 } 1038
+4 -2
net/ipv4/tcp_output.c
··· 2640 */ 2641 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2642 skb_headroom(skb) >= 0xFFFF)) { 2643 - struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2644 - GFP_ATOMIC); 2645 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2646 -ENOBUFS; 2647 } else {
··· 2640 */ 2641 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2642 skb_headroom(skb) >= 0xFFFF)) { 2643 + struct sk_buff *nskb; 2644 + 2645 + skb_mstamp_get(&skb->skb_mstamp); 2646 + nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 2647 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2648 -ENOBUFS; 2649 } else {
+1 -1
net/netfilter/nf_conntrack_core.c
··· 66 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); 67 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); 68 69 - static __read_mostly spinlock_t nf_conntrack_locks_all_lock; 70 static __read_mostly bool nf_conntrack_locks_all; 71 72 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
··· 66 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); 67 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); 68 69 + static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 70 static __read_mostly bool nf_conntrack_locks_all; 71 72 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+1
net/netfilter/xt_IDLETIMER.c
··· 236 237 list_del(&info->timer->entry); 238 del_timer_sync(&info->timer->timer); 239 sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr); 240 kfree(info->timer->attr.attr.name); 241 kfree(info->timer);
··· 236 237 list_del(&info->timer->entry); 238 del_timer_sync(&info->timer->timer); 239 + cancel_work_sync(&info->timer->work); 240 sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr); 241 kfree(info->timer->attr.attr.name); 242 kfree(info->timer);
+13
net/openvswitch/conntrack.c
··· 776 return -EINVAL; 777 } 778 779 /* Call the helper only if: 780 * - nf_conntrack_in() was executed above ("!cached") for a 781 * confirmed connection, or
··· 776 return -EINVAL; 777 } 778 779 + /* Userspace may decide to perform a ct lookup without a helper 780 + * specified followed by a (recirculate and) commit with one. 781 + * Therefore, for unconfirmed connections which we will commit, 782 + * we need to attach the helper here. 783 + */ 784 + if (!nf_ct_is_confirmed(ct) && info->commit && 785 + info->helper && !nfct_help(ct)) { 786 + int err = __nf_ct_try_assign_helper(ct, info->ct, 787 + GFP_ATOMIC); 788 + if (err) 789 + return err; 790 + } 791 + 792 /* Call the helper only if: 793 * - nf_conntrack_in() was executed above ("!cached") for a 794 * confirmed connection, or
+10 -4
net/sched/act_ife.c
··· 423 u16 ife_type = 0; 424 u8 *daddr = NULL; 425 u8 *saddr = NULL; 426 - int ret = 0; 427 int err; 428 429 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); ··· 435 436 parm = nla_data(tb[TCA_IFE_PARMS]); 437 438 if (parm->flags & IFE_ENCODE) { 439 /* Until we get issued the ethertype, we cant have 440 * a default.. 441 **/ 442 if (!tb[TCA_IFE_TYPE]) { 443 pr_info("You MUST pass etherype for encoding\n"); 444 return -EINVAL; 445 } 446 } 447 448 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 449 ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife), 450 bind, false); 451 if (ret) 452 return ret; 453 ret = ACT_P_CREATED; 454 } else { 455 - if (bind) /* dont override defaults */ 456 - return 0; 457 tcf_hash_release(a, bind); 458 if (!ovr) 459 return -EEXIST; ··· 499 NULL); 500 if (err) { 501 metadata_parse_err: 502 if (ret == ACT_P_CREATED) 503 _tcf_ife_cleanup(a, bind); 504
··· 423 u16 ife_type = 0; 424 u8 *daddr = NULL; 425 u8 *saddr = NULL; 426 + int ret = 0, exists = 0; 427 int err; 428 429 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); ··· 435 436 parm = nla_data(tb[TCA_IFE_PARMS]); 437 438 + exists = tcf_hash_check(tn, parm->index, a, bind); 439 + if (exists && bind) 440 + return 0; 441 + 442 if (parm->flags & IFE_ENCODE) { 443 /* Until we get issued the ethertype, we cant have 444 * a default.. 445 **/ 446 if (!tb[TCA_IFE_TYPE]) { 447 + if (exists) 448 + tcf_hash_release(a, bind); 449 pr_info("You MUST pass etherype for encoding\n"); 450 return -EINVAL; 451 } 452 } 453 454 + if (!exists) { 455 ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife), 456 bind, false); 457 if (ret) 458 return ret; 459 ret = ACT_P_CREATED; 460 } else { 461 tcf_hash_release(a, bind); 462 if (!ovr) 463 return -EEXIST; ··· 495 NULL); 496 if (err) { 497 metadata_parse_err: 498 + if (exists) 499 + tcf_hash_release(a, bind); 500 if (ret == ACT_P_CREATED) 501 _tcf_ife_cleanup(a, bind); 502
+12 -7
net/sched/act_ipt.c
··· 96 struct tcf_ipt *ipt; 97 struct xt_entry_target *td, *t; 98 char *tname; 99 - int ret = 0, err; 100 u32 hook = 0; 101 u32 index = 0; 102 ··· 107 if (err < 0) 108 return err; 109 110 - if (tb[TCA_IPT_HOOK] == NULL) 111 return -EINVAL; 112 - if (tb[TCA_IPT_TARG] == NULL) 113 - return -EINVAL; 114 115 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); 116 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) 117 return -EINVAL; 118 - 119 - if (tb[TCA_IPT_INDEX] != NULL) 120 - index = nla_get_u32(tb[TCA_IPT_INDEX]); 121 122 if (!tcf_hash_check(tn, index, a, bind)) { 123 ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
··· 96 struct tcf_ipt *ipt; 97 struct xt_entry_target *td, *t; 98 char *tname; 99 + int ret = 0, err, exists = 0; 100 u32 hook = 0; 101 u32 index = 0; 102 ··· 107 if (err < 0) 108 return err; 109 110 + if (tb[TCA_IPT_INDEX] != NULL) 111 + index = nla_get_u32(tb[TCA_IPT_INDEX]); 112 + 113 + exists = tcf_hash_check(tn, index, a, bind); 114 + if (exists && bind) 115 + return 0; 116 + 117 + if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) { 118 + if (exists) 119 + tcf_hash_release(a, bind); 120 return -EINVAL; 121 + } 122 123 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); 124 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) 125 return -EINVAL; 126 127 if (!tcf_hash_check(tn, index, a, bind)) { 128 ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
+13 -6
net/sched/act_mirred.c
··· 61 struct tc_mirred *parm; 62 struct tcf_mirred *m; 63 struct net_device *dev; 64 - int ret, ok_push = 0; 65 66 if (nla == NULL) 67 return -EINVAL; ··· 71 if (tb[TCA_MIRRED_PARMS] == NULL) 72 return -EINVAL; 73 parm = nla_data(tb[TCA_MIRRED_PARMS]); 74 switch (parm->eaction) { 75 case TCA_EGRESS_MIRROR: 76 case TCA_EGRESS_REDIR: 77 break; 78 default: 79 return -EINVAL; 80 } 81 if (parm->ifindex) { 82 dev = __dev_get_by_index(net, parm->ifindex); 83 - if (dev == NULL) 84 return -ENODEV; 85 switch (dev->type) { 86 case ARPHRD_TUNNEL: 87 case ARPHRD_TUNNEL6: ··· 109 dev = NULL; 110 } 111 112 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 113 if (dev == NULL) 114 return -EINVAL; 115 ret = tcf_hash_create(tn, parm->index, est, a, ··· 118 return ret; 119 ret = ACT_P_CREATED; 120 } else { 121 - if (bind) 122 - return 0; 123 - 124 tcf_hash_release(a, bind); 125 if (!ovr) 126 return -EEXIST;
··· 61 struct tc_mirred *parm; 62 struct tcf_mirred *m; 63 struct net_device *dev; 64 + int ret, ok_push = 0, exists = 0; 65 66 if (nla == NULL) 67 return -EINVAL; ··· 71 if (tb[TCA_MIRRED_PARMS] == NULL) 72 return -EINVAL; 73 parm = nla_data(tb[TCA_MIRRED_PARMS]); 74 + 75 + exists = tcf_hash_check(tn, parm->index, a, bind); 76 + if (exists && bind) 77 + return 0; 78 + 79 switch (parm->eaction) { 80 case TCA_EGRESS_MIRROR: 81 case TCA_EGRESS_REDIR: 82 break; 83 default: 84 + if (exists) 85 + tcf_hash_release(a, bind); 86 return -EINVAL; 87 } 88 if (parm->ifindex) { 89 dev = __dev_get_by_index(net, parm->ifindex); 90 + if (dev == NULL) { 91 + if (exists) 92 + tcf_hash_release(a, bind); 93 return -ENODEV; 94 + } 95 switch (dev->type) { 96 case ARPHRD_TUNNEL: 97 case ARPHRD_TUNNEL6: ··· 99 dev = NULL; 100 } 101 102 + if (!exists) { 103 if (dev == NULL) 104 return -EINVAL; 105 ret = tcf_hash_create(tn, parm->index, est, a, ··· 108 return ret; 109 ret = ACT_P_CREATED; 110 } else { 111 tcf_hash_release(a, bind); 112 if (!ovr) 113 return -EEXIST;
+12 -6
net/sched/act_simple.c
··· 87 struct tc_defact *parm; 88 struct tcf_defact *d; 89 char *defdata; 90 - int ret = 0, err; 91 92 if (nla == NULL) 93 return -EINVAL; ··· 99 if (tb[TCA_DEF_PARMS] == NULL) 100 return -EINVAL; 101 102 - if (tb[TCA_DEF_DATA] == NULL) 103 - return -EINVAL; 104 105 parm = nla_data(tb[TCA_DEF_PARMS]); 106 defdata = nla_data(tb[TCA_DEF_DATA]); 107 108 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 109 ret = tcf_hash_create(tn, parm->index, est, a, 110 sizeof(*d), bind, false); 111 if (ret) ··· 130 } else { 131 d = to_defact(a); 132 133 - if (bind) 134 - return 0; 135 tcf_hash_release(a, bind); 136 if (!ovr) 137 return -EEXIST;
··· 87 struct tc_defact *parm; 88 struct tcf_defact *d; 89 char *defdata; 90 + int ret = 0, err, exists = 0; 91 92 if (nla == NULL) 93 return -EINVAL; ··· 99 if (tb[TCA_DEF_PARMS] == NULL) 100 return -EINVAL; 101 102 103 parm = nla_data(tb[TCA_DEF_PARMS]); 104 + exists = tcf_hash_check(tn, parm->index, a, bind); 105 + if (exists && bind) 106 + return 0; 107 + 108 + if (tb[TCA_DEF_DATA] == NULL) { 109 + if (exists) 110 + tcf_hash_release(a, bind); 111 + return -EINVAL; 112 + } 113 + 114 defdata = nla_data(tb[TCA_DEF_DATA]); 115 116 + if (!exists) { 117 ret = tcf_hash_create(tn, parm->index, est, a, 118 sizeof(*d), bind, false); 119 if (ret) ··· 122 } else { 123 d = to_defact(a); 124 125 tcf_hash_release(a, bind); 126 if (!ovr) 127 return -EEXIST;
+11 -7
net/sched/act_skbedit.c
··· 69 struct tcf_skbedit *d; 70 u32 flags = 0, *priority = NULL, *mark = NULL; 71 u16 *queue_mapping = NULL; 72 - int ret = 0, err; 73 74 if (nla == NULL) 75 return -EINVAL; ··· 96 mark = nla_data(tb[TCA_SKBEDIT_MARK]); 97 } 98 99 - if (!flags) 100 - return -EINVAL; 101 - 102 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); 103 104 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 105 ret = tcf_hash_create(tn, parm->index, est, a, 106 sizeof(*d), bind, false); 107 if (ret) ··· 117 ret = ACT_P_CREATED; 118 } else { 119 d = to_skbedit(a); 120 - if (bind) 121 - return 0; 122 tcf_hash_release(a, bind); 123 if (!ovr) 124 return -EEXIST;
··· 69 struct tcf_skbedit *d; 70 u32 flags = 0, *priority = NULL, *mark = NULL; 71 u16 *queue_mapping = NULL; 72 + int ret = 0, err, exists = 0; 73 74 if (nla == NULL) 75 return -EINVAL; ··· 96 mark = nla_data(tb[TCA_SKBEDIT_MARK]); 97 } 98 99 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); 100 101 + exists = tcf_hash_check(tn, parm->index, a, bind); 102 + if (exists && bind) 103 + return 0; 104 + 105 + if (!flags) { 106 + tcf_hash_release(a, bind); 107 + return -EINVAL; 108 + } 109 + 110 + if (!exists) { 111 ret = tcf_hash_create(tn, parm->index, est, a, 112 sizeof(*d), bind, false); 113 if (ret) ··· 111 ret = ACT_P_CREATED; 112 } else { 113 d = to_skbedit(a); 114 tcf_hash_release(a, bind); 115 if (!ovr) 116 return -EEXIST;
+16 -6
net/sched/act_vlan.c
··· 77 int action; 78 __be16 push_vid = 0; 79 __be16 push_proto = 0; 80 - int ret = 0; 81 int err; 82 83 if (!nla) ··· 90 if (!tb[TCA_VLAN_PARMS]) 91 return -EINVAL; 92 parm = nla_data(tb[TCA_VLAN_PARMS]); 93 switch (parm->v_action) { 94 case TCA_VLAN_ACT_POP: 95 break; 96 case TCA_VLAN_ACT_PUSH: 97 - if (!tb[TCA_VLAN_PUSH_VLAN_ID]) 98 return -EINVAL; 99 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); 100 - if (push_vid >= VLAN_VID_MASK) 101 return -ERANGE; 102 103 if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) { 104 push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]); ··· 124 } 125 break; 126 default: 127 return -EINVAL; 128 } 129 action = parm->v_action; 130 131 - if (!tcf_hash_check(tn, parm->index, a, bind)) { 132 ret = tcf_hash_create(tn, parm->index, est, a, 133 sizeof(*v), bind, false); 134 if (ret) ··· 138 139 ret = ACT_P_CREATED; 140 } else { 141 - if (bind) 142 - return 0; 143 tcf_hash_release(a, bind); 144 if (!ovr) 145 return -EEXIST;
··· 77 int action; 78 __be16 push_vid = 0; 79 __be16 push_proto = 0; 80 + int ret = 0, exists = 0; 81 int err; 82 83 if (!nla) ··· 90 if (!tb[TCA_VLAN_PARMS]) 91 return -EINVAL; 92 parm = nla_data(tb[TCA_VLAN_PARMS]); 93 + exists = tcf_hash_check(tn, parm->index, a, bind); 94 + if (exists && bind) 95 + return 0; 96 + 97 switch (parm->v_action) { 98 case TCA_VLAN_ACT_POP: 99 break; 100 case TCA_VLAN_ACT_PUSH: 101 + if (!tb[TCA_VLAN_PUSH_VLAN_ID]) { 102 + if (exists) 103 + tcf_hash_release(a, bind); 104 return -EINVAL; 105 + } 106 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); 107 + if (push_vid >= VLAN_VID_MASK) { 108 + if (exists) 109 + tcf_hash_release(a, bind); 110 return -ERANGE; 111 + } 112 113 if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) { 114 push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]); ··· 114 } 115 break; 116 default: 117 + if (exists) 118 + tcf_hash_release(a, bind); 119 return -EINVAL; 120 } 121 action = parm->v_action; 122 123 + if (!exists) { 124 ret = tcf_hash_create(tn, parm->index, est, a, 125 sizeof(*v), bind, false); 126 if (ret) ··· 126 127 ret = ACT_P_CREATED; 128 } else { 129 tcf_hash_release(a, bind); 130 if (!ovr) 131 return -EEXIST;
+1
net/x25/x25_facilities.c
··· 277 278 memset(&theirs, 0, sizeof(theirs)); 279 memcpy(new, ours, sizeof(*new)); 280 281 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 282 if (len < 0)
··· 277 278 memset(&theirs, 0, sizeof(theirs)); 279 memcpy(new, ours, sizeof(*new)); 280 + memset(dte, 0, sizeof(*dte)); 281 282 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 283 if (len < 0)