Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) There was a simplification in the ipv6 ndisc packet sending
attempted here, which avoided using memory accounting on the
per-netns ndisc socket for sending NDISC packets. It did fix some
important issues, but it causes regressions so it gets reverted here
too. Specifically, the problem with this change is that the IPV6
output path really depends upon there being a valid skb->sk
attached.

The reason we want to do this change in some form when we figure out
how to do it right, is that if a device goes down the ndisc_sk
socket send queue will fill up and block NDISC packets that we want
to send to other devices too. That's really bad behavior.

Hopefully Thomas can come up with a better version of this change.

2) Fix a severe TCP performance regression by reverting a change made
to dev_pick_tx() quite some time ago. From Eric Dumazet.

3) TIPC returns wrongly signed error codes, fix from Erik Hugne.

4) Fix OOPS when doing IPSEC over ipv4 tunnels due to orphaning the
skb->sk too early. Fix from Li Hongjun.

5) RAW ipv4 sockets can use the wrong routing key during lookup, from
Chris Clark.

6) Similar to #1, revert an older change that tried to use plain
alloc_skb() for SYN/ACK TCP packets, this broke the netfilter owner
mark which needs to see the skb->sk for such frames. From Phil
Oester.

7) BNX2x driver bug fixes from Ariel Elior and Yuval Mintz,
specifically in the handling of virtual functions.

8) IPSEC path error propagation to sockets is not done properly when
we have v4 in v6, and v6 in v4 type rules. Fix from Hannes Frederic
Sowa.

9) Fix missing channel context release in mac80211, from Johannes Berg.

10) Fix network namespace handling wrt. SCM_RIGHTS, from Andy
Lutomirski.

11) Fix usage of bogus NAPI weight in jme, netxen, and ps3_gelic
drivers. From Michal Schmidt.

12) Hopefully a complete and correct fix for the genetlink dump locking
and module reference counting. From Pravin B Shelar.

13) sk_busy_loop() must do a cpu_relax(), from Eliezer Tamir.

14) Fix handling of timestamp offset when restoring a snapshotted TCP
socket. From Andrew Vagin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
net: fec: fix time stamping logic after napi conversion
net: bridge: convert MLDv2 Query MRC into msecs_to_jiffies for max_delay
mISDN: return -EINVAL on error in dsp_control_req()
net: revert 8728c544a9c ("net: dev_pick_tx() fix")
Revert "ipv6: Don't depend on per socket memory for neighbour discovery messages"
ipv4 tunnels: fix an oops when using ipip/sit with IPsec
tipc: set sk_err correctly when connection fails
tcp: tcp_make_synack() should use sock_wmalloc
bridge: separate querier and query timer into IGMP/IPv4 and MLD/IPv6 ones
ipv6: Don't depend on per socket memory for neighbour discovery messages
ipv4: sendto/hdrincl: don't use destination address found in header
tcp: don't apply tsoffset if rcv_tsecr is zero
tcp: initialize rcv_tstamp for restored sockets
net: xilinx: fix memleak
net: usb: Add HP hs2434 device to ZLP exception table
net: add cpu_relax to busy poll loop
net: stmmac: fixed the pbl setting with DT
genl: Hold reference on correct module while netlink-dump.
genl: Fix genl dumpit() locking.
xfrm: Fix potential null pointer dereference in xdst_queue_output
...

+592 -262
+3 -1
drivers/isdn/mISDN/dsp_core.c
··· 288 288 u8 *data; 289 289 int len; 290 290 291 - if (skb->len < sizeof(int)) 291 + if (skb->len < sizeof(int)) { 292 292 printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); 293 + return -EINVAL; 294 + } 293 295 cont = *((int *)skb->data); 294 296 len = skb->len - sizeof(int); 295 297 data = skb->data + sizeof(int);
+8 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 53 53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; 54 54 int old_max_eth_txqs, new_max_eth_txqs; 55 55 int old_txdata_index = 0, new_txdata_index = 0; 56 + struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; 56 57 57 58 /* Copy the NAPI object as it has been already initialized */ 58 59 from_fp->napi = to_fp->napi; ··· 61 60 /* Move bnx2x_fastpath contents */ 62 61 memcpy(to_fp, from_fp, sizeof(*to_fp)); 63 62 to_fp->index = to; 63 + 64 + /* Retain the tpa_info of the original `to' version as we don't want 65 + * 2 FPs to contain the same tpa_info pointer. 66 + */ 67 + to_fp->tpa_info = old_tpa_info; 64 68 65 69 /* move sp_objs contents as well, as their indices match fp ones */ 66 70 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); ··· 2962 2956 if (IS_PF(bp)) { 2963 2957 if (CNIC_LOADED(bp)) 2964 2958 bnx2x_free_mem_cnic(bp); 2965 - bnx2x_free_mem(bp); 2966 2959 } 2960 + bnx2x_free_mem(bp); 2961 + 2967 2962 bp->state = BNX2X_STATE_CLOSED; 2968 2963 bp->cnic_loaded = false; 2969 2964
+6 -3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 7855 7855 { 7856 7856 int i; 7857 7857 7858 - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 7859 - sizeof(struct host_sp_status_block)); 7860 - 7861 7858 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 7862 7859 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7860 + 7861 + if (IS_VF(bp)) 7862 + return; 7863 + 7864 + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 7865 + sizeof(struct host_sp_status_block)); 7863 7866 7864 7867 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 7865 7868 sizeof(struct bnx2x_slowpath));
+27 -36
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 522 522 return 0; 523 523 } 524 524 525 - static int 526 - bnx2x_vfop_config_vlan0(struct bnx2x *bp, 527 - struct bnx2x_vlan_mac_ramrod_params *vlan_mac, 528 - bool add) 529 - { 530 - int rc; 531 - 532 - vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : 533 - BNX2X_VLAN_MAC_DEL; 534 - vlan_mac->user_req.u.vlan.vlan = 0; 535 - 536 - rc = bnx2x_config_vlan_mac(bp, vlan_mac); 537 - if (rc == -EEXIST) 538 - rc = 0; 539 - return rc; 540 - } 541 - 542 525 static int bnx2x_vfop_config_list(struct bnx2x *bp, 543 526 struct bnx2x_vfop_filters *filters, 544 527 struct bnx2x_vlan_mac_ramrod_params *vlan_mac) ··· 626 643 627 644 case BNX2X_VFOP_VLAN_CONFIG_LIST: 628 645 /* next state */ 629 - vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; 646 + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 630 647 631 - /* remove vlan0 - could be no-op */ 632 - vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); 633 - if (vfop->rc) 634 - goto op_err; 635 - 636 - /* Do vlan list config. if this operation fails we try to 637 - * restore vlan0 to keep the queue is working order 638 - */ 648 + /* do list config */ 639 649 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); 640 650 if (!vfop->rc) { 641 651 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); 642 652 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 643 653 } 644 - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ 645 - 646 - case BNX2X_VFOP_VLAN_CONFIG_LIST_0: 647 - /* next state */ 648 - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 649 - 650 - if (list_empty(&obj->head)) 651 - /* add vlan0 */ 652 - vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); 653 654 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 654 655 655 656 default: ··· 2786 2819 return 0; 2787 2820 } 2788 2821 2822 + struct set_vf_state_cookie { 2823 + struct bnx2x_virtf *vf; 2824 + u8 state; 2825 + }; 2826 + 2827 + void bnx2x_set_vf_state(void *cookie) 2828 + { 2829 + struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2830 + 
2831 + p->vf->state = p->state; 2832 + } 2833 + 2789 2834 /* VFOP close (teardown the queues, delete mcasts and close HW) */ 2790 2835 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2791 2836 { ··· 2848 2869 op_err: 2849 2870 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2850 2871 op_done: 2851 - vf->state = VF_ACQUIRED; 2872 + 2873 + /* need to make sure there are no outstanding stats ramrods which may 2874 + * cause the device to access the VF's stats buffer which it will free 2875 + * as soon as we return from the close flow. 2876 + */ 2877 + { 2878 + struct set_vf_state_cookie cookie; 2879 + 2880 + cookie.vf = vf; 2881 + cookie.state = VF_ACQUIRED; 2882 + bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2883 + } 2884 + 2852 2885 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2853 2886 bnx2x_vfop_end(bp, vf, vfop); 2854 2887 }
+19 -12
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
··· 522 522 /* should be called under stats_sema */ 523 523 static void __bnx2x_stats_start(struct bnx2x *bp) 524 524 { 525 - /* vfs travel through here as part of the statistics FSM, but no action 526 - * is required 527 - */ 528 - if (IS_VF(bp)) 529 - return; 525 + if (IS_PF(bp)) { 526 + if (bp->port.pmf) 527 + bnx2x_port_stats_init(bp); 530 528 531 - if (bp->port.pmf) 532 - bnx2x_port_stats_init(bp); 529 + else if (bp->func_stx) 530 + bnx2x_func_stats_init(bp); 533 531 534 - else if (bp->func_stx) 535 - bnx2x_func_stats_init(bp); 536 - 537 - bnx2x_hw_stats_post(bp); 538 - bnx2x_storm_stats_post(bp); 532 + bnx2x_hw_stats_post(bp); 533 + bnx2x_storm_stats_post(bp); 534 + } 539 535 540 536 bp->stats_started = true; 541 537 } ··· 1992 1996 afex_stats->rx_frames_discarded_lo, 1993 1997 estats->mac_discard); 1994 1998 } 1999 + } 2000 + 2001 + void bnx2x_stats_safe_exec(struct bnx2x *bp, 2002 + void (func_to_exec)(void *cookie), 2003 + void *cookie){ 2004 + if (down_timeout(&bp->stats_sema, HZ/10)) 2005 + BNX2X_ERR("Unable to acquire stats lock\n"); 2006 + bnx2x_stats_comp(bp); 2007 + func_to_exec(cookie); 2008 + __bnx2x_stats_start(bp); 2009 + up(&bp->stats_sema); 1995 2010 }
+3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
··· 539 539 void bnx2x_memset_stats(struct bnx2x *bp); 540 540 void bnx2x_stats_init(struct bnx2x *bp); 541 541 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 542 + void bnx2x_stats_safe_exec(struct bnx2x *bp, 543 + void (func_to_exec)(void *cookie), 544 + void *cookie); 542 545 543 546 /** 544 547 * bnx2x_save_statistics - save statistics when unloading.
+4
drivers/net/ethernet/emulex/benet/be_main.c
··· 4373 4373 pci_set_power_state(pdev, PCI_D0); 4374 4374 pci_restore_state(pdev); 4375 4375 4376 + status = be_fw_wait_ready(adapter); 4377 + if (status) 4378 + return status; 4379 + 4376 4380 /* tell fw we're ready to fire cmds */ 4377 4381 status = be_cmd_fw_init(adapter); 4378 4382 if (status)
+1 -2
drivers/net/ethernet/freescale/fec_main.c
··· 971 971 htons(ETH_P_8021Q), 972 972 vlan_tag); 973 973 974 - if (!skb_defer_rx_timestamp(skb)) 975 - napi_gro_receive(&fep->napi, skb); 974 + napi_gro_receive(&fep->napi, skb); 976 975 } 977 976 978 977 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+1 -1
drivers/net/ethernet/jme.c
··· 3069 3069 jwrite32(jme, JME_APMC, apmc); 3070 3070 } 3071 3071 3072 - NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) 3072 + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) 3073 3073 3074 3074 spin_lock_init(&jme->phy_lock); 3075 3075 spin_lock_init(&jme->macaddr_lock);
-1
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
··· 1171 1171 1172 1172 #define NETXEN_DB_MAPSIZE_BYTES 0x1000 1173 1173 1174 - #define NETXEN_NETDEV_WEIGHT 128 1175 1174 #define NETXEN_ADAPTER_UP_MAGIC 777 1176 1175 #define NETXEN_NIC_PEG_TUNE 0 1177 1176
+1 -1
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
··· 197 197 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 198 198 sds_ring = &recv_ctx->sds_rings[ring]; 199 199 netif_napi_add(netdev, &sds_ring->napi, 200 - netxen_nic_poll, NETXEN_NETDEV_WEIGHT); 200 + netxen_nic_poll, NAPI_POLL_WEIGHT); 201 201 } 202 202 203 203 return 0;
+12 -8
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 71 71 plat->force_sf_dma_mode = 1; 72 72 } 73 73 74 - dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); 75 - if (!dma_cfg) 76 - return -ENOMEM; 77 - 78 - plat->dma_cfg = dma_cfg; 79 - of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); 80 - dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); 81 - dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); 74 + if (of_find_property(np, "snps,pbl", NULL)) { 75 + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), 76 + GFP_KERNEL); 77 + if (!dma_cfg) 78 + return -ENOMEM; 79 + plat->dma_cfg = dma_cfg; 80 + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); 81 + dma_cfg->fixed_burst = 82 + of_property_read_bool(np, "snps,fixed-burst"); 83 + dma_cfg->mixed_burst = 84 + of_property_read_bool(np, "snps,mixed-burst"); 85 + } 82 86 83 87 return 0; 84 88 }
+1 -2
drivers/net/ethernet/toshiba/ps3_gelic_net.c
··· 1466 1466 { 1467 1467 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1468 1468 /* NAPI */ 1469 - netif_napi_add(netdev, napi, 1470 - gelic_net_poll, GELIC_NET_NAPI_WEIGHT); 1469 + netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); 1471 1470 netdev->ethtool_ops = &gelic_ether_ethtool_ops; 1472 1471 netdev->netdev_ops = &gelic_netdevice_ops; 1473 1472 }
-1
drivers/net/ethernet/toshiba/ps3_gelic_net.h
··· 37 37 #define GELIC_NET_RXBUF_ALIGN 128 38 38 #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ 39 39 #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ 40 - #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) 41 40 #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL 42 41 43 42 #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
+1
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
··· 175 175 printk(KERN_WARNING "Setting MDIO clock divisor to " 176 176 "default %d\n", DEFAULT_CLOCK_DIVISOR); 177 177 clk_div = DEFAULT_CLOCK_DIVISOR; 178 + of_node_put(np1); 178 179 goto issue; 179 180 } 180 181
+4
drivers/net/usb/cdc_mbim.c
··· 400 400 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 401 401 .driver_info = (unsigned long)&cdc_mbim_info_zlp, 402 402 }, 403 + /* HP hs2434 Mobile Broadband Module needs ZLPs */ 404 + { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 405 + .driver_info = (unsigned long)&cdc_mbim_info_zlp, 406 + }, 403 407 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 404 408 .driver_info = (unsigned long)&cdc_mbim_info, 405 409 },
+10
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
··· 448 448 struct ieee80211_conf *cur_conf = &priv->hw->conf; 449 449 bool txok; 450 450 int slot; 451 + int hdrlen, padsize; 451 452 452 453 slot = strip_drv_header(priv, skb); 453 454 if (slot < 0) { ··· 504 503 spin_unlock_bh(&priv->tx.tx_lock); 505 504 506 505 ath9k_htc_tx_clear_slot(priv, slot); 506 + 507 + /* Remove padding before handing frame back to mac80211 */ 508 + hdrlen = ieee80211_get_hdrlen_from_skb(skb); 509 + 510 + padsize = hdrlen & 3; 511 + if (padsize && skb->len > hdrlen + padsize) { 512 + memmove(skb->data + padsize, skb->data, hdrlen); 513 + skb_pull(skb, padsize); 514 + } 507 515 508 516 /* Send status to mac80211 */ 509 517 ieee80211_tx_status(priv->hw, skb);
+2 -1
drivers/net/wireless/ath/ath9k/init.c
··· 802 802 IEEE80211_HW_PS_NULLFUNC_STACK | 803 803 IEEE80211_HW_SPECTRUM_MGMT | 804 804 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 805 - IEEE80211_HW_SUPPORTS_RC_TABLE; 805 + IEEE80211_HW_SUPPORTS_RC_TABLE | 806 + IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 806 807 807 808 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 808 809 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+1 -2
drivers/net/wireless/ath/ath9k/main.c
··· 173 173 { 174 174 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 175 175 176 - if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || 177 - AR_SREV_9550(sc->sc_ah)) 176 + if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) 178 177 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, 179 178 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); 180 179
+2 -1
drivers/net/wireless/ath/carl9170/main.c
··· 1860 1860 IEEE80211_HW_PS_NULLFUNC_STACK | 1861 1861 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | 1862 1862 IEEE80211_HW_SUPPORTS_RC_TABLE | 1863 - IEEE80211_HW_SIGNAL_DBM; 1863 + IEEE80211_HW_SIGNAL_DBM | 1864 + IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 1864 1865 1865 1866 if (!modparam_noht) { 1866 1867 /*
+1 -1
drivers/net/wireless/iwlegacy/4965-mac.c
··· 4464 4464 set_bit(S_RFKILL, &il->status); 4465 4465 } else { 4466 4466 clear_bit(S_RFKILL, &il->status); 4467 - wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); 4468 4467 il_force_reset(il, true); 4469 4468 } 4469 + wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); 4470 4470 4471 4471 handled |= CSR_INT_BIT_RF_KILL; 4472 4472 }
+2 -1
drivers/net/wireless/rt2x00/rt2800lib.c
··· 6133 6133 IEEE80211_HW_SUPPORTS_PS | 6134 6134 IEEE80211_HW_PS_NULLFUNC_STACK | 6135 6135 IEEE80211_HW_AMPDU_AGGREGATION | 6136 - IEEE80211_HW_REPORTS_TX_ACK_STATUS; 6136 + IEEE80211_HW_REPORTS_TX_ACK_STATUS | 6137 + IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 6137 6138 6138 6139 /* 6139 6140 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
+5 -1
include/linux/nsproxy.h
··· 14 14 * A structure to contain pointers to all per-process 15 15 * namespaces - fs (mount), uts, network, sysvipc, etc. 16 16 * 17 + * The pid namespace is an exception -- it's accessed using 18 + * task_active_pid_ns. The pid namespace here is the 19 + * namespace that children will use. 20 + * 17 21 * 'count' is the number of tasks holding a reference. 18 22 * The count for each namespace, then, will be the number 19 23 * of nsproxies pointing to it, not the number of tasks. ··· 31 27 struct uts_namespace *uts_ns; 32 28 struct ipc_namespace *ipc_ns; 33 29 struct mnt_namespace *mnt_ns; 34 - struct pid_namespace *pid_ns; 30 + struct pid_namespace *pid_ns_for_children; 35 31 struct net *net_ns; 36 32 }; 37 33 extern struct nsproxy init_nsproxy;
+1
include/net/busy_poll.h
··· 123 123 /* local bh are disabled so it is ok to use _BH */ 124 124 NET_ADD_STATS_BH(sock_net(sk), 125 125 LINUX_MIB_BUSYPOLLRXPACKETS, rc); 126 + cpu_relax(); 126 127 127 128 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && 128 129 !need_resched() && !busy_loop_timeout(end_time));
+18 -2
include/net/genetlink.h
··· 61 61 struct list_head ops_list; /* private */ 62 62 struct list_head family_list; /* private */ 63 63 struct list_head mcast_groups; /* private */ 64 + struct module *module; 64 65 }; 65 66 66 67 /** ··· 122 121 struct list_head ops_list; 123 122 }; 124 123 125 - extern int genl_register_family(struct genl_family *family); 126 - extern int genl_register_family_with_ops(struct genl_family *family, 124 + extern int __genl_register_family(struct genl_family *family); 125 + 126 + static inline int genl_register_family(struct genl_family *family) 127 + { 128 + family->module = THIS_MODULE; 129 + return __genl_register_family(family); 130 + } 131 + 132 + extern int __genl_register_family_with_ops(struct genl_family *family, 127 133 struct genl_ops *ops, size_t n_ops); 134 + 135 + static inline int genl_register_family_with_ops(struct genl_family *family, 136 + struct genl_ops *ops, size_t n_ops) 137 + { 138 + family->module = THIS_MODULE; 139 + return __genl_register_family_with_ops(family, ops, n_ops); 140 + } 141 + 128 142 extern int genl_unregister_family(struct genl_family *family); 129 143 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); 130 144 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+1
include/net/mac80211.h
··· 1499 1499 IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, 1500 1500 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, 1501 1501 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, 1502 + IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, 1502 1503 }; 1503 1504 1504 1505 /**
+8
include/net/route.h
··· 317 317 return hoplimit; 318 318 } 319 319 320 + static inline int ip_skb_dst_mtu(struct sk_buff *skb) 321 + { 322 + struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; 323 + 324 + return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? 325 + skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 326 + } 327 + 320 328 #endif /* _ROUTE_H */
+6
include/net/xfrm.h
··· 341 341 struct sk_buff *skb); 342 342 int (*transport_finish)(struct sk_buff *skb, 343 343 int async); 344 + void (*local_error)(struct sk_buff *skb, u32 mtu); 344 345 }; 345 346 346 347 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); 347 348 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); 349 + extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 350 + extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 348 351 349 352 extern void xfrm_state_delete_tunnel(struct xfrm_state *x); 350 353 ··· 1480 1477 extern int xfrm_output_resume(struct sk_buff *skb, int err); 1481 1478 extern int xfrm_output(struct sk_buff *skb); 1482 1479 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1480 + extern void xfrm_local_error(struct sk_buff *skb, int mtu); 1483 1481 extern int xfrm4_extract_header(struct sk_buff *skb); 1484 1482 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1485 1483 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, ··· 1501 1497 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); 1502 1498 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler); 1503 1499 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler); 1500 + extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu); 1504 1501 extern int xfrm6_extract_header(struct sk_buff *skb); 1505 1502 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1506 1503 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); ··· 1519 1514 extern int xfrm6_output_finish(struct sk_buff *skb); 1520 1515 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1521 1516 u8 **prevhdr); 1517 + extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu); 1522 1518 1523 1519 #ifdef CONFIG_XFRM 1524 1520 extern int 
xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+3 -2
kernel/fork.c
··· 1177 1177 * don't allow the creation of threads. 1178 1178 */ 1179 1179 if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && 1180 - (task_active_pid_ns(current) != current->nsproxy->pid_ns)) 1180 + (task_active_pid_ns(current) != 1181 + current->nsproxy->pid_ns_for_children)) 1181 1182 return ERR_PTR(-EINVAL); 1182 1183 1183 1184 retval = security_task_create(clone_flags); ··· 1352 1351 1353 1352 if (pid != &init_struct_pid) { 1354 1353 retval = -ENOMEM; 1355 - pid = alloc_pid(p->nsproxy->pid_ns); 1354 + pid = alloc_pid(p->nsproxy->pid_ns_for_children); 1356 1355 if (!pid) 1357 1356 goto bad_fork_cleanup_io; 1358 1357 }
+14 -13
kernel/nsproxy.c
··· 29 29 static struct kmem_cache *nsproxy_cachep; 30 30 31 31 struct nsproxy init_nsproxy = { 32 - .count = ATOMIC_INIT(1), 33 - .uts_ns = &init_uts_ns, 32 + .count = ATOMIC_INIT(1), 33 + .uts_ns = &init_uts_ns, 34 34 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) 35 - .ipc_ns = &init_ipc_ns, 35 + .ipc_ns = &init_ipc_ns, 36 36 #endif 37 - .mnt_ns = NULL, 38 - .pid_ns = &init_pid_ns, 37 + .mnt_ns = NULL, 38 + .pid_ns_for_children = &init_pid_ns, 39 39 #ifdef CONFIG_NET 40 - .net_ns = &init_net, 40 + .net_ns = &init_net, 41 41 #endif 42 42 }; 43 43 ··· 85 85 goto out_ipc; 86 86 } 87 87 88 - new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns); 89 - if (IS_ERR(new_nsp->pid_ns)) { 90 - err = PTR_ERR(new_nsp->pid_ns); 88 + new_nsp->pid_ns_for_children = 89 + copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children); 90 + if (IS_ERR(new_nsp->pid_ns_for_children)) { 91 + err = PTR_ERR(new_nsp->pid_ns_for_children); 91 92 goto out_pid; 92 93 } 93 94 ··· 101 100 return new_nsp; 102 101 103 102 out_net: 104 - if (new_nsp->pid_ns) 105 - put_pid_ns(new_nsp->pid_ns); 103 + if (new_nsp->pid_ns_for_children) 104 + put_pid_ns(new_nsp->pid_ns_for_children); 106 105 out_pid: 107 106 if (new_nsp->ipc_ns) 108 107 put_ipc_ns(new_nsp->ipc_ns); ··· 175 174 put_uts_ns(ns->uts_ns); 176 175 if (ns->ipc_ns) 177 176 put_ipc_ns(ns->ipc_ns); 178 - if (ns->pid_ns) 179 - put_pid_ns(ns->pid_ns); 177 + if (ns->pid_ns_for_children) 178 + put_pid_ns(ns->pid_ns_for_children); 180 179 put_net(ns->net_ns); 181 180 kmem_cache_free(nsproxy_cachep, ns); 182 181 }
+2 -2
kernel/pid_namespace.c
··· 349 349 if (ancestor != active) 350 350 return -EINVAL; 351 351 352 - put_pid_ns(nsproxy->pid_ns); 353 - nsproxy->pid_ns = get_pid_ns(new); 352 + put_pid_ns(nsproxy->pid_ns_for_children); 353 + nsproxy->pid_ns_for_children = get_pid_ns(new); 354 354 return 0; 355 355 } 356 356
+1 -1
net/bridge/br_device.c
··· 71 71 72 72 mdst = br_mdb_get(br, skb, vid); 73 73 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && 74 - br_multicast_querier_exists(br)) 74 + br_multicast_querier_exists(br, eth_hdr(skb))) 75 75 br_multicast_deliver(mdst, skb); 76 76 else 77 77 br_flood_deliver(br, skb, false);
+1 -1
net/bridge/br_input.c
··· 102 102 } else if (is_multicast_ether_addr(dest)) { 103 103 mdst = br_mdb_get(br, skb, vid); 104 104 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && 105 - br_multicast_querier_exists(br)) { 105 + br_multicast_querier_exists(br, eth_hdr(skb))) { 106 106 if ((mdst && mdst->mglist) || 107 107 br_multicast_is_router(br)) 108 108 skb2 = skb;
+9 -5
net/bridge/br_mdb.c
··· 414 414 if (!netif_running(br->dev) || br->multicast_disabled) 415 415 return -EINVAL; 416 416 417 - if (timer_pending(&br->multicast_querier_timer)) 418 - return -EBUSY; 419 - 420 417 ip.proto = entry->addr.proto; 421 - if (ip.proto == htons(ETH_P_IP)) 418 + if (ip.proto == htons(ETH_P_IP)) { 419 + if (timer_pending(&br->ip4_querier.timer)) 420 + return -EBUSY; 421 + 422 422 ip.u.ip4 = entry->addr.u.ip4; 423 423 #if IS_ENABLED(CONFIG_IPV6) 424 - else 424 + } else { 425 + if (timer_pending(&br->ip6_querier.timer)) 426 + return -EBUSY; 427 + 425 428 ip.u.ip6 = entry->addr.u.ip6; 426 429 #endif 430 + } 427 431 428 432 spin_lock_bh(&br->multicast_lock); 429 433 mdb = mlock_dereference(br->mdb, br);
+187 -78
net/bridge/br_multicast.c
··· 33 33 34 34 #include "br_private.h" 35 35 36 - static void br_multicast_start_querier(struct net_bridge *br); 36 + static void br_multicast_start_querier(struct net_bridge *br, 37 + struct bridge_mcast_query *query); 37 38 unsigned int br_mdb_rehash_seq; 38 39 39 40 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) ··· 756 755 { 757 756 } 758 757 759 - static void br_multicast_querier_expired(unsigned long data) 758 + static void br_multicast_querier_expired(struct net_bridge *br, 759 + struct bridge_mcast_query *query) 760 760 { 761 - struct net_bridge *br = (void *)data; 762 - 763 761 spin_lock(&br->multicast_lock); 764 762 if (!netif_running(br->dev) || br->multicast_disabled) 765 763 goto out; 766 764 767 - br_multicast_start_querier(br); 765 + br_multicast_start_querier(br, query); 768 766 769 767 out: 770 768 spin_unlock(&br->multicast_lock); 771 769 } 770 + 771 + static void br_ip4_multicast_querier_expired(unsigned long data) 772 + { 773 + struct net_bridge *br = (void *)data; 774 + 775 + br_multicast_querier_expired(br, &br->ip4_query); 776 + } 777 + 778 + #if IS_ENABLED(CONFIG_IPV6) 779 + static void br_ip6_multicast_querier_expired(unsigned long data) 780 + { 781 + struct net_bridge *br = (void *)data; 782 + 783 + br_multicast_querier_expired(br, &br->ip6_query); 784 + } 785 + #endif 772 786 773 787 static void __br_multicast_send_query(struct net_bridge *br, 774 788 struct net_bridge_port *port, ··· 805 789 } 806 790 807 791 static void br_multicast_send_query(struct net_bridge *br, 808 - struct net_bridge_port *port, u32 sent) 792 + struct net_bridge_port *port, 793 + struct bridge_mcast_query *query) 809 794 { 810 795 unsigned long time; 811 796 struct br_ip br_group; 797 + struct bridge_mcast_querier *querier = NULL; 812 798 813 799 if (!netif_running(br->dev) || br->multicast_disabled || 814 - !br->multicast_querier || 815 - timer_pending(&br->multicast_querier_timer)) 800 + !br->multicast_querier) 816 801 return; 817 
802 818 803 memset(&br_group.u, 0, sizeof(br_group.u)); 819 804 820 - br_group.proto = htons(ETH_P_IP); 821 - __br_multicast_send_query(br, port, &br_group); 822 - 805 + if (port ? (query == &port->ip4_query) : 806 + (query == &br->ip4_query)) { 807 + querier = &br->ip4_querier; 808 + br_group.proto = htons(ETH_P_IP); 823 809 #if IS_ENABLED(CONFIG_IPV6) 824 - br_group.proto = htons(ETH_P_IPV6); 825 - __br_multicast_send_query(br, port, &br_group); 810 + } else { 811 + querier = &br->ip6_querier; 812 + br_group.proto = htons(ETH_P_IPV6); 826 813 #endif 814 + } 815 + 816 + if (!querier || timer_pending(&querier->timer)) 817 + return; 818 + 819 + __br_multicast_send_query(br, port, &br_group); 827 820 828 821 time = jiffies; 829 - time += sent < br->multicast_startup_query_count ? 822 + time += query->startup_sent < br->multicast_startup_query_count ? 830 823 br->multicast_startup_query_interval : 831 824 br->multicast_query_interval; 832 - mod_timer(port ? &port->multicast_query_timer : 833 - &br->multicast_query_timer, time); 825 + mod_timer(&query->timer, time); 834 826 } 835 827 836 - static void br_multicast_port_query_expired(unsigned long data) 828 + static void br_multicast_port_query_expired(struct net_bridge_port *port, 829 + struct bridge_mcast_query *query) 837 830 { 838 - struct net_bridge_port *port = (void *)data; 839 831 struct net_bridge *br = port->br; 840 832 841 833 spin_lock(&br->multicast_lock); ··· 851 827 port->state == BR_STATE_BLOCKING) 852 828 goto out; 853 829 854 - if (port->multicast_startup_queries_sent < 855 - br->multicast_startup_query_count) 856 - port->multicast_startup_queries_sent++; 830 + if (query->startup_sent < br->multicast_startup_query_count) 831 + query->startup_sent++; 857 832 858 - br_multicast_send_query(port->br, port, 859 - port->multicast_startup_queries_sent); 833 + br_multicast_send_query(port->br, port, query); 860 834 861 835 out: 862 836 spin_unlock(&br->multicast_lock); 863 837 } 838 + 839 + static void 
br_ip4_multicast_port_query_expired(unsigned long data) 840 + { 841 + struct net_bridge_port *port = (void *)data; 842 + 843 + br_multicast_port_query_expired(port, &port->ip4_query); 844 + } 845 + 846 + #if IS_ENABLED(CONFIG_IPV6) 847 + static void br_ip6_multicast_port_query_expired(unsigned long data) 848 + { 849 + struct net_bridge_port *port = (void *)data; 850 + 851 + br_multicast_port_query_expired(port, &port->ip6_query); 852 + } 853 + #endif 864 854 865 855 void br_multicast_add_port(struct net_bridge_port *port) 866 856 { ··· 882 844 883 845 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 884 846 (unsigned long)port); 885 - setup_timer(&port->multicast_query_timer, 886 - br_multicast_port_query_expired, (unsigned long)port); 847 + setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired, 848 + (unsigned long)port); 849 + #if IS_ENABLED(CONFIG_IPV6) 850 + setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired, 851 + (unsigned long)port); 852 + #endif 887 853 } 888 854 889 855 void br_multicast_del_port(struct net_bridge_port *port) ··· 895 853 del_timer_sync(&port->multicast_router_timer); 896 854 } 897 855 898 - static void __br_multicast_enable_port(struct net_bridge_port *port) 856 + static void br_multicast_enable(struct bridge_mcast_query *query) 899 857 { 900 - port->multicast_startup_queries_sent = 0; 858 + query->startup_sent = 0; 901 859 902 - if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || 903 - del_timer(&port->multicast_query_timer)) 904 - mod_timer(&port->multicast_query_timer, jiffies); 860 + if (try_to_del_timer_sync(&query->timer) >= 0 || 861 + del_timer(&query->timer)) 862 + mod_timer(&query->timer, jiffies); 905 863 } 906 864 907 865 void br_multicast_enable_port(struct net_bridge_port *port) ··· 912 870 if (br->multicast_disabled || !netif_running(br->dev)) 913 871 goto out; 914 872 915 - __br_multicast_enable_port(port); 873 + br_multicast_enable(&port->ip4_query); 
874 + #if IS_ENABLED(CONFIG_IPV6) 875 + br_multicast_enable(&port->ip6_query); 876 + #endif 916 877 917 878 out: 918 879 spin_unlock(&br->multicast_lock); ··· 934 889 if (!hlist_unhashed(&port->rlist)) 935 890 hlist_del_init_rcu(&port->rlist); 936 891 del_timer(&port->multicast_router_timer); 937 - del_timer(&port->multicast_query_timer); 892 + del_timer(&port->ip4_query.timer); 893 + #if IS_ENABLED(CONFIG_IPV6) 894 + del_timer(&port->ip6_query.timer); 895 + #endif 938 896 spin_unlock(&br->multicast_lock); 939 897 } 940 898 ··· 1062 1014 } 1063 1015 #endif 1064 1016 1065 - static void br_multicast_update_querier_timer(struct net_bridge *br, 1066 - unsigned long max_delay) 1017 + static void 1018 + br_multicast_update_querier_timer(struct net_bridge *br, 1019 + struct bridge_mcast_querier *querier, 1020 + unsigned long max_delay) 1067 1021 { 1068 - if (!timer_pending(&br->multicast_querier_timer)) 1069 - br->multicast_querier_delay_time = jiffies + max_delay; 1022 + if (!timer_pending(&querier->timer)) 1023 + querier->delay_time = jiffies + max_delay; 1070 1024 1071 - mod_timer(&br->multicast_querier_timer, 1072 - jiffies + br->multicast_querier_interval); 1025 + mod_timer(&querier->timer, jiffies + br->multicast_querier_interval); 1073 1026 } 1074 1027 1075 1028 /* ··· 1123 1074 1124 1075 static void br_multicast_query_received(struct net_bridge *br, 1125 1076 struct net_bridge_port *port, 1077 + struct bridge_mcast_querier *querier, 1126 1078 int saddr, 1127 1079 unsigned long max_delay) 1128 1080 { 1129 1081 if (saddr) 1130 - br_multicast_update_querier_timer(br, max_delay); 1131 - else if (timer_pending(&br->multicast_querier_timer)) 1082 + br_multicast_update_querier_timer(br, querier, max_delay); 1083 + else if (timer_pending(&querier->timer)) 1132 1084 return; 1133 1085 1134 1086 br_multicast_mark_router(br, port); ··· 1179 1129 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1180 1130 } 1181 1131 1182 - br_multicast_query_received(br, port, 
!!iph->saddr, max_delay); 1132 + br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, 1133 + max_delay); 1183 1134 1184 1135 if (!group) 1185 1136 goto out; ··· 1254 1203 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1255 1204 if (!mld2q->mld2q_nsrcs) 1256 1205 group = &mld2q->mld2q_mca; 1257 - max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1; 1206 + 1207 + max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL); 1258 1208 } 1259 1209 1260 - br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr), 1261 - max_delay); 1210 + br_multicast_query_received(br, port, &br->ip6_querier, 1211 + !ipv6_addr_any(&ip6h->saddr), max_delay); 1262 1212 1263 1213 if (!group) 1264 1214 goto out; ··· 1296 1244 1297 1245 static void br_multicast_leave_group(struct net_bridge *br, 1298 1246 struct net_bridge_port *port, 1299 - struct br_ip *group) 1247 + struct br_ip *group, 1248 + struct bridge_mcast_querier *querier, 1249 + struct bridge_mcast_query *query) 1300 1250 { 1301 1251 struct net_bridge_mdb_htable *mdb; 1302 1252 struct net_bridge_mdb_entry *mp; ··· 1309 1255 spin_lock(&br->multicast_lock); 1310 1256 if (!netif_running(br->dev) || 1311 1257 (port && port->state == BR_STATE_DISABLED) || 1312 - timer_pending(&br->multicast_querier_timer)) 1258 + timer_pending(&querier->timer)) 1313 1259 goto out; 1314 1260 1315 1261 mdb = mlock_dereference(br->mdb, br); ··· 1317 1263 if (!mp) 1318 1264 goto out; 1319 1265 1320 - if (br->multicast_querier && 1321 - !timer_pending(&br->multicast_querier_timer)) { 1266 + if (br->multicast_querier) { 1322 1267 __br_multicast_send_query(br, port, &mp->addr); 1323 1268 1324 1269 time = jiffies + br->multicast_last_member_count * 1325 1270 br->multicast_last_member_interval; 1326 - mod_timer(port ? 
&port->multicast_query_timer : 1327 - &br->multicast_query_timer, time); 1271 + 1272 + mod_timer(&query->timer, time); 1328 1273 1329 1274 for (p = mlock_dereference(mp->ports, br); 1330 1275 p != NULL; ··· 1376 1323 mod_timer(&mp->timer, time); 1377 1324 } 1378 1325 } 1379 - 1380 1326 out: 1381 1327 spin_unlock(&br->multicast_lock); 1382 1328 } ··· 1386 1334 __u16 vid) 1387 1335 { 1388 1336 struct br_ip br_group; 1337 + struct bridge_mcast_query *query = port ? &port->ip4_query : 1338 + &br->ip4_query; 1389 1339 1390 1340 if (ipv4_is_local_multicast(group)) 1391 1341 return; ··· 1396 1342 br_group.proto = htons(ETH_P_IP); 1397 1343 br_group.vid = vid; 1398 1344 1399 - br_multicast_leave_group(br, port, &br_group); 1345 + br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query); 1400 1346 } 1401 1347 1402 1348 #if IS_ENABLED(CONFIG_IPV6) ··· 1406 1352 __u16 vid) 1407 1353 { 1408 1354 struct br_ip br_group; 1355 + struct bridge_mcast_query *query = port ? &port->ip6_query : 1356 + &br->ip6_query; 1357 + 1409 1358 1410 1359 if (!ipv6_is_transient_multicast(group)) 1411 1360 return; ··· 1417 1360 br_group.proto = htons(ETH_P_IPV6); 1418 1361 br_group.vid = vid; 1419 1362 1420 - br_multicast_leave_group(br, port, &br_group); 1363 + br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query); 1421 1364 } 1422 1365 #endif 1423 1366 ··· 1679 1622 return 0; 1680 1623 } 1681 1624 1682 - static void br_multicast_query_expired(unsigned long data) 1625 + static void br_multicast_query_expired(struct net_bridge *br, 1626 + struct bridge_mcast_query *query) 1627 + { 1628 + spin_lock(&br->multicast_lock); 1629 + if (query->startup_sent < br->multicast_startup_query_count) 1630 + query->startup_sent++; 1631 + 1632 + br_multicast_send_query(br, NULL, query); 1633 + spin_unlock(&br->multicast_lock); 1634 + } 1635 + 1636 + static void br_ip4_multicast_query_expired(unsigned long data) 1683 1637 { 1684 1638 struct net_bridge *br = (void *)data; 1685 1639 1686 
- spin_lock(&br->multicast_lock); 1687 - if (br->multicast_startup_queries_sent < 1688 - br->multicast_startup_query_count) 1689 - br->multicast_startup_queries_sent++; 1690 - 1691 - br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); 1692 - 1693 - spin_unlock(&br->multicast_lock); 1640 + br_multicast_query_expired(br, &br->ip4_query); 1694 1641 } 1642 + 1643 + #if IS_ENABLED(CONFIG_IPV6) 1644 + static void br_ip6_multicast_query_expired(unsigned long data) 1645 + { 1646 + struct net_bridge *br = (void *)data; 1647 + 1648 + br_multicast_query_expired(br, &br->ip6_query); 1649 + } 1650 + #endif 1695 1651 1696 1652 void br_multicast_init(struct net_bridge *br) 1697 1653 { ··· 1724 1654 br->multicast_querier_interval = 255 * HZ; 1725 1655 br->multicast_membership_interval = 260 * HZ; 1726 1656 1727 - br->multicast_querier_delay_time = 0; 1657 + br->ip4_querier.delay_time = 0; 1658 + #if IS_ENABLED(CONFIG_IPV6) 1659 + br->ip6_querier.delay_time = 0; 1660 + #endif 1728 1661 1729 1662 spin_lock_init(&br->multicast_lock); 1730 1663 setup_timer(&br->multicast_router_timer, 1731 1664 br_multicast_local_router_expired, 0); 1732 - setup_timer(&br->multicast_querier_timer, 1733 - br_multicast_querier_expired, (unsigned long)br); 1734 - setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1665 + setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired, 1735 1666 (unsigned long)br); 1667 + setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired, 1668 + (unsigned long)br); 1669 + #if IS_ENABLED(CONFIG_IPV6) 1670 + setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired, 1671 + (unsigned long)br); 1672 + setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired, 1673 + (unsigned long)br); 1674 + #endif 1736 1675 } 1737 1676 1738 - void br_multicast_open(struct net_bridge *br) 1677 + static void __br_multicast_open(struct net_bridge *br, 1678 + struct bridge_mcast_query *query) 1739 1679 { 1740 - 
br->multicast_startup_queries_sent = 0; 1680 + query->startup_sent = 0; 1741 1681 1742 1682 if (br->multicast_disabled) 1743 1683 return; 1744 1684 1745 - mod_timer(&br->multicast_query_timer, jiffies); 1685 + mod_timer(&query->timer, jiffies); 1686 + } 1687 + 1688 + void br_multicast_open(struct net_bridge *br) 1689 + { 1690 + __br_multicast_open(br, &br->ip4_query); 1691 + #if IS_ENABLED(CONFIG_IPV6) 1692 + __br_multicast_open(br, &br->ip6_query); 1693 + #endif 1746 1694 } 1747 1695 1748 1696 void br_multicast_stop(struct net_bridge *br) ··· 1772 1684 int i; 1773 1685 1774 1686 del_timer_sync(&br->multicast_router_timer); 1775 - del_timer_sync(&br->multicast_querier_timer); 1776 - del_timer_sync(&br->multicast_query_timer); 1687 + del_timer_sync(&br->ip4_querier.timer); 1688 + del_timer_sync(&br->ip4_query.timer); 1689 + #if IS_ENABLED(CONFIG_IPV6) 1690 + del_timer_sync(&br->ip6_querier.timer); 1691 + del_timer_sync(&br->ip6_query.timer); 1692 + #endif 1777 1693 1778 1694 spin_lock_bh(&br->multicast_lock); 1779 1695 mdb = mlock_dereference(br->mdb, br); ··· 1880 1788 return err; 1881 1789 } 1882 1790 1883 - static void br_multicast_start_querier(struct net_bridge *br) 1791 + static void br_multicast_start_querier(struct net_bridge *br, 1792 + struct bridge_mcast_query *query) 1884 1793 { 1885 1794 struct net_bridge_port *port; 1886 1795 1887 - br_multicast_open(br); 1796 + __br_multicast_open(br, query); 1888 1797 1889 1798 list_for_each_entry(port, &br->port_list, list) { 1890 1799 if (port->state == BR_STATE_DISABLED || 1891 1800 port->state == BR_STATE_BLOCKING) 1892 1801 continue; 1893 1802 1894 - __br_multicast_enable_port(port); 1803 + if (query == &br->ip4_query) 1804 + br_multicast_enable(&port->ip4_query); 1805 + #if IS_ENABLED(CONFIG_IPV6) 1806 + else 1807 + br_multicast_enable(&port->ip6_query); 1808 + #endif 1895 1809 } 1896 1810 } 1897 1811 ··· 1932 1834 goto rollback; 1933 1835 } 1934 1836 1935 - br_multicast_start_querier(br); 1837 + 
br_multicast_start_querier(br, &br->ip4_query); 1838 + #if IS_ENABLED(CONFIG_IPV6) 1839 + br_multicast_start_querier(br, &br->ip6_query); 1840 + #endif 1936 1841 1937 1842 unlock: 1938 1843 spin_unlock_bh(&br->multicast_lock); ··· 1958 1857 goto unlock; 1959 1858 1960 1859 max_delay = br->multicast_query_response_interval; 1961 - if (!timer_pending(&br->multicast_querier_timer)) 1962 - br->multicast_querier_delay_time = jiffies + max_delay; 1963 1860 1964 - br_multicast_start_querier(br); 1861 + if (!timer_pending(&br->ip4_querier.timer)) 1862 + br->ip4_querier.delay_time = jiffies + max_delay; 1863 + 1864 + br_multicast_start_querier(br, &br->ip4_query); 1865 + 1866 + #if IS_ENABLED(CONFIG_IPV6) 1867 + if (!timer_pending(&br->ip6_querier.timer)) 1868 + br->ip6_querier.delay_time = jiffies + max_delay; 1869 + 1870 + br_multicast_start_querier(br, &br->ip6_query); 1871 + #endif 1965 1872 1966 1873 unlock: 1967 1874 spin_unlock_bh(&br->multicast_lock);
+46 -11
net/bridge/br_private.h
··· 66 66 __u16 vid; 67 67 }; 68 68 69 + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 70 + /* our own querier */ 71 + struct bridge_mcast_query { 72 + struct timer_list timer; 73 + u32 startup_sent; 74 + }; 75 + 76 + /* other querier */ 77 + struct bridge_mcast_querier { 78 + struct timer_list timer; 79 + unsigned long delay_time; 80 + }; 81 + #endif 82 + 69 83 struct net_port_vlans { 70 84 u16 port_idx; 71 85 u16 pvid; ··· 176 162 #define BR_FLOOD 0x00000040 177 163 178 164 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 179 - u32 multicast_startup_queries_sent; 165 + struct bridge_mcast_query ip4_query; 166 + #if IS_ENABLED(CONFIG_IPV6) 167 + struct bridge_mcast_query ip6_query; 168 + #endif /* IS_ENABLED(CONFIG_IPV6) */ 180 169 unsigned char multicast_router; 181 170 struct timer_list multicast_router_timer; 182 - struct timer_list multicast_query_timer; 183 171 struct hlist_head mglist; 184 172 struct hlist_node rlist; 185 173 #endif ··· 274 258 u32 hash_max; 275 259 276 260 u32 multicast_last_member_count; 277 - u32 multicast_startup_queries_sent; 278 261 u32 multicast_startup_query_count; 279 262 280 263 unsigned long multicast_last_member_interval; ··· 282 267 unsigned long multicast_query_interval; 283 268 unsigned long multicast_query_response_interval; 284 269 unsigned long multicast_startup_query_interval; 285 - unsigned long multicast_querier_delay_time; 286 270 287 271 spinlock_t multicast_lock; 288 272 struct net_bridge_mdb_htable __rcu *mdb; 289 273 struct hlist_head router_list; 290 274 291 275 struct timer_list multicast_router_timer; 292 - struct timer_list multicast_querier_timer; 293 - struct timer_list multicast_query_timer; 276 + struct bridge_mcast_querier ip4_querier; 277 + struct bridge_mcast_query ip4_query; 278 + #if IS_ENABLED(CONFIG_IPV6) 279 + struct bridge_mcast_querier ip6_querier; 280 + struct bridge_mcast_query ip6_query; 281 + #endif /* IS_ENABLED(CONFIG_IPV6) */ 294 282 #endif 295 283 296 284 struct timer_list hello_timer; ··· 521 503 
timer_pending(&br->multicast_router_timer)); 522 504 } 523 505 524 - static inline bool br_multicast_querier_exists(struct net_bridge *br) 506 + static inline bool 507 + __br_multicast_querier_exists(struct net_bridge *br, 508 + struct bridge_mcast_querier *querier) 525 509 { 526 - return time_is_before_jiffies(br->multicast_querier_delay_time) && 527 - (br->multicast_querier || 528 - timer_pending(&br->multicast_querier_timer)); 510 + return time_is_before_jiffies(querier->delay_time) && 511 + (br->multicast_querier || timer_pending(&querier->timer)); 512 + } 513 + 514 + static inline bool br_multicast_querier_exists(struct net_bridge *br, 515 + struct ethhdr *eth) 516 + { 517 + switch (eth->h_proto) { 518 + case (htons(ETH_P_IP)): 519 + return __br_multicast_querier_exists(br, &br->ip4_querier); 520 + #if IS_ENABLED(CONFIG_IPV6) 521 + case (htons(ETH_P_IPV6)): 522 + return __br_multicast_querier_exists(br, &br->ip6_querier); 523 + #endif 524 + default: 525 + return false; 526 + } 529 527 } 530 528 #else 531 529 static inline int br_multicast_rcv(struct net_bridge *br, ··· 599 565 { 600 566 return 0; 601 567 } 602 - static inline bool br_multicast_querier_exists(struct net_bridge *br) 568 + static inline bool br_multicast_querier_exists(struct net_bridge *br, 569 + struct ethhdr *eth) 603 570 { 604 571 return false; 605 572 }
+3 -8
net/core/flow_dissector.c
··· 346 346 if (new_index < 0) 347 347 new_index = skb_tx_hash(dev, skb); 348 348 349 - if (queue_index != new_index && sk) { 350 - struct dst_entry *dst = 351 - rcu_dereference_check(sk->sk_dst_cache, 1); 352 - 353 - if (dst && skb_dst(skb) == dst) 354 - sk_tx_queue_set(sk, queue_index); 355 - 356 - } 349 + if (queue_index != new_index && sk && 350 + rcu_access_pointer(sk->sk_dst_cache)) 351 + sk_tx_queue_set(sk, queue_index); 357 352 358 353 queue_index = new_index; 359 354 }
+1 -1
net/core/scm.c
··· 54 54 return -EINVAL; 55 55 56 56 if ((creds->pid == task_tgid_vnr(current) || 57 - ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && 57 + ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) && 58 58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 59 59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && 60 60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
-8
net/ipv4/ip_output.c
··· 211 211 return -EINVAL; 212 212 } 213 213 214 - static inline int ip_skb_dst_mtu(struct sk_buff *skb) 215 - { 216 - struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; 217 - 218 - return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? 219 - skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 220 - } 221 - 222 214 static int ip_finish_output(struct sk_buff *skb) 223 215 { 224 216 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+2 -3
net/ipv4/ipip.c
··· 190 190 struct ip_tunnel *tunnel; 191 191 const struct iphdr *iph; 192 192 193 - if (iptunnel_pull_header(skb, 0, tpi.proto)) 194 - goto drop; 195 - 196 193 iph = ip_hdr(skb); 197 194 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 198 195 iph->saddr, iph->daddr, 0); 199 196 if (tunnel) { 200 197 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 198 + goto drop; 199 + if (iptunnel_pull_header(skb, 0, tpi.proto)) 201 200 goto drop; 202 201 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 203 202 }
+2 -1
net/ipv4/raw.c
··· 571 571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 572 572 RT_SCOPE_UNIVERSE, 573 573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 574 - inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, 574 + inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP | 575 + (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), 575 576 daddr, saddr, 0, 0); 576 577 577 578 if (!inet->hdrincl) {
+6 -3
net/ipv4/tcp_input.c
··· 3535 3535 ++ptr; 3536 3536 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3537 3537 ++ptr; 3538 - tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3538 + if (*ptr) 3539 + tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3540 + else 3541 + tp->rx_opt.rcv_tsecr = 0; 3539 3542 return true; 3540 3543 } 3541 3544 return false; ··· 3563 3560 } 3564 3561 3565 3562 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); 3566 - if (tp->rx_opt.saw_tstamp) 3563 + if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 3567 3564 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3568 3565 3569 3566 return true; ··· 5319 5316 int saved_clamp = tp->rx_opt.mss_clamp; 5320 5317 5321 5318 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); 5322 - if (tp->rx_opt.saw_tstamp) 5319 + if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 5323 5320 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 5324 5321 5325 5322 if (th->ack) {
+3 -1
net/ipv4/tcp_output.c
··· 2670 2670 int tcp_header_size; 2671 2671 int mss; 2672 2672 2673 - skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); 2673 + skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2674 2674 if (unlikely(!skb)) { 2675 2675 dst_release(dst); 2676 2676 return NULL; ··· 2814 2814 2815 2815 if (likely(!tp->repair)) 2816 2816 tp->rcv_nxt = 0; 2817 + else 2818 + tp->rcv_tstamp = tcp_time_stamp; 2817 2819 tp->rcv_wup = tp->rcv_nxt; 2818 2820 tp->copied_seq = tp->rcv_nxt; 2819 2821
+11 -5
net/ipv4/xfrm4_output.c
··· 21 21 static int xfrm4_tunnel_check_size(struct sk_buff *skb) 22 22 { 23 23 int mtu, ret = 0; 24 - struct dst_entry *dst; 25 24 26 25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) 27 26 goto out; ··· 28 29 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) 29 30 goto out; 30 31 31 - dst = skb_dst(skb); 32 - mtu = dst_mtu(dst); 32 + mtu = dst_mtu(skb_dst(skb)); 33 33 if (skb->len > mtu) { 34 34 if (skb->sk) 35 - ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, 36 - inet_sk(skb->sk)->inet_dport, mtu); 35 + xfrm_local_error(skb, mtu); 37 36 else 38 37 icmp_send(skb, ICMP_DEST_UNREACH, 39 38 ICMP_FRAG_NEEDED, htonl(mtu)); ··· 95 98 NULL, dst->dev, 96 99 x->outer_mode->afinfo->output_finish, 97 100 !(IPCB(skb)->flags & IPSKB_REROUTED)); 101 + } 102 + 103 + void xfrm4_local_error(struct sk_buff *skb, u32 mtu) 104 + { 105 + struct iphdr *hdr; 106 + 107 + hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 108 + ip_local_error(skb->sk, EMSGSIZE, hdr->daddr, 109 + inet_sk(skb->sk)->inet_dport, mtu); 98 110 }
+1
net/ipv4/xfrm4_state.c
··· 83 83 .extract_input = xfrm4_extract_input, 84 84 .extract_output = xfrm4_extract_output, 85 85 .transport_finish = xfrm4_transport_finish, 86 + .local_error = xfrm4_local_error, 86 87 }; 87 88 88 89 void __init xfrm4_state_init(void)
+5
net/ipv6/ip6_gre.c
··· 724 724 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 725 725 } 726 726 727 + if (likely(!skb->encapsulation)) { 728 + skb_reset_inner_headers(skb); 729 + skb->encapsulation = 1; 730 + } 731 + 727 732 skb_push(skb, gre_hlen); 728 733 skb_reset_network_header(skb); 729 734 skb_set_transport_header(skb, sizeof(*ipv6h));
+3
net/ipv6/ip6_output.c
··· 238 238 hdr->saddr = fl6->saddr; 239 239 hdr->daddr = *first_hop; 240 240 241 + skb->protocol = htons(ETH_P_IPV6); 241 242 skb->priority = sk->sk_priority; 242 243 skb->mark = sk->sk_mark; 243 244 ··· 1058 1057 /* initialize protocol header pointer */ 1059 1058 skb->transport_header = skb->network_header + fragheaderlen; 1060 1059 1060 + skb->protocol = htons(ETH_P_IPV6); 1061 1061 skb->ip_summed = CHECKSUM_PARTIAL; 1062 1062 skb->csum = 0; 1063 1063 } ··· 1361 1359 /* 1362 1360 * Fill in the control structures 1363 1361 */ 1362 + skb->protocol = htons(ETH_P_IPV6); 1364 1363 skb->ip_summed = CHECKSUM_NONE; 1365 1364 skb->csum = 0; 1366 1365 /* reserve for fragmentation and ipsec header */
+6
net/ipv6/ip6_tunnel.c
··· 1027 1027 init_tel_txopt(&opt, encap_limit); 1028 1028 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 1029 1029 } 1030 + 1031 + if (likely(!skb->encapsulation)) { 1032 + skb_reset_inner_headers(skb); 1033 + skb->encapsulation = 1; 1034 + } 1035 + 1030 1036 skb_push(skb, sizeof(struct ipv6hdr)); 1031 1037 skb_reset_network_header(skb); 1032 1038 ipv6h = ipv6_hdr(skb);
+1
net/ipv6/raw.c
··· 628 628 goto error; 629 629 skb_reserve(skb, hlen); 630 630 631 + skb->protocol = htons(ETH_P_IPV6); 631 632 skb->priority = sk->sk_priority; 632 633 skb->mark = sk->sk_mark; 633 634 skb_dst_set(skb, &rt->dst);
+7 -4
net/ipv6/sit.c
··· 645 645 const struct iphdr *iph; 646 646 struct ip_tunnel *tunnel; 647 647 648 - if (iptunnel_pull_header(skb, 0, tpi.proto)) 649 - goto drop; 650 - 651 648 iph = ip_hdr(skb); 652 - 653 649 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 654 650 iph->saddr, iph->daddr); 655 651 if (tunnel != NULL) { ··· 654 658 goto drop; 655 659 656 660 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 661 + goto drop; 662 + if (iptunnel_pull_header(skb, 0, tpi.proto)) 657 663 goto drop; 658 664 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 659 665 } ··· 885 887 if (ttl == 0) 886 888 ttl = iph6->hop_limit; 887 889 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 890 + 891 + if (likely(!skb->encapsulation)) { 892 + skb_reset_inner_headers(skb); 893 + skb->encapsulation = 1; 894 + } 888 895 889 896 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, 890 897 IPPROTO_IPV6, tos, ttl, df);
+15 -6
net/ipv6/xfrm6_output.c
··· 34 34 struct sock *sk = skb->sk; 35 35 36 36 if (sk) { 37 - proto = sk->sk_protocol; 37 + if (sk->sk_family != AF_INET6) 38 + return 0; 38 39 40 + proto = sk->sk_protocol; 39 41 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) 40 42 return inet6_sk(sk)->dontfrag; 41 43 } ··· 56 54 ipv6_local_rxpmtu(sk, &fl6, mtu); 57 55 } 58 56 59 - static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) 57 + void xfrm6_local_error(struct sk_buff *skb, u32 mtu) 60 58 { 61 59 struct flowi6 fl6; 60 + const struct ipv6hdr *hdr; 62 61 struct sock *sk = skb->sk; 63 62 63 + hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); 64 64 fl6.fl6_dport = inet_sk(sk)->inet_dport; 65 - fl6.daddr = ipv6_hdr(skb)->daddr; 65 + fl6.daddr = hdr->daddr; 66 66 67 67 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); 68 68 } ··· 84 80 if (xfrm6_local_dontfrag(skb)) 85 81 xfrm6_local_rxpmtu(skb, mtu); 86 82 else if (skb->sk) 87 - xfrm6_local_error(skb, mtu); 83 + xfrm_local_error(skb, mtu); 88 84 else 89 85 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 90 86 ret = -EMSGSIZE; ··· 140 136 { 141 137 struct dst_entry *dst = skb_dst(skb); 142 138 struct xfrm_state *x = dst->xfrm; 143 - int mtu = ip6_skb_dst_mtu(skb); 139 + int mtu; 140 + 141 + if (skb->protocol == htons(ETH_P_IPV6)) 142 + mtu = ip6_skb_dst_mtu(skb); 143 + else 144 + mtu = dst_mtu(skb_dst(skb)); 144 145 145 146 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 146 147 xfrm6_local_rxpmtu(skb, mtu); 147 148 return -EMSGSIZE; 148 149 } else if (!skb->local_df && skb->len > mtu && skb->sk) { 149 - xfrm6_local_error(skb, mtu); 150 + xfrm_local_error(skb, mtu); 150 151 return -EMSGSIZE; 151 152 } 152 153
+1
net/ipv6/xfrm6_state.c
··· 183 183 .extract_input = xfrm6_extract_input, 184 184 .extract_output = xfrm6_extract_output, 185 185 .transport_finish = xfrm6_transport_finish, 186 + .local_error = xfrm6_local_error, 186 187 }; 187 188 188 189 int __init xfrm6_state_init(void)
+30 -4
net/mac80211/ibss.c
··· 36 36 37 37 static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 38 38 const u8 *bssid, const int beacon_int, 39 - struct ieee80211_channel *chan, 39 + struct cfg80211_chan_def *req_chandef, 40 40 const u32 basic_rates, 41 41 const u16 capability, u64 tsf, 42 42 bool creator) ··· 51 51 u32 bss_change; 52 52 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 53 53 struct cfg80211_chan_def chandef; 54 + struct ieee80211_channel *chan; 54 55 struct beacon_data *presp; 55 56 int frame_len; 56 57 ··· 82 81 83 82 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; 84 83 85 - chandef = ifibss->chandef; 84 + /* make a copy of the chandef, it could be modified below. */ 85 + chandef = *req_chandef; 86 + chan = chandef.chan; 86 87 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { 87 88 chandef.width = NL80211_CHAN_WIDTH_20; 88 89 chandef.center_freq1 = chan->center_freq; ··· 262 259 struct cfg80211_bss *cbss = 263 260 container_of((void *)bss, struct cfg80211_bss, priv); 264 261 struct ieee80211_supported_band *sband; 262 + struct cfg80211_chan_def chandef; 265 263 u32 basic_rates; 266 264 int i, j; 267 265 u16 beacon_int = cbss->beacon_interval; 268 266 const struct cfg80211_bss_ies *ies; 267 + enum nl80211_channel_type chan_type; 269 268 u64 tsf; 270 269 271 270 sdata_assert_lock(sdata); 272 271 273 272 if (beacon_int < 10) 274 273 beacon_int = 10; 274 + 275 + switch (sdata->u.ibss.chandef.width) { 276 + case NL80211_CHAN_WIDTH_20_NOHT: 277 + case NL80211_CHAN_WIDTH_20: 278 + case NL80211_CHAN_WIDTH_40: 279 + chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); 280 + cfg80211_chandef_create(&chandef, cbss->channel, chan_type); 281 + break; 282 + case NL80211_CHAN_WIDTH_5: 283 + case NL80211_CHAN_WIDTH_10: 284 + cfg80211_chandef_create(&chandef, cbss->channel, 285 + NL80211_CHAN_WIDTH_20_NOHT); 286 + chandef.width = sdata->u.ibss.chandef.width; 287 + break; 288 + default: 289 + /* fall back to 20 MHz for unsupported 
modes */ 290 + cfg80211_chandef_create(&chandef, cbss->channel, 291 + NL80211_CHAN_WIDTH_20_NOHT); 292 + break; 293 + } 275 294 276 295 sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; 277 296 ··· 319 294 320 295 __ieee80211_sta_join_ibss(sdata, cbss->bssid, 321 296 beacon_int, 322 - cbss->channel, 297 + &chandef, 323 298 basic_rates, 324 299 cbss->capability, 325 300 tsf, false); ··· 761 736 sdata->drop_unencrypted = 0; 762 737 763 738 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, 764 - ifibss->chandef.chan, ifibss->basic_rates, 739 + &ifibss->chandef, ifibss->basic_rates, 765 740 capability, 0, true); 766 741 } 767 742 ··· 1163 1138 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1164 1139 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1165 1140 BSS_CHANGED_IBSS); 1141 + ieee80211_vif_release_channel(sdata); 1166 1142 synchronize_rcu(); 1167 1143 kfree(presp); 1168 1144
+3
net/mac80211/rc80211_minstrel_ht.c
··· 828 828 if (sband->band != IEEE80211_BAND_2GHZ) 829 829 return; 830 830 831 + if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES)) 832 + return; 833 + 831 834 mi->cck_supported = 0; 832 835 mi->cck_supported_short = 0; 833 836 for (i = 0; i < 4; i++) {
+55 -12
net/netlink/genetlink.c
··· 364 364 EXPORT_SYMBOL(genl_unregister_ops); 365 365 366 366 /** 367 - * genl_register_family - register a generic netlink family 367 + * __genl_register_family - register a generic netlink family 368 368 * @family: generic netlink family 369 369 * 370 370 * Registers the specified family after validating it first. Only one ··· 374 374 * 375 375 * Return 0 on success or a negative error code. 376 376 */ 377 - int genl_register_family(struct genl_family *family) 377 + int __genl_register_family(struct genl_family *family) 378 378 { 379 379 int err = -EINVAL; 380 380 ··· 430 430 errout: 431 431 return err; 432 432 } 433 - EXPORT_SYMBOL(genl_register_family); 433 + EXPORT_SYMBOL(__genl_register_family); 434 434 435 435 /** 436 - * genl_register_family_with_ops - register a generic netlink family 436 + * __genl_register_family_with_ops - register a generic netlink family 437 437 * @family: generic netlink family 438 438 * @ops: operations to be registered 439 439 * @n_ops: number of elements to register ··· 457 457 * 458 458 * Return 0 on success or a negative error code. 
459 459 */ 460 - int genl_register_family_with_ops(struct genl_family *family, 460 + int __genl_register_family_with_ops(struct genl_family *family, 461 461 struct genl_ops *ops, size_t n_ops) 462 462 { 463 463 int err, i; 464 464 465 - err = genl_register_family(family); 465 + err = __genl_register_family(family); 466 466 if (err) 467 467 return err; 468 468 ··· 476 476 genl_unregister_family(family); 477 477 return err; 478 478 } 479 - EXPORT_SYMBOL(genl_register_family_with_ops); 479 + EXPORT_SYMBOL(__genl_register_family_with_ops); 480 480 481 481 /** 482 482 * genl_unregister_family - unregister generic netlink family ··· 544 544 } 545 545 EXPORT_SYMBOL(genlmsg_put); 546 546 547 + static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 548 + { 549 + struct genl_ops *ops = cb->data; 550 + int rc; 551 + 552 + genl_lock(); 553 + rc = ops->dumpit(skb, cb); 554 + genl_unlock(); 555 + return rc; 556 + } 557 + 558 + static int genl_lock_done(struct netlink_callback *cb) 559 + { 560 + struct genl_ops *ops = cb->data; 561 + int rc = 0; 562 + 563 + if (ops->done) { 564 + genl_lock(); 565 + rc = ops->done(cb); 566 + genl_unlock(); 567 + } 568 + return rc; 569 + } 570 + 547 571 static int genl_family_rcv_msg(struct genl_family *family, 548 572 struct sk_buff *skb, 549 573 struct nlmsghdr *nlh) ··· 596 572 return -EPERM; 597 573 598 574 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 599 - struct netlink_dump_control c = { 600 - .dump = ops->dumpit, 601 - .done = ops->done, 602 - }; 575 + int rc; 603 576 604 577 if (ops->dumpit == NULL) 605 578 return -EOPNOTSUPP; 606 579 607 - return netlink_dump_start(net->genl_sock, skb, nlh, &c); 580 + if (!family->parallel_ops) { 581 + struct netlink_dump_control c = { 582 + .module = family->module, 583 + .data = ops, 584 + .dump = genl_lock_dumpit, 585 + .done = genl_lock_done, 586 + }; 587 + 588 + genl_unlock(); 589 + rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); 590 + genl_lock(); 591 + 592 + 
} else { 593 + struct netlink_dump_control c = { 594 + .module = family->module, 595 + .dump = ops->dumpit, 596 + .done = ops->done, 597 + }; 598 + 599 + rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); 600 + } 601 + 602 + return rc; 608 603 } 609 604 610 605 if (ops->doit == NULL)
+2 -2
net/tipc/socket.c
··· 1257 1257 /* Accept only ACK or NACK message */ 1258 1258 if (unlikely(msg_errcode(msg))) { 1259 1259 sock->state = SS_DISCONNECTING; 1260 - sk->sk_err = -ECONNREFUSED; 1260 + sk->sk_err = ECONNREFUSED; 1261 1261 retval = TIPC_OK; 1262 1262 break; 1263 1263 } ··· 1268 1268 res = auto_connect(sock, msg); 1269 1269 if (res) { 1270 1270 sock->state = SS_DISCONNECTING; 1271 - sk->sk_err = res; 1271 + sk->sk_err = -res; 1272 1272 retval = TIPC_OK; 1273 1273 break; 1274 1274 }
+21
net/xfrm/xfrm_output.c
··· 214 214 return inner_mode->afinfo->extract_output(x, skb); 215 215 } 216 216 217 + void xfrm_local_error(struct sk_buff *skb, int mtu) 218 + { 219 + unsigned int proto; 220 + struct xfrm_state_afinfo *afinfo; 221 + 222 + if (skb->protocol == htons(ETH_P_IP)) 223 + proto = AF_INET; 224 + else if (skb->protocol == htons(ETH_P_IPV6)) 225 + proto = AF_INET6; 226 + else 227 + return; 228 + 229 + afinfo = xfrm_state_get_afinfo(proto); 230 + if (!afinfo) 231 + return; 232 + 233 + afinfo->local_error(skb, mtu); 234 + xfrm_state_put_afinfo(afinfo); 235 + } 236 + 217 237 EXPORT_SYMBOL_GPL(xfrm_output); 218 238 EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); 239 + EXPORT_SYMBOL_GPL(xfrm_local_error);
+1 -8
net/xfrm/xfrm_policy.c
··· 320 320 { 321 321 struct sk_buff *skb; 322 322 323 - while ((skb = skb_dequeue(list)) != NULL) { 324 - dev_put(skb->dev); 323 + while ((skb = skb_dequeue(list)) != NULL) 325 324 kfree_skb(skb); 326 - } 327 325 } 328 326 329 327 /* Rule must be locked. Release descentant resources, announce ··· 1756 1758 struct sk_buff *skb; 1757 1759 struct sock *sk; 1758 1760 struct dst_entry *dst; 1759 - struct net_device *dev; 1760 1761 struct xfrm_policy *pol = (struct xfrm_policy *)arg; 1761 1762 struct xfrm_policy_queue *pq = &pol->polq; 1762 1763 struct flowi fl; ··· 1802 1805 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, 1803 1806 &fl, skb->sk, 0); 1804 1807 if (IS_ERR(dst)) { 1805 - dev_put(skb->dev); 1806 1808 kfree_skb(skb); 1807 1809 continue; 1808 1810 } ··· 1810 1814 skb_dst_drop(skb); 1811 1815 skb_dst_set(skb, dst); 1812 1816 1813 - dev = skb->dev; 1814 1817 err = dst_output(skb); 1815 - dev_put(dev); 1816 1818 } 1817 1819 1818 1820 return; ··· 1833 1839 } 1834 1840 1835 1841 skb_dst_force(skb); 1836 - dev_hold(skb->dev); 1837 1842 1838 1843 spin_lock_bh(&pq->hold_queue.lock); 1839 1844
+2 -5
net/xfrm/xfrm_state.c
··· 39 39 40 40 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 41 41 42 - static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 43 - static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 44 - 45 42 static inline unsigned int xfrm_dst_hash(struct net *net, 46 43 const xfrm_address_t *daddr, 47 44 const xfrm_address_t *saddr, ··· 1857 1860 } 1858 1861 EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1859 1862 1860 - static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 1863 + struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 1861 1864 { 1862 1865 struct xfrm_state_afinfo *afinfo; 1863 1866 if (unlikely(family >= NPROTO)) ··· 1869 1872 return afinfo; 1870 1873 } 1871 1874 1872 - static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1875 + void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1873 1876 { 1874 1877 rcu_read_unlock(); 1875 1878 }