Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (26 commits)
bonding: fix panic if initialization fails
IXP4xx: complete Ethernet netdev setup before calling register_netdev().
IXP4xx: use "ENODEV" instead of "ENOSYS" in module initialization.
ipvs: Fix IPv4 FWMARK virtual services
ipv4: Make INET_LRO a bool instead of tristate.
net: remove stale reference to fastroute from Kconfig help text
net: update skb_recycle_check() for hardware timestamping changes
bnx2: Fix panic in bnx2_poll_work().
net-sched: fix bfifo default limit
igb: resolve panic on shutdown when SR-IOV is enabled
wimax: oops: wimax_dev_add() is the only one that can initialize the state
wimax: fix oops if netlink fails to add attribute
Bluetooth: Move dev_set_name() to a context that can sleep
netfilter: ctnetlink: fix wrong message type in user updates
netfilter: xt_cluster: fix use of cluster match with 32 nodes
netfilter: ip6t_ipv6header: fix match on packets ending with NEXTHDR_NONE
netfilter: add missing linux/types.h include to xt_LED.h
mac80211: pid, fix memory corruption
mac80211: minstrel, fix memory corruption
cfg80211: fix comment on regulatory hint processing
...

+177 -149
+1 -1
arch/arm/mach-ixp4xx/ixp4xx_npe.c
··· 714 714 } 715 715 716 716 if (!found) 717 - return -ENOSYS; 717 + return -ENODEV; 718 718 return 0; 719 719 } 720 720
+16 -15
drivers/net/arm/ixp4xx_eth.c
··· 338 338 if (cpu_is_ixp43x()) { 339 339 /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */ 340 340 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) 341 - return -ENOSYS; 341 + return -ENODEV; 342 342 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; 343 343 } else { 344 344 /* All MII PHY accesses use NPE-B Ethernet registers */ 345 345 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) 346 - return -ENOSYS; 346 + return -ENODEV; 347 347 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; 348 348 } 349 349 ··· 1174 1174 regs_phys = IXP4XX_EthC_BASE_PHYS; 1175 1175 break; 1176 1176 default: 1177 - err = -ENOSYS; 1177 + err = -ENODEV; 1178 1178 goto err_free; 1179 1179 } 1180 1180 ··· 1189 1189 goto err_free; 1190 1190 } 1191 1191 1192 - if (register_netdev(dev)) { 1193 - err = -EIO; 1194 - goto err_npe_rel; 1195 - } 1196 - 1197 1192 port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); 1198 1193 if (!port->mem_res) { 1199 1194 err = -EBUSY; 1200 - goto err_unreg; 1195 + goto err_npe_rel; 1201 1196 } 1202 1197 1203 1198 port->plat = plat; ··· 1210 1215 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy); 1211 1216 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, 1212 1217 PHY_INTERFACE_MODE_MII); 1213 - if (IS_ERR(port->phydev)) { 1214 - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 1215 - return PTR_ERR(port->phydev); 1216 - } 1218 + if ((err = IS_ERR(port->phydev))) 1219 + goto err_free_mem; 1217 1220 1218 1221 port->phydev->irq = PHY_POLL; 1222 + 1223 + if ((err = register_netdev(dev))) 1224 + goto err_phy_dis; 1219 1225 1220 1226 printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, 1221 1227 npe_name(port->npe)); 1222 1228 1223 1229 return 0; 1224 1230 1225 - err_unreg: 1226 - unregister_netdev(dev); 1231 + err_phy_dis: 1232 + phy_disconnect(port->phydev); 1233 + err_free_mem: 1234 + npe_port_tab[NPE_ID(port->id)] = NULL; 1235 + platform_set_drvdata(pdev, NULL); 1236 + release_resource(port->mem_res); 1227 1237 err_npe_rel: 1228 1238 npe_release(port->npe); 1229 1239 err_free: ··· 1242 1242 struct port *port = netdev_priv(dev); 1243 1243 1244 1244 unregister_netdev(dev); 1245 + phy_disconnect(port->phydev); 1245 1246 npe_port_tab[NPE_ID(port->id)] = NULL; 1246 1247 platform_set_drvdata(pdev, NULL); 1247 1248 npe_release(port->npe);
+4 -2
drivers/net/bnx2.c
··· 54 54 55 55 #define DRV_MODULE_NAME "bnx2" 56 56 #define PFX DRV_MODULE_NAME ": " 57 - #define DRV_MODULE_VERSION "2.0.0" 58 - #define DRV_MODULE_RELDATE "April 2, 2009" 57 + #define DRV_MODULE_VERSION "2.0.1" 58 + #define DRV_MODULE_RELDATE "May 6, 2009" 59 59 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw" 60 60 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw" 61 61 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw" ··· 2600 2600 /* Tell compiler that status block fields can change. */ 2601 2601 barrier(); 2602 2602 cons = *bnapi->hw_tx_cons_ptr; 2603 + barrier(); 2603 2604 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) 2604 2605 cons++; 2605 2606 return cons; ··· 2880 2879 /* Tell compiler that status block fields can change. */ 2881 2880 barrier(); 2882 2881 cons = *bnapi->hw_rx_cons_ptr; 2882 + barrier(); 2883 2883 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) 2884 2884 cons++; 2885 2885 return cons;
-8
drivers/net/bonding/bond_main.c
··· 5181 5181 { 5182 5182 int i; 5183 5183 int res; 5184 - struct bonding *bond; 5185 5184 5186 5185 printk(KERN_INFO "%s", version); 5187 5186 ··· 5211 5212 5212 5213 goto out; 5213 5214 err: 5214 - list_for_each_entry(bond, &bond_dev_list, bond_list) { 5215 - bond_work_cancel_all(bond); 5216 - destroy_workqueue(bond->wq); 5217 - } 5218 - 5219 - bond_destroy_sysfs(); 5220 - 5221 5215 rtnl_lock(); 5222 5216 bond_free_all(); 5223 5217 rtnl_unlock();
+5 -7
drivers/net/igb/igb_main.c
··· 2006 2006 struct e1000_hw *hw = &adapter->hw; 2007 2007 u32 rctl; 2008 2008 u32 srrctl = 0; 2009 - int i, j; 2009 + int i; 2010 2010 2011 2011 rctl = rd32(E1000_RCTL); 2012 2012 ··· 2071 2071 if (adapter->vfs_allocated_count) { 2072 2072 u32 vmolr; 2073 2073 2074 - j = adapter->rx_ring[0].reg_idx; 2075 - 2076 2074 /* set all queue drop enable bits */ 2077 2075 wr32(E1000_QDE, ALL_QUEUES); 2078 2076 srrctl |= E1000_SRRCTL_DROP_EN; ··· 2078 2080 /* disable queue 0 to prevent tail write w/o re-config */ 2079 2081 wr32(E1000_RXDCTL(0), 0); 2080 2082 2081 - vmolr = rd32(E1000_VMOLR(j)); 2083 + vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); 2082 2084 if (rctl & E1000_RCTL_LPE) 2083 2085 vmolr |= E1000_VMOLR_LPE; 2084 - if (adapter->num_rx_queues > 0) 2086 + if (adapter->num_rx_queues > 1) 2085 2087 vmolr |= E1000_VMOLR_RSSE; 2086 - wr32(E1000_VMOLR(j), vmolr); 2088 + wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr); 2087 2089 } 2088 2090 2089 2091 for (i = 0; i < adapter->num_rx_queues; i++) { 2090 - j = adapter->rx_ring[i].reg_idx; 2092 + int j = adapter->rx_ring[i].reg_idx; 2091 2093 wr32(E1000_SRRCTL(j), srrctl); 2092 2094 } 2093 2095
+2 -2
drivers/net/wan/ixp4xx_hss.c
··· 1249 1249 return -ENOMEM; 1250 1250 1251 1251 if ((port->npe = npe_request(0)) == NULL) { 1252 - err = -ENOSYS; 1252 + err = -ENODEV; 1253 1253 goto err_free; 1254 1254 } 1255 1255 ··· 1311 1311 if ((ixp4xx_read_feature_bits() & 1312 1312 (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) != 1313 1313 (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) 1314 - return -ENOSYS; 1314 + return -ENODEV; 1315 1315 1316 1316 spin_lock_init(&npe_lock); 1317 1317
+11 -10
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 719 719 { 720 720 unsigned long flags; 721 721 int ret = 0; 722 + __le16 key_flags = 0; 723 + 724 + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); 725 + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 726 + key_flags &= ~STA_KEY_FLG_INVALID; 727 + 728 + if (sta_id == priv->hw_params.bcast_sta_id) 729 + key_flags |= STA_KEY_MULTICAST_MSK; 722 730 723 731 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 724 732 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; ··· 746 738 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 747 739 "no space for a new key"); 748 740 741 + priv->stations[sta_id].sta.key.key_flags = key_flags; 742 + 743 + 749 744 /* This copy is acutally not needed: we get the key with each TX */ 750 745 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 751 746 ··· 765 754 { 766 755 u8 sta_id = IWL_INVALID_STATION; 767 756 unsigned long flags; 768 - __le16 key_flags = 0; 769 757 int i; 770 - DECLARE_MAC_BUF(mac); 771 758 772 759 sta_id = iwl_find_station(priv, addr); 773 760 if (sta_id == IWL_INVALID_STATION) { ··· 780 771 return; 781 772 } 782 773 783 - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); 784 - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 785 - key_flags &= ~STA_KEY_FLG_INVALID; 786 - 787 - if (sta_id == priv->hw_params.bcast_sta_id) 788 - key_flags |= STA_KEY_MULTICAST_MSK; 789 - 790 774 spin_lock_irqsave(&priv->sta_lock, flags); 791 775 792 - priv->stations[sta_id].sta.key.key_flags = key_flags; 793 776 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; 794 777 795 778 for (i = 0; i < 5; i++)
-1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 1744 1744 rxq->bd = NULL; 1745 1745 rxq->rb_stts = NULL; 1746 1746 } 1747 - EXPORT_SYMBOL(iwl3945_rx_queue_free); 1748 1747 1749 1748 1750 1749 /* Convert linear signal-to-noise ratio into dB */
+2
include/linux/netfilter/xt_LED.h
··· 1 1 #ifndef _XT_LED_H 2 2 #define _XT_LED_H 3 3 4 + #include <linux/types.h> 5 + 4 6 struct xt_led_info { 5 7 char id[27]; /* Unique ID for this trigger in the LED class */ 6 8 __u8 always_blink; /* Blink even if the LED is already on */
+2
include/linux/netfilter/xt_cluster.h
··· 12 12 u_int32_t flags; 13 13 }; 14 14 15 + #define XT_CLUSTER_NODES_MAX 32 16 + 15 17 #endif /* _XT_CLUSTER_MATCH_H */
-6
net/Kconfig
··· 119 119 <file:Documentation/Changes> under "iptables" for the location of 120 120 these packages. 121 121 122 - Make sure to say N to "Fast switching" below if you intend to say Y 123 - here, as Fast switching currently bypasses netfilter. 124 - 125 - Chances are that you should say Y here if you compile a kernel which 126 - will run as a router and N for regular hosts. If unsure, say N. 127 - 128 122 if NETFILTER 129 123 130 124 config NETFILTER_DEBUG
+3 -4
net/bluetooth/hci_sysfs.c
··· 88 88 static void add_conn(struct work_struct *work) 89 89 { 90 90 struct hci_conn *conn = container_of(work, struct hci_conn, work_add); 91 + struct hci_dev *hdev = conn->hdev; 91 92 92 93 /* ensure previous del is complete */ 93 94 flush_work(&conn->work_del); 95 + 96 + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 94 97 95 98 if (device_add(&conn->dev) < 0) { 96 99 BT_ERR("Failed to register connection device"); ··· 157 154 158 155 void hci_conn_add_sysfs(struct hci_conn *conn) 159 156 { 160 - struct hci_dev *hdev = conn->hdev; 161 - 162 157 BT_DBG("conn %p", conn); 163 - 164 - dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 165 158 166 159 queue_work(bt_workq, &conn->work_add); 167 160 }
+2
net/core/skbuff.c
··· 502 502 shinfo->gso_segs = 0; 503 503 shinfo->gso_type = 0; 504 504 shinfo->ip6_frag_id = 0; 505 + shinfo->tx_flags.flags = 0; 505 506 shinfo->frag_list = NULL; 507 + memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); 506 508 507 509 memset(skb, 0, offsetof(struct sk_buff, tail)); 508 510 skb->data = skb->head + NET_SKB_PAD;
+1 -1
net/ipv4/Kconfig
··· 407 407 If unsure, say Y. 408 408 409 409 config INET_LRO 410 - tristate "Large Receive Offload (ipv4/tcp)" 410 + bool "Large Receive Offload (ipv4/tcp)" 411 411 412 412 ---help--- 413 413 Support for Large Receive Offload (ipv4/tcp).
+3 -3
net/ipv6/netfilter/ip6t_ipv6header.c
··· 50 50 struct ipv6_opt_hdr _hdr; 51 51 int hdrlen; 52 52 53 - /* Is there enough space for the next ext header? */ 54 - if (len < (int)sizeof(struct ipv6_opt_hdr)) 55 - return false; 56 53 /* No more exthdr -> evaluate */ 57 54 if (nexthdr == NEXTHDR_NONE) { 58 55 temp |= MASK_NONE; 59 56 break; 60 57 } 58 + /* Is there enough space for the next ext header? */ 59 + if (len < (int)sizeof(struct ipv6_opt_hdr)) 60 + return false; 61 61 /* ESP -> evaluate */ 62 62 if (nexthdr == NEXTHDR_ESP) { 63 63 temp |= MASK_ESP;
+1 -1
net/mac80211/rc80211_minstrel.c
··· 476 476 return NULL; 477 477 478 478 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 479 - sband = hw->wiphy->bands[hw->conf.channel->band]; 479 + sband = hw->wiphy->bands[i]; 480 480 if (sband->n_bitrates > max_rates) 481 481 max_rates = sband->n_bitrates; 482 482 }
+49 -44
net/mac80211/rc80211_pid_algo.c
··· 317 317 struct ieee80211_sta *sta, void *priv_sta) 318 318 { 319 319 struct rc_pid_sta_info *spinfo = priv_sta; 320 + struct rc_pid_info *pinfo = priv; 321 + struct rc_pid_rateinfo *rinfo = pinfo->rinfo; 320 322 struct sta_info *si; 323 + int i, j, tmp; 324 + bool s; 321 325 322 326 /* TODO: This routine should consider using RSSI from previous packets 323 327 * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 324 328 * Until that method is implemented, we will use the lowest supported 325 329 * rate as a workaround. */ 326 - 327 - spinfo->txrate_idx = rate_lowest_index(sband, sta); 328 - /* HACK */ 329 - si = container_of(sta, struct sta_info, sta); 330 - si->fail_avg = 0; 331 - } 332 - 333 - static void *rate_control_pid_alloc(struct ieee80211_hw *hw, 334 - struct dentry *debugfsdir) 335 - { 336 - struct rc_pid_info *pinfo; 337 - struct rc_pid_rateinfo *rinfo; 338 - struct ieee80211_supported_band *sband; 339 - int i, j, tmp; 340 - bool s; 341 - #ifdef CONFIG_MAC80211_DEBUGFS 342 - struct rc_pid_debugfs_entries *de; 343 - #endif 344 - 345 - sband = hw->wiphy->bands[hw->conf.channel->band]; 346 - 347 - pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); 348 - if (!pinfo) 349 - return NULL; 350 - 351 - /* We can safely assume that sband won't change unless we get 352 - * reinitialized. */ 353 - rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); 354 - if (!rinfo) { 355 - kfree(pinfo); 356 - return NULL; 357 - } 358 - 359 - pinfo->target = RC_PID_TARGET_PF; 360 - pinfo->sampling_period = RC_PID_INTERVAL; 361 - pinfo->coeff_p = RC_PID_COEFF_P; 362 - pinfo->coeff_i = RC_PID_COEFF_I; 363 - pinfo->coeff_d = RC_PID_COEFF_D; 364 - pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT; 365 - pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR; 366 - pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION; 367 - pinfo->norm_offset = RC_PID_NORM_OFFSET; 368 - pinfo->rinfo = rinfo; 369 - pinfo->oldrate = 0; 370 330 371 331 /* Sort the rates. This is optimized for the most common case (i.e. 372 332 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed ··· 354 394 if (!s) 355 395 break; 356 396 } 397 + 398 + spinfo->txrate_idx = rate_lowest_index(sband, sta); 399 + /* HACK */ 400 + si = container_of(sta, struct sta_info, sta); 401 + si->fail_avg = 0; 402 + } 403 + 404 + static void *rate_control_pid_alloc(struct ieee80211_hw *hw, 405 + struct dentry *debugfsdir) 406 + { 407 + struct rc_pid_info *pinfo; 408 + struct rc_pid_rateinfo *rinfo; 409 + struct ieee80211_supported_band *sband; 410 + int i, max_rates = 0; 411 + #ifdef CONFIG_MAC80211_DEBUGFS 412 + struct rc_pid_debugfs_entries *de; 413 + #endif 414 + 415 + pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); 416 + if (!pinfo) 417 + return NULL; 418 + 419 + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 420 + sband = hw->wiphy->bands[i]; 421 + if (sband->n_bitrates > max_rates) 422 + max_rates = sband->n_bitrates; 423 + } 424 + 425 + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC); 426 + if (!rinfo) { 427 + kfree(pinfo); 428 + return NULL; 429 + } 430 + 431 + pinfo->target = RC_PID_TARGET_PF; 432 + pinfo->sampling_period = RC_PID_INTERVAL; 433 + pinfo->coeff_p = RC_PID_COEFF_P; 434 + pinfo->coeff_i = RC_PID_COEFF_I; 435 + pinfo->coeff_d = RC_PID_COEFF_D; 436 + pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT; 437 + pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR; 438 + pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION; 439 + pinfo->norm_offset = RC_PID_NORM_OFFSET; 440 + pinfo->rinfo = rinfo; 441 + pinfo->oldrate = 0; 357 442 358 443 #ifdef CONFIG_MAC80211_DEBUGFS 359 444 de = &pinfo->dentries;
+1 -1
net/mac80211/tx.c
··· 772 772 hdrlen = ieee80211_hdrlen(hdr->frame_control); 773 773 774 774 /* internal error, why is TX_FRAGMENTED set? */ 775 - if (WARN_ON(skb->len <= frag_threshold)) 775 + if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) 776 776 return TX_DROP; 777 777 778 778 /*
+7 -2
net/netfilter/ipvs/ip_vs_conn.c
··· 260 260 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 261 261 if (cp->af == af && 262 262 ip_vs_addr_equal(af, s_addr, &cp->caddr) && 263 - ip_vs_addr_equal(af, d_addr, &cp->vaddr) && 263 + /* protocol should only be IPPROTO_IP if 264 + * d_addr is a fwmark */ 265 + ip_vs_addr_equal(protocol == IPPROTO_IP ? AF_UNSPEC : af, 266 + d_addr, &cp->vaddr) && 264 267 s_port == cp->cport && d_port == cp->vport && 265 268 cp->flags & IP_VS_CONN_F_TEMPLATE && 266 269 protocol == cp->protocol) { ··· 701 698 cp->cport = cport; 702 699 ip_vs_addr_copy(af, &cp->vaddr, vaddr); 703 700 cp->vport = vport; 704 - ip_vs_addr_copy(af, &cp->daddr, daddr); 701 + /* proto should only be IPPROTO_IP if d_addr is a fwmark */ 702 + ip_vs_addr_copy(proto == IPPROTO_IP ? AF_UNSPEC : af, 703 + &cp->daddr, daddr); 705 704 cp->dport = dport; 706 705 cp->flags = flags; 707 706 spin_lock_init(&cp->lock);
+2 -2
net/netfilter/ipvs/ip_vs_core.c
··· 278 278 */ 279 279 if (svc->fwmark) { 280 280 union nf_inet_addr fwmark = { 281 - .all = { 0, 0, 0, htonl(svc->fwmark) } 281 + .ip = htonl(svc->fwmark) 282 282 }; 283 283 284 284 ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, ··· 306 306 */ 307 307 if (svc->fwmark) { 308 308 union nf_inet_addr fwmark = { 309 - .all = { 0, 0, 0, htonl(svc->fwmark) } 309 + .ip = htonl(svc->fwmark) 310 310 }; 311 311 312 312 ct = ip_vs_conn_new(svc->af, IPPROTO_IP,
+20 -28
net/netfilter/nf_conntrack_netlink.c
··· 1186 1186 return 0; 1187 1187 } 1188 1188 1189 - static inline void 1190 - ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report) 1191 - { 1192 - unsigned int events = 0; 1193 - 1194 - if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 1195 - events |= IPCT_RELATED; 1196 - else 1197 - events |= IPCT_NEW; 1198 - 1199 - nf_conntrack_event_report(IPCT_STATUS | 1200 - IPCT_HELPER | 1201 - IPCT_REFRESH | 1202 - IPCT_PROTOINFO | 1203 - IPCT_NATSEQADJ | 1204 - IPCT_MARK | 1205 - events, 1206 - ct, 1207 - pid, 1208 - report); 1209 - } 1210 - 1211 1189 static struct nf_conn * 1212 1190 ctnetlink_create_conntrack(struct nlattr *cda[], 1213 1191 struct nf_conntrack_tuple *otuple, ··· 1351 1373 err = -ENOENT; 1352 1374 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1353 1375 struct nf_conn *ct; 1376 + enum ip_conntrack_events events; 1354 1377 1355 1378 ct = ctnetlink_create_conntrack(cda, &otuple, 1356 1379 &rtuple, u3); ··· 1362 1383 err = 0; 1363 1384 nf_conntrack_get(&ct->ct_general); 1364 1385 spin_unlock_bh(&nf_conntrack_lock); 1365 - ctnetlink_event_report(ct, 1366 - NETLINK_CB(skb).pid, 1367 - nlmsg_report(nlh)); 1386 + if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 1387 + events = IPCT_RELATED; 1388 + else 1389 + events = IPCT_NEW; 1390 + 1391 + nf_conntrack_event_report(IPCT_STATUS | 1392 + IPCT_HELPER | 1393 + IPCT_PROTOINFO | 1394 + IPCT_NATSEQADJ | 1395 + IPCT_MARK | events, 1396 + ct, NETLINK_CB(skb).pid, 1397 + nlmsg_report(nlh)); 1368 1398 nf_ct_put(ct); 1369 1399 } else 1370 1400 spin_unlock_bh(&nf_conntrack_lock); ··· 1392 1404 if (err == 0) { 1393 1405 nf_conntrack_get(&ct->ct_general); 1394 1406 spin_unlock_bh(&nf_conntrack_lock); 1395 - ctnetlink_event_report(ct, 1396 - NETLINK_CB(skb).pid, 1397 - nlmsg_report(nlh)); 1407 + nf_conntrack_event_report(IPCT_STATUS | 1408 + IPCT_HELPER | 1409 + IPCT_PROTOINFO | 1410 + IPCT_NATSEQADJ | 1411 + IPCT_MARK, 1412 + ct, NETLINK_CB(skb).pid, 1413 + nlmsg_report(nlh)); 1398 1414 nf_ct_put(ct); 1399 1415 } else 1400 1416 spin_unlock_bh(&nf_conntrack_lock);
+7 -1
net/netfilter/xt_cluster.c
··· 135 135 { 136 136 struct xt_cluster_match_info *info = par->matchinfo; 137 137 138 - if (info->node_mask >= (1 << info->total_nodes)) { 138 + if (info->total_nodes > XT_CLUSTER_NODES_MAX) { 139 + printk(KERN_ERR "xt_cluster: you have exceeded the maximum " 140 + "number of cluster nodes (%u > %u)\n", 141 + info->total_nodes, XT_CLUSTER_NODES_MAX); 142 + return false; 143 + } 144 + if (info->node_mask >= (1ULL << info->total_nodes)) { 139 145 printk(KERN_ERR "xt_cluster: this node mask cannot be " 140 146 "higher than the total number of nodes\n"); 141 147 return false;
+1 -1
net/sched/sch_fifo.c
··· 51 51 u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; 52 52 53 53 if (sch->ops == &bfifo_qdisc_ops) 54 - limit *= qdisc_dev(sch)->mtu; 54 + limit *= psched_mtu(qdisc_dev(sch)); 55 55 56 56 q->limit = limit; 57 57 } else {
+6 -5
net/wimax/op-msg.c
··· 149 149 } 150 150 result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); 151 151 if (result < 0) { 152 - dev_err(dev, "no memory to add payload in attribute\n"); 152 + dev_err(dev, "no memory to add payload (msg %p size %zu) in " 153 + "attribute: %d\n", msg, size, result); 153 154 goto error_nla_put; 154 155 } 155 156 genlmsg_end(skb, genl_msg); ··· 300 299 struct sk_buff *skb; 301 300 302 301 skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); 303 - if (skb == NULL) 304 - goto error_msg_new; 305 - result = wimax_msg_send(wimax_dev, skb); 306 - error_msg_new: 302 + if (IS_ERR(skb)) 303 + result = PTR_ERR(skb); 304 + else 305 + result = wimax_msg_send(wimax_dev, skb); 307 306 return result; 308 307 } 309 308 EXPORT_SYMBOL_GPL(wimax_msg);
+15 -2
net/wimax/stack.c
··· 338 338 */ 339 339 void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) 340 340 { 341 + /* 342 + * A driver cannot take the wimax_dev out of the 343 + * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If 344 + * the wimax_dev's state is still NULL, we ignore any request 345 + * to change its state because it means it hasn't been yet 346 + * registered. 347 + * 348 + * There is no need to complain about it, as routines that 349 + * call this might be shared from different code paths that 350 + * are called before or after wimax_dev_add() has done its 351 + * job. 352 + */ 341 353 mutex_lock(&wimax_dev->mutex); 342 - __wimax_state_change(wimax_dev, new_state); 354 + if (wimax_dev->state > __WIMAX_ST_NULL) 355 + __wimax_state_change(wimax_dev, new_state); 343 356 mutex_unlock(&wimax_dev->mutex); 344 357 return; 345 358 } ··· 389 376 void wimax_dev_init(struct wimax_dev *wimax_dev) 390 377 { 391 378 INIT_LIST_HEAD(&wimax_dev->id_table_node); 392 - __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED); 379 + __wimax_state_set(wimax_dev, __WIMAX_ST_NULL); 393 380 mutex_init(&wimax_dev->mutex); 394 381 mutex_init(&wimax_dev->mutex_reset); 395 382 }
+15 -2
net/wireless/reg.c
··· 907 907 int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, 908 908 const struct ieee80211_reg_rule **reg_rule) 909 909 { 910 + assert_cfg80211_lock(); 910 911 return freq_reg_info_regd(wiphy, center_freq, 911 912 bandwidth, reg_rule, NULL); 912 913 } ··· 1134 1133 if (is_world_regdom(cfg80211_regdomain->alpha2) || 1135 1134 (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) 1136 1135 return true; 1137 - if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && 1136 + if (last_request && 1137 + last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && 1138 1138 wiphy->custom_regulatory) 1139 1139 return true; 1140 1140 return false; ··· 1144 1142 /* Reap the advantages of previously found beacons */ 1145 1143 static void reg_process_beacons(struct wiphy *wiphy) 1146 1144 { 1145 + /* 1146 + * Means we are just firing up cfg80211, so no beacons would 1147 + * have been processed yet. 1148 + */ 1149 + if (!last_request) 1150 + return; 1147 1151 if (!reg_is_world_roaming(wiphy)) 1148 1152 return; 1149 1153 wiphy_update_beacon_reg(wiphy); ··· 1183 1175 const struct ieee80211_power_rule *power_rule = NULL; 1184 1176 struct ieee80211_supported_band *sband; 1185 1177 struct ieee80211_channel *chan; 1178 + 1179 + assert_cfg80211_lock(); 1186 1180 1187 1181 sband = wiphy->bands[band]; 1188 1182 BUG_ON(chan_idx >= sband->n_channels); ··· 1224 1214 const struct ieee80211_regdomain *regd) 1225 1215 { 1226 1216 enum ieee80211_band band; 1217 + 1218 + mutex_lock(&cfg80211_mutex); 1227 1219 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1228 1220 if (wiphy->bands[band]) 1229 1221 handle_band_custom(wiphy, band, regd); 1230 1222 } 1223 + mutex_unlock(&cfg80211_mutex); 1231 1224 } 1232 1225 EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1233 1226 ··· 1436 1423 return call_crda(last_request->alpha2); 1437 1424 } 1438 1425 1439 - /* This currently only processes user and driver regulatory hints */ 1426 + /* This processes *all* regulatory hints */ 1440 1427 static void reg_process_hint(struct regulatory_request *reg_request) 1441 1428 { 1442 1429 int r = 0;
+1
net/wireless/scan.c
··· 395 395 memcpy(ies, res->pub.information_elements, ielen); 396 396 found->ies_allocated = true; 397 397 found->pub.information_elements = ies; 398 + found->pub.len_information_elements = ielen; 398 399 } 399 400 } 400 401 }