Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (59 commits)
igbvf.txt: Add igbvf Documentation
igb.txt: Add igb documentation
e100/e1000*/igb*/ixgb*: Add missing read memory barrier
ixgbe: fix build error with FCOE_CONFIG without DCB_CONFIG
netxen: protect tx timeout recovery by rtnl lock
isdn: gigaset: use after free
isdn: gigaset: add missing unlock
solos-pci: Fix race condition in tasklet RX handling
pkt_sched: Fix sch_sfq vs tcf_bind_filter oops
net: disable preemption before call smp_processor_id()
tcp: no md5sig option size check bug
iwlwifi: fix locking assertions
iwlwifi: fix TX tracer
isdn: fix information leak
net: Fix napi_gro_frags vs netpoll path
usbnet: remove noisy and hardly useful printk
rtl8180: avoid potential NULL deref in rtl8180_beacon_work
ath9k: Remove myself from the MAINTAINERS list
libertas: scan before association if no BSSID was given
libertas: fix association with some APs by using extended rates
...

+1319 -458
+132
Documentation/networking/igb.txt
··· 1 + Linux* Base Driver for Intel(R) Network Connection 2 + ================================================== 3 + 4 + Intel Gigabit Linux driver. 5 + Copyright(c) 1999 - 2010 Intel Corporation. 6 + 7 + Contents 8 + ======== 9 + 10 + - Identifying Your Adapter 11 + - Additional Configurations 12 + - Support 13 + 14 + Identifying Your Adapter 15 + ======================== 16 + 17 + This driver supports all 82575, 82576 and 82580-based Intel (R) gigabit network 18 + connections. 19 + 20 + For specific information on how to identify your adapter, go to the Adapter & 21 + Driver ID Guide at: 22 + 23 + http://support.intel.com/support/go/network/adapter/idguide.htm 24 + 25 + Command Line Parameters 26 + ======================= 27 + 28 + The default value for each parameter is generally the recommended setting, 29 + unless otherwise noted. 30 + 31 + max_vfs 32 + ------- 33 + Valid Range: 0-7 34 + Default Value: 0 35 + 36 + This parameter adds support for SR-IOV. It causes the driver to spawn up to 37 + max_vfs worth of virtual function. 38 + 39 + Additional Configurations 40 + ========================= 41 + 42 + Jumbo Frames 43 + ------------ 44 + Jumbo Frames support is enabled by changing the MTU to a value larger than 45 + the default of 1500. Use the ifconfig command to increase the MTU size. 46 + For example: 47 + 48 + ifconfig eth<x> mtu 9000 up 49 + 50 + This setting is not saved across reboots. 51 + 52 + Notes: 53 + 54 + - The maximum MTU setting for Jumbo Frames is 9216. This value coincides 55 + with the maximum Jumbo Frames size of 9234 bytes. 56 + 57 + - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or 58 + loss of link. 59 + 60 + Ethtool 61 + ------- 62 + The driver utilizes the ethtool interface for driver configuration and 63 + diagnostics, as well as displaying statistical information. 64 + 65 + http://sourceforge.net/projects/gkernel. 
66 + 67 + Enabling Wake on LAN* (WoL) 68 + --------------------------- 69 + WoL is configured through the Ethtool* utility. 70 + 71 + For instructions on enabling WoL with Ethtool, refer to the Ethtool man page. 72 + 73 + WoL will be enabled on the system during the next shut down or reboot. 74 + For this driver version, in order to enable WoL, the igb driver must be 75 + loaded when shutting down or rebooting the system. 76 + 77 + Wake On LAN is only supported on port A of multi-port adapters. 78 + 79 + Wake On LAN is not supported for the Intel(R) Gigabit VT Quad Port Server 80 + Adapter. 81 + 82 + Multiqueue 83 + ---------- 84 + In this mode, a separate MSI-X vector is allocated for each queue and one 85 + for "other" interrupts such as link status change and errors. All 86 + interrupts are throttled via interrupt moderation. Interrupt moderation 87 + must be used to avoid interrupt storms while the driver is processing one 88 + interrupt. The moderation value should be at least as large as the expected 89 + time for the driver to process an interrupt. Multiqueue is off by default. 90 + 91 + REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not 92 + found, the system will fallback to MSI or to Legacy interrupts. 93 + 94 + LRO 95 + --- 96 + Large Receive Offload (LRO) is a technique for increasing inbound throughput 97 + of high-bandwidth network connections by reducing CPU overhead. It works by 98 + aggregating multiple incoming packets from a single stream into a larger 99 + buffer before they are passed higher up the networking stack, thus reducing 100 + the number of packets that have to be processed. LRO combines multiple 101 + Ethernet frames into a single receive in the stack, thereby potentially 102 + decreasing CPU utilization for receives. 103 + 104 + NOTE: You need to have inet_lro enabled via either the CONFIG_INET_LRO or 105 + CONFIG_INET_LRO_MODULE kernel config option. 
Additionally, if 106 + CONFIG_INET_LRO_MODULE is used, the inet_lro module needs to be loaded 107 + before the igb driver. 108 + 109 + You can verify that the driver is using LRO by looking at these counters in 110 + Ethtool: 111 + 112 + lro_aggregated - count of total packets that were combined 113 + lro_flushed - counts the number of packets flushed out of LRO 114 + lro_no_desc - counts the number of times an LRO descriptor was not available 115 + for the LRO packet 116 + 117 + NOTE: IPv6 and UDP are not supported by LRO. 118 + 119 + Support 120 + ======= 121 + 122 + For general information, go to the Intel support website at: 123 + 124 + www.intel.com/support/ 125 + 126 + or the Intel Wired Networking project hosted by Sourceforge at: 127 + 128 + http://sourceforge.net/projects/e1000 129 + 130 + If an issue is identified with the released source code on the supported 131 + kernel with a supported adapter, email the specific information related 132 + to the issue to e1000-devel@lists.sf.net
+78
Documentation/networking/igbvf.txt
··· 1 + Linux* Base Driver for Intel(R) Network Connection 2 + ================================================== 3 + 4 + Intel Gigabit Linux driver. 5 + Copyright(c) 1999 - 2010 Intel Corporation. 6 + 7 + Contents 8 + ======== 9 + 10 + - Identifying Your Adapter 11 + - Additional Configurations 12 + - Support 13 + 14 + This file describes the igbvf Linux* Base Driver for Intel Network Connection. 15 + 16 + The igbvf driver supports 82576-based virtual function devices that can only 17 + be activated on kernels that support SR-IOV. SR-IOV requires the correct 18 + platform and OS support. 19 + 20 + The igbvf driver requires the igb driver, version 2.0 or later. The igbvf 21 + driver supports virtual functions generated by the igb driver with a max_vfs 22 + value of 1 or greater. For more information on the max_vfs parameter refer 23 + to the README included with the igb driver. 24 + 25 + The guest OS loading the igbvf driver must support MSI-X interrupts. 26 + 27 + This driver is only supported as a loadable module at this time. Intel is 28 + not supplying patches against the kernel source to allow for static linking 29 + of the driver. For questions related to hardware requirements, refer to the 30 + documentation supplied with your Intel Gigabit adapter. All hardware 31 + requirements listed apply to use with Linux. 32 + 33 + Instructions on updating ethtool can be found in the section "Additional 34 + Configurations" later in this document. 35 + 36 + VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs. 37 + 38 + Identifying Your Adapter 39 + ======================== 40 + 41 + The igbvf driver supports 82576-based virtual function devices that can only 42 + be activated on kernels that support SR-IOV. 
43 + 44 + For more information on how to identify your adapter, go to the Adapter & 45 + Driver ID Guide at: 46 + 47 + http://support.intel.com/support/go/network/adapter/idguide.htm 48 + 49 + For the latest Intel network drivers for Linux, refer to the following 50 + website. In the search field, enter your adapter name or type, or use the 51 + networking link on the left to search for your adapter: 52 + 53 + http://downloadcenter.intel.com/scripts-df-external/Support_Intel.aspx 54 + 55 + Additional Configurations 56 + ========================= 57 + 58 + Ethtool 59 + ------- 60 + The driver utilizes the ethtool interface for driver configuration and 61 + diagnostics, as well as displaying statistical information. 62 + 63 + http://sourceforge.net/projects/gkernel. 64 + 65 + Support 66 + ======= 67 + 68 + For general information, go to the Intel support website at: 69 + 70 + http://support.intel.com 71 + 72 + or the Intel Wired Networking project hosted by Sourceforge at: 73 + 74 + http://sourceforge.net/projects/e1000 75 + 76 + If an issue is identified with the released source code on the supported 77 + kernel with a supported adapter, email the specific information related 78 + to the issue to e1000-devel@lists.sf.net
-1
MAINTAINERS
··· 1085 1085 ATHEROS ATH9K WIRELESS DRIVER 1086 1086 M: "Luis R. Rodriguez" <lrodriguez@atheros.com> 1087 1087 M: Jouni Malinen <jmalinen@atheros.com> 1088 - M: Sujith Manoharan <Sujith.Manoharan@atheros.com> 1089 1088 M: Vasanthakumar Thiagarajan <vasanth@atheros.com> 1090 1089 M: Senthil Balasubramanian <senthilkumar@atheros.com> 1091 1090 L: linux-wireless@vger.kernel.org
+6 -1
drivers/atm/solos-pci.c
··· 781 781 sk_for_each(s, node, head) { 782 782 vcc = atm_sk(s); 783 783 if (vcc->dev == dev && vcc->vci == vci && 784 - vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE) 784 + vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && 785 + test_bit(ATM_VF_READY, &vcc->flags)) 785 786 goto out; 786 787 } 787 788 vcc = NULL; ··· 908 907 clear_bit(ATM_VF_ADDR, &vcc->flags); 909 908 clear_bit(ATM_VF_READY, &vcc->flags); 910 909 910 + /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the 911 + tasklet has finished processing any incoming packets (and, more to 912 + the point, using the vcc pointer). */ 913 + tasklet_unlock_wait(&card->tlet); 911 914 return; 912 915 } 913 916
+1 -1
drivers/char/pcmcia/ipwireless/network.c
··· 239 239 return err; 240 240 } 241 241 242 - static struct ppp_channel_ops ipwireless_ppp_channel_ops = { 242 + static const struct ppp_channel_ops ipwireless_ppp_channel_ops = { 243 243 .start_xmit = ipwireless_ppp_start_xmit, 244 244 .ioctl = ipwireless_ppp_ioctl 245 245 };
+4 -2
drivers/isdn/gigaset/bas-gigaset.c
··· 1914 1914 * The next command will reopen the AT channel automatically. 1915 1915 */ 1916 1916 if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { 1917 - kfree(cb); 1918 1917 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); 1919 1918 if (cb->wake_tasklet) 1920 1919 tasklet_schedule(cb->wake_tasklet); 1921 - return rc < 0 ? rc : cb->len; 1920 + if (!rc) 1921 + rc = cb->len; 1922 + kfree(cb); 1923 + return rc; 1922 1924 } 1923 1925 1924 1926 spin_lock_irqsave(&cs->cmdlock, flags);
+1
drivers/isdn/gigaset/capi.c
··· 1052 1052 do { 1053 1053 if (bcap->bcnext == ap) { 1054 1054 bcap->bcnext = bcap->bcnext->bcnext; 1055 + spin_unlock_irqrestore(&bcs->aplock, flags); 1055 1056 return; 1056 1057 } 1057 1058 bcap = bcap->bcnext;
+5 -5
drivers/isdn/sc/ioctl.c
··· 174 174 pr_debug("%s: SCIOGETSPID: ioctl received\n", 175 175 sc_adapter[card]->devicename); 176 176 177 - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); 177 + spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL); 178 178 if (!spid) { 179 179 kfree(rcvmsg); 180 180 return -ENOMEM; ··· 194 194 kfree(rcvmsg); 195 195 return status; 196 196 } 197 - strcpy(spid, rcvmsg->msg_data.byte_array); 197 + strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE); 198 198 199 199 /* 200 200 * Package the switch type and send to user space ··· 266 266 return status; 267 267 } 268 268 269 - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); 269 + dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL); 270 270 if (!dn) { 271 271 kfree(rcvmsg); 272 272 return -ENOMEM; 273 273 } 274 - strcpy(dn, rcvmsg->msg_data.byte_array); 274 + strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE); 275 275 kfree(rcvmsg); 276 276 277 277 /* ··· 337 337 pr_debug("%s: SCIOSTAT: ioctl received\n", 338 338 sc_adapter[card]->devicename); 339 339 340 - bi = kmalloc (sizeof(boardInfo), GFP_KERNEL); 340 + bi = kzalloc(sizeof(boardInfo), GFP_KERNEL); 341 341 if (!bi) { 342 342 kfree(rcvmsg); 343 343 return -ENOMEM;
+13 -12
drivers/net/cxgb3/cxgb3_main.c
··· 3198 3198 } 3199 3199 } 3200 3200 3201 + err = pci_enable_device(pdev); 3202 + if (err) { 3203 + dev_err(&pdev->dev, "cannot enable PCI device\n"); 3204 + goto out; 3205 + } 3206 + 3201 3207 err = pci_request_regions(pdev, DRV_NAME); 3202 3208 if (err) { 3203 3209 /* Just info, some other driver may have claimed the device. */ 3204 3210 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); 3205 - return err; 3206 - } 3207 - 3208 - err = pci_enable_device(pdev); 3209 - if (err) { 3210 - dev_err(&pdev->dev, "cannot enable PCI device\n"); 3211 - goto out_release_regions; 3211 + goto out_disable_device; 3212 3212 } 3213 3213 3214 3214 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { ··· 3217 3217 if (err) { 3218 3218 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 3219 3219 "coherent allocations\n"); 3220 - goto out_disable_device; 3220 + goto out_release_regions; 3221 3221 } 3222 3222 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { 3223 3223 dev_err(&pdev->dev, "no usable DMA configuration\n"); 3224 - goto out_disable_device; 3224 + goto out_release_regions; 3225 3225 } 3226 3226 3227 3227 pci_set_master(pdev); ··· 3234 3234 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3235 3235 if (!adapter) { 3236 3236 err = -ENOMEM; 3237 - goto out_disable_device; 3237 + goto out_release_regions; 3238 3238 } 3239 3239 3240 3240 adapter->nofail_skb = ··· 3370 3370 out_free_adapter: 3371 3371 kfree(adapter); 3372 3372 3373 - out_disable_device: 3374 - pci_disable_device(pdev); 3375 3373 out_release_regions: 3376 3374 pci_release_regions(pdev); 3375 + out_disable_device: 3376 + pci_disable_device(pdev); 3377 3377 pci_set_drvdata(pdev, NULL); 3378 + out: 3378 3379 return err; 3379 3380 } 3380 3381
+18 -17
drivers/net/cxgb4vf/cxgb4vf_main.c
··· 2462 2462 version_printed = 1; 2463 2463 } 2464 2464 2465 - /* 2466 - * Reserve PCI resources for the device. If we can't get them some 2467 - * other driver may have already claimed the device ... 2468 - */ 2469 - err = pci_request_regions(pdev, KBUILD_MODNAME); 2470 - if (err) { 2471 - dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 2472 - return err; 2473 - } 2474 2465 2475 2466 /* 2476 2467 * Initialize generic PCI device state. ··· 2469 2478 err = pci_enable_device(pdev); 2470 2479 if (err) { 2471 2480 dev_err(&pdev->dev, "cannot enable PCI device\n"); 2472 - goto err_release_regions; 2481 + return err; 2482 + } 2483 + 2484 + /* 2485 + * Reserve PCI resources for the device. If we can't get them some 2486 + * other driver may have already claimed the device ... 2487 + */ 2488 + err = pci_request_regions(pdev, KBUILD_MODNAME); 2489 + if (err) { 2490 + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 2491 + goto err_disable_device; 2473 2492 } 2474 2493 2475 2494 /* ··· 2492 2491 if (err) { 2493 2492 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" 2494 2493 " coherent allocations\n"); 2495 - goto err_disable_device; 2494 + goto err_release_regions; 2496 2495 } 2497 2496 pci_using_dac = 1; 2498 2497 } else { 2499 2498 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2500 2499 if (err != 0) { 2501 2500 dev_err(&pdev->dev, "no usable DMA configuration\n"); 2502 - goto err_disable_device; 2501 + goto err_release_regions; 2503 2502 } 2504 2503 pci_using_dac = 0; 2505 2504 } ··· 2515 2514 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 2516 2515 if (!adapter) { 2517 2516 err = -ENOMEM; 2518 - goto err_disable_device; 2517 + goto err_release_regions; 2519 2518 } 2520 2519 pci_set_drvdata(pdev, adapter); 2521 2520 adapter->pdev = pdev; ··· 2751 2750 kfree(adapter); 2752 2751 pci_set_drvdata(pdev, NULL); 2753 2752 2754 - err_disable_device: 2755 - pci_disable_device(pdev); 2756 - pci_clear_master(pdev); 2757 - 2758 2753 err_release_regions: 2759 2754 
pci_release_regions(pdev); 2760 2755 pci_set_drvdata(pdev, NULL); 2756 + pci_clear_master(pdev); 2757 + 2758 + err_disable_device: 2759 + pci_disable_device(pdev); 2761 2760 2762 2761 err_out: 2763 2762 return err;
+1 -1
drivers/net/davinci_emac.c
··· 2944 2944 release_mem_region(res->start, res->end - res->start + 1); 2945 2945 2946 2946 unregister_netdev(ndev); 2947 - free_netdev(ndev); 2948 2947 iounmap(priv->remap_addr); 2948 + free_netdev(ndev); 2949 2949 2950 2950 clk_disable(emac_clk); 2951 2951 clk_put(emac_clk);
+2
drivers/net/e100.c
··· 1779 1779 for (cb = nic->cb_to_clean; 1780 1780 cb->status & cpu_to_le16(cb_complete); 1781 1781 cb = nic->cb_to_clean = cb->next) { 1782 + rmb(); /* read skb after status */ 1782 1783 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, 1783 1784 "cb[%d]->status = 0x%04X\n", 1784 1785 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), ··· 1928 1927 1929 1928 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, 1930 1929 "status=0x%04X\n", rfd_status); 1930 + rmb(); /* read size after status bit */ 1931 1931 1932 1932 /* If data isn't ready, nothing to indicate */ 1933 1933 if (unlikely(!(rfd_status & cb_complete))) {
+3
drivers/net/e1000/e1000_main.c
··· 3454 3454 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3455 3455 (count < tx_ring->count)) { 3456 3456 bool cleaned = false; 3457 + rmb(); /* read buffer_info after eop_desc */ 3457 3458 for ( ; !cleaned; count++) { 3458 3459 tx_desc = E1000_TX_DESC(*tx_ring, i); 3459 3460 buffer_info = &tx_ring->buffer_info[i]; ··· 3644 3643 if (*work_done >= work_to_do) 3645 3644 break; 3646 3645 (*work_done)++; 3646 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 3647 3647 3648 3648 status = rx_desc->status; 3649 3649 skb = buffer_info->skb; ··· 3851 3849 if (*work_done >= work_to_do) 3852 3850 break; 3853 3851 (*work_done)++; 3852 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 3854 3853 3855 3854 status = rx_desc->status; 3856 3855 skb = buffer_info->skb;
+4
drivers/net/e1000e/netdev.c
··· 781 781 if (*work_done >= work_to_do) 782 782 break; 783 783 (*work_done)++; 784 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 784 785 785 786 status = rx_desc->status; 786 787 skb = buffer_info->skb; ··· 992 991 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 993 992 (count < tx_ring->count)) { 994 993 bool cleaned = false; 994 + rmb(); /* read buffer_info after eop_desc */ 995 995 for (; !cleaned; count++) { 996 996 tx_desc = E1000_TX_DESC(*tx_ring, i); 997 997 buffer_info = &tx_ring->buffer_info[i]; ··· 1089 1087 break; 1090 1088 (*work_done)++; 1091 1089 skb = buffer_info->skb; 1090 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 1092 1091 1093 1092 /* in the packet split case this is header only */ 1094 1093 prefetch(skb->data - NET_IP_ALIGN); ··· 1289 1286 if (*work_done >= work_to_do) 1290 1287 break; 1291 1288 (*work_done)++; 1289 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 1292 1290 1293 1291 status = rx_desc->status; 1294 1292 skb = buffer_info->skb;
+2 -15
drivers/net/enic/enic_main.c
··· 1087 1087 { 1088 1088 struct vic_provinfo *vp; 1089 1089 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1090 - u8 *uuid; 1091 1090 char uuid_str[38]; 1092 - static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-" 1093 - "%02X%02X-%02X%02X%02X%02X%0X%02X"; 1094 1091 int err; 1095 1092 1096 1093 err = enic_vnic_dev_deinit(enic); ··· 1118 1121 ETH_ALEN, mac); 1119 1122 1120 1123 if (enic->pp.set & ENIC_SET_INSTANCE) { 1121 - uuid = enic->pp.instance_uuid; 1122 - sprintf(uuid_str, uuid_fmt, 1123 - uuid[0], uuid[1], uuid[2], uuid[3], 1124 - uuid[4], uuid[5], uuid[6], uuid[7], 1125 - uuid[8], uuid[9], uuid[10], uuid[11], 1126 - uuid[12], uuid[13], uuid[14], uuid[15]); 1124 + sprintf(uuid_str, "%pUB", enic->pp.instance_uuid); 1127 1125 vic_provinfo_add_tlv(vp, 1128 1126 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1129 1127 sizeof(uuid_str), uuid_str); 1130 1128 } 1131 1129 1132 1130 if (enic->pp.set & ENIC_SET_HOST) { 1133 - uuid = enic->pp.host_uuid; 1134 - sprintf(uuid_str, uuid_fmt, 1135 - uuid[0], uuid[1], uuid[2], uuid[3], 1136 - uuid[4], uuid[5], uuid[6], uuid[7], 1137 - uuid[8], uuid[9], uuid[10], uuid[11], 1138 - uuid[12], uuid[13], uuid[14], uuid[15]); 1131 + sprintf(uuid_str, "%pUB", enic->pp.host_uuid); 1139 1132 vic_provinfo_add_tlv(vp, 1140 1133 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1141 1134 sizeof(uuid_str), uuid_str);
+2
drivers/net/igb/igb_main.c
··· 5353 5353 5354 5354 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 5355 5355 (count < tx_ring->count)) { 5356 + rmb(); /* read buffer_info after eop_desc status */ 5356 5357 for (cleaned = false; !cleaned; count++) { 5357 5358 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 5358 5359 buffer_info = &tx_ring->buffer_info[i]; ··· 5559 5558 if (*work_done >= budget) 5560 5559 break; 5561 5560 (*work_done)++; 5561 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 5562 5562 5563 5563 skb = buffer_info->skb; 5564 5564 prefetch(skb->data - NET_IP_ALIGN);
+2
drivers/net/igbvf/netdev.c
··· 248 248 if (*work_done >= work_to_do) 249 249 break; 250 250 (*work_done)++; 251 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 251 252 252 253 buffer_info = &rx_ring->buffer_info[i]; 253 254 ··· 781 780 782 781 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 783 782 (count < tx_ring->count)) { 783 + rmb(); /* read buffer_info after eop_desc status */ 784 784 for (cleaned = false; !cleaned; count++) { 785 785 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 786 786 buffer_info = &tx_ring->buffer_info[i];
+2
drivers/net/ixgb/ixgb_main.c
··· 1816 1816 1817 1817 while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1818 1818 1819 + rmb(); /* read buffer_info after eop_desc */ 1819 1820 for (cleaned = false; !cleaned; ) { 1820 1821 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1821 1822 buffer_info = &tx_ring->buffer_info[i]; ··· 1977 1976 break; 1978 1977 1979 1978 (*work_done)++; 1979 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 1980 1980 status = rx_desc->status; 1981 1981 skb = buffer_info->skb; 1982 1982 buffer_info->skb = NULL;
+11 -4
drivers/net/ixgbe/ixgbe_main.c
··· 748 748 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 749 749 (count < tx_ring->work_limit)) { 750 750 bool cleaned = false; 751 + rmb(); /* read buffer_info after eop_desc */ 751 752 for ( ; !cleaned; count++) { 752 753 struct sk_buff *skb; 753 754 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); ··· 6156 6155 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6157 6156 txq += adapter->ring_feature[RING_F_FCOE].mask; 6158 6157 return txq; 6158 + #ifdef CONFIG_IXGBE_DCB 6159 6159 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6160 6160 txq = adapter->fcoe.up; 6161 6161 return txq; 6162 + #endif 6162 6163 } 6163 6164 } 6164 6165 #endif ··· 6219 6216 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6220 6217 (skb->protocol == htons(ETH_P_FCOE) || 6221 6218 skb->protocol == htons(ETH_P_FIP))) { 6222 - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6223 - << IXGBE_TX_FLAGS_VLAN_SHIFT); 6224 - tx_flags |= ((adapter->fcoe.up << 13) 6225 - << IXGBE_TX_FLAGS_VLAN_SHIFT); 6219 + #ifdef CONFIG_IXGBE_DCB 6220 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6221 + tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6222 + << IXGBE_TX_FLAGS_VLAN_SHIFT); 6223 + tx_flags |= ((adapter->fcoe.up << 13) 6224 + << IXGBE_TX_FLAGS_VLAN_SHIFT); 6225 + } 6226 + #endif 6226 6227 /* flag for FCoE offloads */ 6227 6228 if (skb->protocol == htons(ETH_P_FCOE)) 6228 6229 tx_flags |= IXGBE_TX_FLAGS_FCOE;
+2
drivers/net/ixgbevf/ixgbevf_main.c
··· 231 231 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 232 232 (count < tx_ring->work_limit)) { 233 233 bool cleaned = false; 234 + rmb(); /* read buffer_info after eop_desc */ 234 235 for ( ; !cleaned; count++) { 235 236 struct sk_buff *skb; 236 237 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); ··· 519 518 break; 520 519 (*work_done)++; 521 520 521 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 522 522 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 523 523 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); 524 524 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+7 -8
drivers/net/netxen/netxen_nic_main.c
··· 2001 2001 if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) 2002 2002 goto request_reset; 2003 2003 2004 + rtnl_lock(); 2004 2005 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 2005 2006 /* try to scrub interrupt */ 2006 2007 netxen_napi_disable(adapter); 2007 - 2008 - adapter->netdev->trans_start = jiffies; 2009 2008 2010 2009 netxen_napi_enable(adapter); 2011 2010 2012 2011 netif_wake_queue(adapter->netdev); 2013 2012 2014 2013 clear_bit(__NX_RESETTING, &adapter->state); 2015 - return; 2016 2014 } else { 2017 2015 clear_bit(__NX_RESETTING, &adapter->state); 2018 - if (!netxen_nic_reset_context(adapter)) { 2019 - adapter->netdev->trans_start = jiffies; 2020 - return; 2016 + if (netxen_nic_reset_context(adapter)) { 2017 + rtnl_unlock(); 2018 + goto request_reset; 2021 2019 } 2022 - 2023 - /* context reset failed, fall through for fw reset */ 2024 2020 } 2021 + adapter->netdev->trans_start = jiffies; 2022 + rtnl_unlock(); 2023 + return; 2025 2024 2026 2025 request_reset: 2027 2026 adapter->need_fw_reset = 1;
+3 -3
drivers/net/ppp_async.c
··· 108 108 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, 109 109 int len, int inbound); 110 110 111 - static struct ppp_channel_ops async_ops = { 112 - ppp_async_send, 113 - ppp_async_ioctl 111 + static const struct ppp_channel_ops async_ops = { 112 + .start_xmit = ppp_async_send, 113 + .ioctl = ppp_async_ioctl, 114 114 }; 115 115 116 116 /*
+3 -3
drivers/net/ppp_synctty.c
··· 97 97 static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, 98 98 char *flags, int count); 99 99 100 - static struct ppp_channel_ops sync_ops = { 101 - ppp_sync_send, 102 - ppp_sync_ioctl 100 + static const struct ppp_channel_ops sync_ops = { 101 + .start_xmit = ppp_sync_send, 102 + .ioctl = ppp_sync_ioctl, 103 103 }; 104 104 105 105 /*
+2 -2
drivers/net/pppoe.c
··· 92 92 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 93 93 94 94 static const struct proto_ops pppoe_ops; 95 - static struct ppp_channel_ops pppoe_chan_ops; 95 + static const struct ppp_channel_ops pppoe_chan_ops; 96 96 97 97 /* per-net private data for this module */ 98 98 static int pppoe_net_id __read_mostly; ··· 963 963 return __pppoe_xmit(sk, skb); 964 964 } 965 965 966 - static struct ppp_channel_ops pppoe_chan_ops = { 966 + static const struct ppp_channel_ops pppoe_chan_ops = { 967 967 .start_xmit = pppoe_xmit, 968 968 }; 969 969
-1
drivers/net/usb/usbnet.c
··· 1457 1457 spin_lock_irq(&dev->txq.lock); 1458 1458 while ((res = usb_get_from_anchor(&dev->deferred))) { 1459 1459 1460 - printk(KERN_INFO"%s has delayed data\n", __func__); 1461 1460 skb = (struct sk_buff *)res->context; 1462 1461 retval = usb_submit_urb(res, GFP_ATOMIC); 1463 1462 if (retval < 0) {
+2 -2
drivers/net/via-velocity.c
··· 2763 2763 2764 2764 vptr->dev = dev; 2765 2765 2766 - dev->irq = pdev->irq; 2767 - 2768 2766 ret = pci_enable_device(pdev); 2769 2767 if (ret < 0) 2770 2768 goto err_free_dev; 2769 + 2770 + dev->irq = pdev->irq; 2771 2771 2772 2772 ret = velocity_get_pci_info(vptr, pdev); 2773 2773 if (ret < 0) {
+14
drivers/net/virtio_net.c
··· 705 705 return 0; 706 706 } 707 707 708 + static void virtnet_get_drvinfo(struct net_device *dev, 709 + struct ethtool_drvinfo *drvinfo) 710 + { 711 + struct virtnet_info *vi = netdev_priv(dev); 712 + struct virtio_device *vdev = vi->vdev; 713 + 714 + strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver)); 715 + strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version)); 716 + strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version)); 717 + strncpy(drvinfo->bus_info, dev_name(&vdev->dev), 718 + ARRAY_SIZE(drvinfo->bus_info)); 719 + } 720 + 708 721 static int virtnet_set_tx_csum(struct net_device *dev, u32 data) 709 722 { 710 723 struct virtnet_info *vi = netdev_priv(dev); ··· 830 817 } 831 818 832 819 static const struct ethtool_ops virtnet_ethtool_ops = { 820 + .get_drvinfo = virtnet_get_drvinfo, 833 821 .set_tx_csum = virtnet_set_tx_csum, 834 822 .set_sg = ethtool_op_set_sg, 835 823 .set_tso = ethtool_op_set_tso,
+27 -16
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 63 63 u8 rxchainmask, 64 64 struct ath9k_cal_list *currCal) 65 65 { 66 + struct ath9k_hw_cal_data *caldata = ah->caldata; 66 67 bool iscaldone = false; 67 68 68 69 if (currCal->calState == CAL_RUNNING) { ··· 82 81 } 83 82 84 83 currCal->calData->calPostProc(ah, numChains); 85 - ichan->CalValid |= currCal->calData->calType; 84 + caldata->CalValid |= currCal->calData->calType; 86 85 currCal->calState = CAL_DONE; 87 86 iscaldone = true; 88 87 } else { 89 88 ar9002_hw_setup_calibration(ah, currCal); 90 89 } 91 90 } 92 - } else if (!(ichan->CalValid & currCal->calData->calType)) { 91 + } else if (!(caldata->CalValid & currCal->calData->calType)) { 93 92 ath9k_hw_reset_calibration(ah, currCal); 94 93 } 95 94 ··· 687 686 { 688 687 bool iscaldone = true; 689 688 struct ath9k_cal_list *currCal = ah->cal_list_curr; 689 + bool nfcal, nfcal_pending = false; 690 690 691 - if (currCal && 691 + nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF); 692 + if (ah->caldata) 693 + nfcal_pending = ah->caldata->nfcal_pending; 694 + 695 + if (currCal && !nfcal && 692 696 (currCal->calState == CAL_RUNNING || 693 697 currCal->calState == CAL_WAITING)) { 694 698 iscaldone = ar9002_hw_per_calibration(ah, chan, ··· 709 703 } 710 704 711 705 /* Do NF cal only at longer intervals */ 712 - if (longcal) { 706 + if (longcal || nfcal_pending) { 713 707 /* Do periodic PAOffset Cal */ 714 708 ar9002_hw_pa_cal(ah, false); 715 709 ar9002_hw_olc_temp_compensation(ah); ··· 718 712 * Get the value from the previous NF cal and update 719 713 * history buffer. 720 714 */ 721 - ath9k_hw_getnf(ah, chan); 715 + if (ath9k_hw_getnf(ah, chan)) { 716 + /* 717 + * Load the NF from history buffer of the current 718 + * channel. 719 + * NF is slow time-variant, so it is OK to use a 720 + * historical value. 721 + */ 722 + ath9k_hw_loadnf(ah, ah->curchan); 723 + } 722 724 723 - /* 724 - * Load the NF from history buffer of the current channel. 
725 - * NF is slow time-variant, so it is OK to use a historical 726 - * value. 727 - */ 728 - ath9k_hw_loadnf(ah, ah->curchan); 729 - 730 - ath9k_hw_start_nfcal(ah); 725 + if (longcal) 726 + ath9k_hw_start_nfcal(ah, false); 731 727 } 732 728 733 729 return iscaldone; ··· 877 869 ar9002_hw_pa_cal(ah, true); 878 870 879 871 /* Do NF Calibration after DC offset and other calibrations */ 880 - REG_WRITE(ah, AR_PHY_AGC_CONTROL, 881 - REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF); 872 + ath9k_hw_start_nfcal(ah, true); 873 + 874 + if (ah->caldata) 875 + ah->caldata->nfcal_pending = true; 882 876 883 877 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 884 878 ··· 911 901 ath9k_hw_reset_calibration(ah, ah->cal_list_curr); 912 902 } 913 903 914 - chan->CalValid = 0; 904 + if (ah->caldata) 905 + ah->caldata->CalValid = 0; 915 906 916 907 return true; 917 908 }
+14 -4
drivers/net/wireless/ath/ath9k/ar9003_calib.c
··· 68 68 u8 rxchainmask, 69 69 struct ath9k_cal_list *currCal) 70 70 { 71 + struct ath9k_hw_cal_data *caldata = ah->caldata; 71 72 /* Cal is assumed not done until explicitly set below */ 72 73 bool iscaldone = false; 73 74 ··· 96 95 currCal->calData->calPostProc(ah, numChains); 97 96 98 97 /* Calibration has finished. */ 99 - ichan->CalValid |= currCal->calData->calType; 98 + caldata->CalValid |= currCal->calData->calType; 100 99 currCal->calState = CAL_DONE; 101 100 iscaldone = true; 102 101 } else { ··· 107 106 ar9003_hw_setup_calibration(ah, currCal); 108 107 } 109 108 } 110 - } else if (!(ichan->CalValid & currCal->calData->calType)) { 109 + } else if (!(caldata->CalValid & currCal->calData->calType)) { 111 110 /* If current cal is marked invalid in channel, kick it off */ 112 111 ath9k_hw_reset_calibration(ah, currCal); 113 112 } ··· 150 149 /* Do NF cal only at longer intervals */ 151 150 if (longcal) { 152 151 /* 152 + * Get the value from the previous NF cal and update 153 + * history buffer. 154 + */ 155 + ath9k_hw_getnf(ah, chan); 156 + 157 + /* 153 158 * Load the NF from history buffer of the current channel. 154 159 * NF is slow time-variant, so it is OK to use a historical 155 160 * value. ··· 163 156 ath9k_hw_loadnf(ah, ah->curchan); 164 157 165 158 /* start NF calibration, without updating BB NF register */ 166 - ath9k_hw_start_nfcal(ah); 159 + ath9k_hw_start_nfcal(ah, false); 167 160 } 168 161 169 162 return iscaldone; ··· 769 762 /* Revert chainmasks to their original values before NF cal */ 770 763 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 771 764 765 + ath9k_hw_start_nfcal(ah, true); 766 + 772 767 /* Initialize list pointers */ 773 768 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 774 769 ··· 794 785 if (ah->cal_list_curr) 795 786 ath9k_hw_reset_calibration(ah, ah->cal_list_curr); 796 787 797 - chan->CalValid = 0; 788 + if (ah->caldata) 789 + ah->caldata->CalValid = 0; 798 790 799 791 return true; 800 792 }
+381 -7
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
··· 41 41 #define LE16(x) __constant_cpu_to_le16(x) 42 42 #define LE32(x) __constant_cpu_to_le32(x) 43 43 44 + /* Local defines to distinguish between extension and control CTL's */ 45 + #define EXT_ADDITIVE (0x8000) 46 + #define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) 47 + #define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) 48 + #define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) 49 + #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ 50 + #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ 51 + #define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */ 52 + #define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */ 53 + #define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */ 54 + 55 + #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 56 + #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 57 + 44 58 static const struct ar9300_eeprom ar9300_default = { 45 59 .eepromVersion = 2, 46 60 .templateVersion = 2, ··· 622 608 }, 623 609 } 624 610 }; 611 + 612 + static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) 613 + { 614 + if (fbin == AR9300_BCHAN_UNUSED) 615 + return fbin; 616 + 617 + return (u16) ((is2GHz) ? 
(2300 + fbin) : (4800 + 5 * fbin)); 618 + } 625 619 626 620 static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) 627 621 { ··· 1439 1417 #undef POW_SM 1440 1418 } 1441 1419 1442 - static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq) 1420 + static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, 1421 + u8 *targetPowerValT2) 1443 1422 { 1444 - u8 targetPowerValT2[ar9300RateSize]; 1445 1423 /* XXX: hard code for now, need to get from eeprom struct */ 1446 1424 u8 ht40PowerIncForPdadc = 0; 1447 1425 bool is2GHz = false; ··· 1575 1553 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); 1576 1554 i++; 1577 1555 } 1578 - 1579 - /* Write target power array to registers */ 1580 - ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 1581 1556 } 1582 1557 1583 1558 static int ar9003_hw_cal_pier_get(struct ath_hw *ah, ··· 1818 1799 return 0; 1819 1800 } 1820 1801 1802 + static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep, 1803 + int idx, 1804 + int edge, 1805 + bool is2GHz) 1806 + { 1807 + struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; 1808 + struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; 1809 + 1810 + if (is2GHz) 1811 + return ctl_2g[idx].ctlEdges[edge].tPower; 1812 + else 1813 + return ctl_5g[idx].ctlEdges[edge].tPower; 1814 + } 1815 + 1816 + static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, 1817 + int idx, 1818 + unsigned int edge, 1819 + u16 freq, 1820 + bool is2GHz) 1821 + { 1822 + struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; 1823 + struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; 1824 + 1825 + u8 *ctl_freqbin = is2GHz ? 
1826 + &eep->ctl_freqbin_2G[idx][0] : 1827 + &eep->ctl_freqbin_5G[idx][0]; 1828 + 1829 + if (is2GHz) { 1830 + if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && 1831 + ctl_2g[idx].ctlEdges[edge - 1].flag) 1832 + return ctl_2g[idx].ctlEdges[edge - 1].tPower; 1833 + } else { 1834 + if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && 1835 + ctl_5g[idx].ctlEdges[edge - 1].flag) 1836 + return ctl_5g[idx].ctlEdges[edge - 1].tPower; 1837 + } 1838 + 1839 + return AR9300_MAX_RATE_POWER; 1840 + } 1841 + 1842 + /* 1843 + * Find the maximum conformance test limit for the given channel and CTL info 1844 + */ 1845 + static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep, 1846 + u16 freq, int idx, bool is2GHz) 1847 + { 1848 + u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; 1849 + u8 *ctl_freqbin = is2GHz ? 1850 + &eep->ctl_freqbin_2G[idx][0] : 1851 + &eep->ctl_freqbin_5G[idx][0]; 1852 + u16 num_edges = is2GHz ? 1853 + AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G; 1854 + unsigned int edge; 1855 + 1856 + /* Get the edge power */ 1857 + for (edge = 0; 1858 + (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED); 1859 + edge++) { 1860 + /* 1861 + * If there's an exact channel match or an inband flag set 1862 + * on the lower channel use the given rdEdgePower 1863 + */ 1864 + if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) { 1865 + twiceMaxEdgePower = 1866 + ar9003_hw_get_direct_edge_power(eep, idx, 1867 + edge, is2GHz); 1868 + break; 1869 + } else if ((edge > 0) && 1870 + (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge], 1871 + is2GHz))) { 1872 + twiceMaxEdgePower = 1873 + ar9003_hw_get_indirect_edge_power(eep, idx, 1874 + edge, freq, 1875 + is2GHz); 1876 + /* 1877 + * Leave loop - no more affecting edges possible in 1878 + * this monotonic increasing list 1879 + */ 1880 + break; 1881 + } 1882 + } 1883 + return twiceMaxEdgePower; 1884 + } 1885 + 1886 + static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, 1887 + struct 
ath9k_channel *chan, 1888 + u8 *pPwrArray, u16 cfgCtl, 1889 + u8 twiceAntennaReduction, 1890 + u8 twiceMaxRegulatoryPower, 1891 + u16 powerLimit) 1892 + { 1893 + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1894 + struct ath_common *common = ath9k_hw_common(ah); 1895 + struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; 1896 + u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; 1897 + static const u16 tpScaleReductionTable[5] = { 1898 + 0, 3, 6, 9, AR9300_MAX_RATE_POWER 1899 + }; 1900 + int i; 1901 + int16_t twiceLargestAntenna; 1902 + u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 1903 + u16 ctlModesFor11a[] = { 1904 + CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 1905 + }; 1906 + u16 ctlModesFor11g[] = { 1907 + CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, 1908 + CTL_11G_EXT, CTL_2GHT40 1909 + }; 1910 + u16 numCtlModes, *pCtlMode, ctlMode, freq; 1911 + struct chan_centers centers; 1912 + u8 *ctlIndex; 1913 + u8 ctlNum; 1914 + u16 twiceMinEdgePower; 1915 + bool is2ghz = IS_CHAN_2GHZ(chan); 1916 + 1917 + ath9k_hw_get_channel_centers(ah, chan, &centers); 1918 + 1919 + /* Compute TxPower reduction due to Antenna Gain */ 1920 + if (is2ghz) 1921 + twiceLargestAntenna = pEepData->modalHeader2G.antennaGain; 1922 + else 1923 + twiceLargestAntenna = pEepData->modalHeader5G.antennaGain; 1924 + 1925 + twiceLargestAntenna = (int16_t)min((twiceAntennaReduction) - 1926 + twiceLargestAntenna, 0); 1927 + 1928 + /* 1929 + * scaledPower is the minimum of the user input power level 1930 + * and the regulatory allowed power level 1931 + */ 1932 + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; 1933 + 1934 + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) { 1935 + maxRegAllowedPower -= 1936 + (tpScaleReductionTable[(regulatory->tp_scale)] * 2); 1937 + } 1938 + 1939 + scaledPower = min(powerLimit, maxRegAllowedPower); 1940 + 1941 + /* 1942 + * Reduce scaled Power by number of chains active to get 1943 + * to per chain tx power level 1944 + */ 1945 
+ switch (ar5416_get_ntxchains(ah->txchainmask)) { 1946 + case 1: 1947 + break; 1948 + case 2: 1949 + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 1950 + break; 1951 + case 3: 1952 + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 1953 + break; 1954 + } 1955 + 1956 + scaledPower = max((u16)0, scaledPower); 1957 + 1958 + /* 1959 + * Get target powers from EEPROM - our baseline for TX Power 1960 + */ 1961 + if (is2ghz) { 1962 + /* Setup for CTL modes */ 1963 + /* CTL_11B, CTL_11G, CTL_2GHT20 */ 1964 + numCtlModes = 1965 + ARRAY_SIZE(ctlModesFor11g) - 1966 + SUB_NUM_CTL_MODES_AT_2G_40; 1967 + pCtlMode = ctlModesFor11g; 1968 + if (IS_CHAN_HT40(chan)) 1969 + /* All 2G CTL's */ 1970 + numCtlModes = ARRAY_SIZE(ctlModesFor11g); 1971 + } else { 1972 + /* Setup for CTL modes */ 1973 + /* CTL_11A, CTL_5GHT20 */ 1974 + numCtlModes = ARRAY_SIZE(ctlModesFor11a) - 1975 + SUB_NUM_CTL_MODES_AT_5G_40; 1976 + pCtlMode = ctlModesFor11a; 1977 + if (IS_CHAN_HT40(chan)) 1978 + /* All 5G CTL's */ 1979 + numCtlModes = ARRAY_SIZE(ctlModesFor11a); 1980 + } 1981 + 1982 + /* 1983 + * For MIMO, need to apply regulatory caps individually across 1984 + * dynamically running modes: CCK, OFDM, HT20, HT40 1985 + * 1986 + * The outer loop walks through each possible applicable runtime mode. 1987 + * The inner loop walks through each ctlIndex entry in EEPROM. 1988 + * The ctl value is encoded as [7:4] == test group, [3:0] == test mode. 
1989 + */ 1990 + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { 1991 + bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || 1992 + (pCtlMode[ctlMode] == CTL_2GHT40); 1993 + if (isHt40CtlMode) 1994 + freq = centers.synth_center; 1995 + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) 1996 + freq = centers.ext_center; 1997 + else 1998 + freq = centers.ctl_center; 1999 + 2000 + ath_print(common, ATH_DBG_REGULATORY, 2001 + "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, " 2002 + "EXT_ADDITIVE %d\n", 2003 + ctlMode, numCtlModes, isHt40CtlMode, 2004 + (pCtlMode[ctlMode] & EXT_ADDITIVE)); 2005 + 2006 + /* walk through each CTL index stored in EEPROM */ 2007 + if (is2ghz) { 2008 + ctlIndex = pEepData->ctlIndex_2G; 2009 + ctlNum = AR9300_NUM_CTLS_2G; 2010 + } else { 2011 + ctlIndex = pEepData->ctlIndex_5G; 2012 + ctlNum = AR9300_NUM_CTLS_5G; 2013 + } 2014 + 2015 + for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { 2016 + ath_print(common, ATH_DBG_REGULATORY, 2017 + "LOOP-Ctlidx %d: cfgCtl 0x%2.2x " 2018 + "pCtlMode 0x%2.2x ctlIndex 0x%2.2x " 2019 + "chan %dn", 2020 + i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], 2021 + chan->channel); 2022 + 2023 + /* 2024 + * compare test group from regulatory 2025 + * channel list with test mode from pCtlMode 2026 + * list 2027 + */ 2028 + if ((((cfgCtl & ~CTL_MODE_M) | 2029 + (pCtlMode[ctlMode] & CTL_MODE_M)) == 2030 + ctlIndex[i]) || 2031 + (((cfgCtl & ~CTL_MODE_M) | 2032 + (pCtlMode[ctlMode] & CTL_MODE_M)) == 2033 + ((ctlIndex[i] & CTL_MODE_M) | 2034 + SD_NO_CTL))) { 2035 + twiceMinEdgePower = 2036 + ar9003_hw_get_max_edge_power(pEepData, 2037 + freq, i, 2038 + is2ghz); 2039 + 2040 + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) 2041 + /* 2042 + * Find the minimum of all CTL 2043 + * edge powers that apply to 2044 + * this channel 2045 + */ 2046 + twiceMaxEdgePower = 2047 + min(twiceMaxEdgePower, 2048 + twiceMinEdgePower); 2049 + else { 2050 + /* specific */ 2051 + twiceMaxEdgePower = 2052 + twiceMinEdgePower; 2053 + break; 2054 + } 2055 + } 
2056 + } 2057 + 2058 + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); 2059 + 2060 + ath_print(common, ATH_DBG_REGULATORY, 2061 + "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d " 2062 + "sP %d minCtlPwr %d\n", 2063 + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, 2064 + scaledPower, minCtlPower); 2065 + 2066 + /* Apply ctl mode to correct target power set */ 2067 + switch (pCtlMode[ctlMode]) { 2068 + case CTL_11B: 2069 + for (i = ALL_TARGET_LEGACY_1L_5L; 2070 + i <= ALL_TARGET_LEGACY_11S; i++) 2071 + pPwrArray[i] = 2072 + (u8)min((u16)pPwrArray[i], 2073 + minCtlPower); 2074 + break; 2075 + case CTL_11A: 2076 + case CTL_11G: 2077 + for (i = ALL_TARGET_LEGACY_6_24; 2078 + i <= ALL_TARGET_LEGACY_54; i++) 2079 + pPwrArray[i] = 2080 + (u8)min((u16)pPwrArray[i], 2081 + minCtlPower); 2082 + break; 2083 + case CTL_5GHT20: 2084 + case CTL_2GHT20: 2085 + for (i = ALL_TARGET_HT20_0_8_16; 2086 + i <= ALL_TARGET_HT20_21; i++) 2087 + pPwrArray[i] = 2088 + (u8)min((u16)pPwrArray[i], 2089 + minCtlPower); 2090 + pPwrArray[ALL_TARGET_HT20_22] = 2091 + (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], 2092 + minCtlPower); 2093 + pPwrArray[ALL_TARGET_HT20_23] = 2094 + (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], 2095 + minCtlPower); 2096 + break; 2097 + case CTL_5GHT40: 2098 + case CTL_2GHT40: 2099 + for (i = ALL_TARGET_HT40_0_8_16; 2100 + i <= ALL_TARGET_HT40_23; i++) 2101 + pPwrArray[i] = 2102 + (u8)min((u16)pPwrArray[i], 2103 + minCtlPower); 2104 + break; 2105 + default: 2106 + break; 2107 + } 2108 + } /* end ctl mode checking */ 2109 + } 2110 + 1821 2111 static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, 1822 2112 struct ath9k_channel *chan, u16 cfgCtl, 1823 2113 u8 twiceAntennaReduction, 1824 2114 u8 twiceMaxRegulatoryPower, 1825 2115 u8 powerLimit) 1826 2116 { 1827 - ah->txpower_limit = powerLimit; 1828 - ar9003_hw_set_target_power_eeprom(ah, chan->channel); 2117 + struct ath_common *common = ath9k_hw_common(ah); 2118 + u8 targetPowerValT2[ar9300RateSize]; 2119 + unsigned 
int i = 0; 2120 + 2121 + ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); 2122 + ar9003_hw_set_power_per_rate_table(ah, chan, 2123 + targetPowerValT2, cfgCtl, 2124 + twiceAntennaReduction, 2125 + twiceMaxRegulatoryPower, 2126 + powerLimit); 2127 + 2128 + while (i < ar9300RateSize) { 2129 + ath_print(common, ATH_DBG_EEPROM, 2130 + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 2131 + i++; 2132 + ath_print(common, ATH_DBG_EEPROM, 2133 + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 2134 + i++; 2135 + ath_print(common, ATH_DBG_EEPROM, 2136 + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 2137 + i++; 2138 + ath_print(common, ATH_DBG_EEPROM, 2139 + "TPC[%02d] 0x%08x\n\n", i, targetPowerValT2[i]); 2140 + i++; 2141 + } 2142 + 2143 + /* Write target power array to registers */ 2144 + ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 2145 + 2146 + /* 2147 + * This is the TX power we send back to driver core, 2148 + * and it can use to pass to userspace to display our 2149 + * currently configured TX power setting. 2150 + * 2151 + * Since power is rate dependent, use one of the indices 2152 + * from the AR9300_Rates enum to select an entry from 2153 + * targetPowerValT2[] to report. Currently returns the 2154 + * power for HT40 MCS 0, HT20 MCS 0, or OFDM 6 Mbps 2155 + * as CCK power is less interesting (?). 2156 + */ 2157 + i = ALL_TARGET_LEGACY_6_24; /* legacy */ 2158 + if (IS_CHAN_HT40(chan)) 2159 + i = ALL_TARGET_HT40_0_8_16; /* ht40 */ 2160 + else if (IS_CHAN_HT20(chan)) 2161 + i = ALL_TARGET_HT20_0_8_16; /* ht20 */ 2162 + 2163 + ah->txpower_limit = targetPowerValT2[i]; 2164 + 1829 2165 ar9003_hw_calibration_apply(ah, chan->channel); 1830 2166 } 1831 2167
+9 -8
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
··· 577 577 } 578 578 579 579 void ar9003_paprd_populate_single_table(struct ath_hw *ah, 580 - struct ath9k_channel *chan, int chain) 580 + struct ath9k_hw_cal_data *caldata, 581 + int chain) 581 582 { 582 - u32 *paprd_table_val = chan->pa_table[chain]; 583 - u32 small_signal_gain = chan->small_signal_gain[chain]; 583 + u32 *paprd_table_val = caldata->pa_table[chain]; 584 + u32 small_signal_gain = caldata->small_signal_gain[chain]; 584 585 u32 training_power; 585 586 u32 reg = 0; 586 587 int i; ··· 655 654 } 656 655 EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); 657 656 658 - int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, 659 - int chain) 657 + int ar9003_paprd_create_curve(struct ath_hw *ah, 658 + struct ath9k_hw_cal_data *caldata, int chain) 660 659 { 661 - u16 *small_signal_gain = &chan->small_signal_gain[chain]; 662 - u32 *pa_table = chan->pa_table[chain]; 660 + u16 *small_signal_gain = &caldata->small_signal_gain[chain]; 661 + u32 *pa_table = caldata->pa_table[chain]; 663 662 u32 *data_L, *data_U; 664 663 int i, status = 0; 665 664 u32 *buf; 666 665 u32 reg; 667 666 668 - memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain])); 667 + memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain])); 669 668 670 669 buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC); 671 670 if (!buf)
+5 -1
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 542 542 u32 reg = INI_RA(iniArr, i, 0); 543 543 u32 val = INI_RA(iniArr, i, column); 544 544 545 - REG_WRITE(ah, reg, val); 545 + if (reg >= 0x16000 && reg < 0x17000) 546 + ath9k_hw_analog_shift_regwrite(ah, reg, val); 547 + else 548 + REG_WRITE(ah, reg, val); 549 + 546 550 DO_DELAY(regWrites); 547 551 } 548 552 }
+2 -1
drivers/net/wireless/ath/ath9k/ath9k.h
··· 510 510 #define SC_OP_BEACONS BIT(1) 511 511 #define SC_OP_RXAGGR BIT(2) 512 512 #define SC_OP_TXAGGR BIT(3) 513 - #define SC_OP_FULL_RESET BIT(4) 513 + #define SC_OP_OFFCHANNEL BIT(4) 514 514 #define SC_OP_PREAMBLE_SHORT BIT(5) 515 515 #define SC_OP_PROTECT_ENABLE BIT(6) 516 516 #define SC_OP_RXFLUSH BIT(7) ··· 609 609 struct ath_wiphy { 610 610 struct ath_softc *sc; /* shared for all virtual wiphys */ 611 611 struct ieee80211_hw *hw; 612 + struct ath9k_hw_cal_data caldata; 612 613 enum ath_wiphy_state { 613 614 ATH_WIPHY_INACTIVE, 614 615 ATH_WIPHY_ACTIVE,
+64 -54
drivers/net/wireless/ath/ath9k/calib.c
··· 22 22 /* We can tune this as we go by monitoring really low values */ 23 23 #define ATH9K_NF_TOO_LOW -60 24 24 25 - /* AR5416 may return very high value (like -31 dBm), in those cases the nf 26 - * is incorrect and we should use the static NF value. Later we can try to 27 - * find out why they are reporting these values */ 28 - 29 - static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf) 30 - { 31 - if (nf > ATH9K_NF_TOO_LOW) { 32 - ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 33 - "noise floor value detected (%d) is " 34 - "lower than what we think is a " 35 - "reasonable value (%d)\n", 36 - nf, ATH9K_NF_TOO_LOW); 37 - return false; 38 - } 39 - return true; 40 - } 41 - 42 25 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) 43 26 { 44 27 int16_t nfval; ··· 104 121 ah->cal_samples = 0; 105 122 } 106 123 124 + static s16 ath9k_hw_get_default_nf(struct ath_hw *ah, 125 + struct ath9k_channel *chan) 126 + { 127 + struct ath_nf_limits *limit; 128 + 129 + if (!chan || IS_CHAN_2GHZ(chan)) 130 + limit = &ah->nf_2g; 131 + else 132 + limit = &ah->nf_5g; 133 + 134 + return limit->nominal; 135 + } 136 + 107 137 /* This is done for the currently configured channel */ 108 138 bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 109 139 { ··· 124 128 struct ieee80211_conf *conf = &common->hw->conf; 125 129 struct ath9k_cal_list *currCal = ah->cal_list_curr; 126 130 127 - if (!ah->curchan) 131 + if (!ah->caldata) 128 132 return true; 129 133 130 134 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) ··· 147 151 "Resetting Cal %d state for channel %u\n", 148 152 currCal->calData->calType, conf->channel->center_freq); 149 153 150 - ah->curchan->CalValid &= ~currCal->calData->calType; 154 + ah->caldata->CalValid &= ~currCal->calData->calType; 151 155 currCal->calState = CAL_WAITING; 152 156 153 157 return false; 154 158 } 155 159 EXPORT_SYMBOL(ath9k_hw_reset_calvalid); 156 160 157 - void ath9k_hw_start_nfcal(struct ath_hw *ah) 161 + void 
ath9k_hw_start_nfcal(struct ath_hw *ah, bool update) 158 162 { 163 + if (ah->caldata) 164 + ah->caldata->nfcal_pending = true; 165 + 159 166 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 160 167 AR_PHY_AGC_CONTROL_ENABLE_NF); 161 - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 168 + 169 + if (update) 170 + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 162 171 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 172 + else 173 + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 174 + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 175 + 163 176 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 164 177 } 165 178 166 179 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) 167 180 { 168 - struct ath9k_nfcal_hist *h; 181 + struct ath9k_nfcal_hist *h = NULL; 169 182 unsigned i, j; 170 183 int32_t val; 171 184 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 172 185 struct ath_common *common = ath9k_hw_common(ah); 186 + s16 default_nf = ath9k_hw_get_default_nf(ah, chan); 173 187 174 - h = ah->nfCalHist; 188 + if (ah->caldata) 189 + h = ah->caldata->nfCalHist; 175 190 176 191 for (i = 0; i < NUM_NF_READINGS; i++) { 177 192 if (chainmask & (1 << i)) { 193 + s16 nfval; 194 + 195 + if (h) 196 + nfval = h[i].privNF; 197 + else 198 + nfval = default_nf; 199 + 178 200 val = REG_READ(ah, ah->nf_regs[i]); 179 201 val &= 0xFFFFFE00; 180 - val |= (((u32) (h[i].privNF) << 1) & 0x1ff); 202 + val |= (((u32) nfval << 1) & 0x1ff); 181 203 REG_WRITE(ah, ah->nf_regs[i], val); 182 204 } 183 205 } ··· 291 277 } 292 278 } 293 279 294 - int16_t ath9k_hw_getnf(struct ath_hw *ah, 295 - struct ath9k_channel *chan) 280 + bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) 296 281 { 297 282 struct ath_common *common = ath9k_hw_common(ah); 298 283 int16_t nf, nfThresh; 299 284 int16_t nfarray[NUM_NF_READINGS] = { 0 }; 300 285 struct ath9k_nfcal_hist *h; 301 286 struct ieee80211_channel *c = chan->chan; 287 + struct ath9k_hw_cal_data *caldata = ah->caldata; 288 + 289 + if (!caldata) 290 + return false; 302 291 303 292 
chan->channelFlags &= (~CHANNEL_CW_INT); 304 293 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 305 294 ath_print(common, ATH_DBG_CALIBRATE, 306 295 "NF did not complete in calibration window\n"); 307 296 nf = 0; 308 - chan->rawNoiseFloor = nf; 309 - return chan->rawNoiseFloor; 297 + caldata->rawNoiseFloor = nf; 298 + return false; 310 299 } else { 311 300 ath9k_hw_do_getnf(ah, nfarray); 312 301 ath9k_hw_nf_sanitize(ah, nfarray); ··· 324 307 } 325 308 } 326 309 327 - h = ah->nfCalHist; 328 - 310 + h = caldata->nfCalHist; 311 + caldata->nfcal_pending = false; 329 312 ath9k_hw_update_nfcal_hist_buffer(h, nfarray); 330 - chan->rawNoiseFloor = h[0].privNF; 331 - 332 - return chan->rawNoiseFloor; 313 + caldata->rawNoiseFloor = h[0].privNF; 314 + return true; 333 315 } 334 316 335 - void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah) 317 + void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 318 + struct ath9k_channel *chan) 336 319 { 337 - struct ath_nf_limits *limit; 320 + struct ath9k_nfcal_hist *h; 321 + s16 default_nf; 338 322 int i, j; 339 323 340 - if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan)) 341 - limit = &ah->nf_2g; 342 - else 343 - limit = &ah->nf_5g; 324 + if (!ah->caldata) 325 + return; 344 326 327 + h = ah->caldata->nfCalHist; 328 + default_nf = ath9k_hw_get_default_nf(ah, chan); 345 329 for (i = 0; i < NUM_NF_READINGS; i++) { 346 - ah->nfCalHist[i].currIndex = 0; 347 - ah->nfCalHist[i].privNF = limit->nominal; 348 - ah->nfCalHist[i].invalidNFcount = 349 - AR_PHY_CCA_FILTERWINDOW_LENGTH; 330 + h[i].currIndex = 0; 331 + h[i].privNF = default_nf; 332 + h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH; 350 333 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { 351 - ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal; 334 + h[i].nfCalBuffer[j] = default_nf; 352 335 } 353 336 } 354 337 } 355 338 356 339 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) 357 340 { 358 - s16 nf; 341 + if (!ah->caldata || 
!ah->caldata->rawNoiseFloor) 342 + return ath9k_hw_get_default_nf(ah, chan); 359 343 360 - if (chan->rawNoiseFloor == 0) 361 - nf = -96; 362 - else 363 - nf = chan->rawNoiseFloor; 364 - 365 - if (!ath9k_hw_nf_in_range(ah, nf)) 366 - nf = ATH_DEFAULT_NOISE_FLOOR; 367 - 368 - return nf; 344 + return ah->caldata->rawNoiseFloor; 369 345 } 370 346 EXPORT_SYMBOL(ath9k_hw_getchan_noise);
+4 -4
drivers/net/wireless/ath/ath9k/calib.h
··· 108 108 }; 109 109 110 110 bool ath9k_hw_reset_calvalid(struct ath_hw *ah); 111 - void ath9k_hw_start_nfcal(struct ath_hw *ah); 111 + void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update); 112 112 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); 113 - int16_t ath9k_hw_getnf(struct ath_hw *ah, 114 - struct ath9k_channel *chan); 115 - void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); 113 + bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); 114 + void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 115 + struct ath9k_channel *chan); 116 116 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 117 117 void ath9k_hw_reset_calibration(struct ath_hw *ah, 118 118 struct ath9k_cal_list *currCal);
+2
drivers/net/wireless/ath/ath9k/htc.h
··· 353 353 u16 seq_no; 354 354 u32 bmiss_cnt; 355 355 356 + struct ath9k_hw_cal_data caldata[38]; 357 + 356 358 spinlock_t beacon_lock; 357 359 358 360 bool tx_queues_stop;
+6 -4
drivers/net/wireless/ath/ath9k/htc_drv_main.c
··· 125 125 struct ieee80211_conf *conf = &common->hw->conf; 126 126 bool fastcc = true; 127 127 struct ieee80211_channel *channel = hw->conf.channel; 128 + struct ath9k_hw_cal_data *caldata; 128 129 enum htc_phymode mode; 129 130 __be16 htc_mode; 130 131 u8 cmd_rsp; ··· 150 149 priv->ah->curchan->channel, 151 150 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf)); 152 151 153 - ret = ath9k_hw_reset(ah, hchan, fastcc); 152 + caldata = &priv->caldata[channel->hw_value]; 153 + ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 154 154 if (ret) { 155 155 ath_print(common, ATH_DBG_FATAL, 156 156 "Unable to reset channel (%u Mhz) " ··· 1030 1028 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 1031 1029 1032 1030 /* Reset the HW */ 1033 - ret = ath9k_hw_reset(ah, ah->curchan, false); 1031 + ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 1034 1032 if (ret) { 1035 1033 ath_print(common, ATH_DBG_FATAL, 1036 1034 "Unable to reset hardware; reset status %d " ··· 1093 1091 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 1094 1092 1095 1093 /* Reset the HW */ 1096 - ret = ath9k_hw_reset(ah, ah->curchan, false); 1094 + ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 1097 1095 if (ret) { 1098 1096 ath_print(common, ATH_DBG_FATAL, 1099 1097 "Unable to reset hardware; reset status %d " ··· 1181 1179 ath9k_hw_configpcipowersave(ah, 0, 0); 1182 1180 1183 1181 ath9k_hw_htc_resetinit(ah); 1184 - ret = ath9k_hw_reset(ah, init_channel, false); 1182 + ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1185 1183 if (ret) { 1186 1184 ath_print(common, ATH_DBG_FATAL, 1187 1185 "Unable to reset hardware; reset status %d "
+14 -11
drivers/net/wireless/ath/ath9k/hw.c
··· 610 610 else 611 611 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 612 612 613 - ath9k_init_nfcal_hist_buffer(ah); 614 613 ah->bb_watchdog_timeout_ms = 25; 615 614 616 615 common->state = ATH_HW_INITIALIZED; ··· 1182 1183 1183 1184 ath9k_hw_spur_mitigate_freq(ah, chan); 1184 1185 1185 - if (!chan->oneTimeCalsDone) 1186 - chan->oneTimeCalsDone = true; 1187 - 1188 1186 return true; 1189 1187 } 1190 1188 ··· 1214 1218 EXPORT_SYMBOL(ath9k_hw_check_alive); 1215 1219 1216 1220 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1217 - bool bChannelChange) 1221 + struct ath9k_hw_cal_data *caldata, bool bChannelChange) 1218 1222 { 1219 1223 struct ath_common *common = ath9k_hw_common(ah); 1220 1224 u32 saveLedState; ··· 1239 1243 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1240 1244 return -EIO; 1241 1245 1242 - if (curchan && !ah->chip_fullsleep) 1246 + if (curchan && !ah->chip_fullsleep && ah->caldata) 1243 1247 ath9k_hw_getnf(ah, curchan); 1248 + 1249 + ah->caldata = caldata; 1250 + if (caldata && 1251 + (chan->channel != caldata->channel || 1252 + (chan->channelFlags & ~CHANNEL_CW_INT) != 1253 + (caldata->channelFlags & ~CHANNEL_CW_INT))) { 1254 + /* Operating channel changed, reset channel calibration data */ 1255 + memset(caldata, 0, sizeof(*caldata)); 1256 + ath9k_init_nfcal_hist_buffer(ah, chan); 1257 + } 1244 1258 1245 1259 if (bChannelChange && 1246 1260 (ah->chip_fullsleep != true) && ··· 1262 1256 1263 1257 if (ath9k_hw_channel_change(ah, chan)) { 1264 1258 ath9k_hw_loadnf(ah, ah->curchan); 1265 - ath9k_hw_start_nfcal(ah); 1259 + ath9k_hw_start_nfcal(ah, true); 1266 1260 return 0; 1267 1261 } 1268 1262 } ··· 1467 1461 if (ah->btcoex_hw.enabled) 1468 1462 ath9k_hw_btcoex_enable(ah); 1469 1463 1470 - if (AR_SREV_9300_20_OR_LATER(ah)) { 1471 - ath9k_hw_loadnf(ah, curchan); 1472 - ath9k_hw_start_nfcal(ah); 1464 + if (AR_SREV_9300_20_OR_LATER(ah)) 1473 1465 ar9003_hw_bb_watchdog_config(ah); 1474 - } 1475 1466 1476 1467 return 0; 1477 1468 }
+20 -13
drivers/net/wireless/ath/ath9k/hw.h
··· 346 346 CHANNEL_HT40PLUS | \ 347 347 CHANNEL_HT40MINUS) 348 348 349 + struct ath9k_hw_cal_data { 350 + u16 channel; 351 + u32 channelFlags; 352 + int32_t CalValid; 353 + int8_t iCoff; 354 + int8_t qCoff; 355 + int16_t rawNoiseFloor; 356 + bool paprd_done; 357 + bool nfcal_pending; 358 + u16 small_signal_gain[AR9300_MAX_CHAINS]; 359 + u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 360 + struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 361 + }; 362 + 349 363 struct ath9k_channel { 350 364 struct ieee80211_channel *chan; 351 365 u16 channel; 352 366 u32 channelFlags; 353 367 u32 chanmode; 354 - int32_t CalValid; 355 - bool oneTimeCalsDone; 356 - int8_t iCoff; 357 - int8_t qCoff; 358 - int16_t rawNoiseFloor; 359 - bool paprd_done; 360 - u16 small_signal_gain[AR9300_MAX_CHAINS]; 361 - u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 362 368 }; 363 369 364 370 #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ ··· 675 669 enum nl80211_iftype opmode; 676 670 enum ath9k_power_mode power_mode; 677 671 678 - struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 672 + struct ath9k_hw_cal_data *caldata; 679 673 struct ath9k_pacal_info pacal_info; 680 674 struct ar5416Stats stats; 681 675 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; ··· 869 863 void ath9k_hw_deinit(struct ath_hw *ah); 870 864 int ath9k_hw_init(struct ath_hw *ah); 871 865 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 872 - bool bChannelChange); 866 + struct ath9k_hw_cal_data *caldata, bool bChannelChange); 873 867 int ath9k_hw_fill_cap_info(struct ath_hw *ah); 874 868 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 875 869 ··· 964 958 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); 965 959 void ar9003_paprd_enable(struct ath_hw *ah, bool val); 966 960 void ar9003_paprd_populate_single_table(struct ath_hw *ah, 967 - struct ath9k_channel *chan, int chain); 968 - int ar9003_paprd_create_curve(struct ath_hw *ah, 
struct ath9k_channel *chan, 969 - int chain); 961 + struct ath9k_hw_cal_data *caldata, 962 + int chain); 963 + int ar9003_paprd_create_curve(struct ath_hw *ah, 964 + struct ath9k_hw_cal_data *caldata, int chain); 970 965 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); 971 966 int ar9003_paprd_init_table(struct ath_hw *ah); 972 967 bool ar9003_paprd_is_done(struct ath_hw *ah);
+60 -44
drivers/net/wireless/ath/ath9k/main.c
··· 154 154 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 155 155 } 156 156 157 + static void ath_start_ani(struct ath_common *common) 158 + { 159 + struct ath_hw *ah = common->ah; 160 + unsigned long timestamp = jiffies_to_msecs(jiffies); 161 + struct ath_softc *sc = (struct ath_softc *) common->priv; 162 + 163 + if (!(sc->sc_flags & SC_OP_ANI_RUN)) 164 + return; 165 + 166 + if (sc->sc_flags & SC_OP_OFFCHANNEL) 167 + return; 168 + 169 + common->ani.longcal_timer = timestamp; 170 + common->ani.shortcal_timer = timestamp; 171 + common->ani.checkani_timer = timestamp; 172 + 173 + mod_timer(&common->ani.timer, 174 + jiffies + 175 + msecs_to_jiffies((u32)ah->config.ani_poll_interval)); 176 + } 177 + 157 178 /* 158 179 * Set/change channels. If the channel is really being changed, it's done 159 180 * by reseting the chip. To accomplish this we must first cleanup any pending ··· 183 162 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 184 163 struct ath9k_channel *hchan) 185 164 { 165 + struct ath_wiphy *aphy = hw->priv; 186 166 struct ath_hw *ah = sc->sc_ah; 187 167 struct ath_common *common = ath9k_hw_common(ah); 188 168 struct ieee80211_conf *conf = &common->hw->conf; 189 169 bool fastcc = true, stopped; 190 170 struct ieee80211_channel *channel = hw->conf.channel; 171 + struct ath9k_hw_cal_data *caldata = NULL; 191 172 int r; 192 173 193 174 if (sc->sc_flags & SC_OP_INVALID) 194 175 return -EIO; 176 + 177 + del_timer_sync(&common->ani.timer); 178 + cancel_work_sync(&sc->paprd_work); 179 + cancel_work_sync(&sc->hw_check_work); 180 + cancel_delayed_work_sync(&sc->tx_complete_work); 195 181 196 182 ath9k_ps_wakeup(sc); 197 183 ··· 219 191 * to flush data frames already in queue because of 220 192 * changing channel. 
*/ 221 193 222 - if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) 194 + if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL)) 223 195 fastcc = false; 196 + 197 + if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) 198 + caldata = &aphy->caldata; 224 199 225 200 ath_print(common, ATH_DBG_CONFIG, 226 201 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", ··· 232 201 233 202 spin_lock_bh(&sc->sc_resetlock); 234 203 235 - r = ath9k_hw_reset(ah, hchan, fastcc); 204 + r = ath9k_hw_reset(ah, hchan, caldata, fastcc); 236 205 if (r) { 237 206 ath_print(common, ATH_DBG_FATAL, 238 207 "Unable to reset channel (%u MHz), " ··· 242 211 goto ps_restore; 243 212 } 244 213 spin_unlock_bh(&sc->sc_resetlock); 245 - 246 - sc->sc_flags &= ~SC_OP_FULL_RESET; 247 214 248 215 if (ath_startrecv(sc) != 0) { 249 216 ath_print(common, ATH_DBG_FATAL, ··· 254 225 ath_update_txpow(sc); 255 226 ath9k_hw_set_interrupts(ah, ah->imask); 256 227 228 + if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) { 229 + ath_start_ani(common); 230 + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 231 + ath_beacon_config(sc, NULL); 232 + } 233 + 257 234 ps_restore: 258 235 ath9k_ps_restore(sc); 259 236 return r; ··· 268 233 static void ath_paprd_activate(struct ath_softc *sc) 269 234 { 270 235 struct ath_hw *ah = sc->sc_ah; 236 + struct ath9k_hw_cal_data *caldata = ah->caldata; 271 237 int chain; 272 238 273 - if (!ah->curchan->paprd_done) 239 + if (!caldata || !caldata->paprd_done) 274 240 return; 275 241 276 242 ath9k_ps_wakeup(sc); 243 + ar9003_paprd_enable(ah, false); 277 244 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 278 245 if (!(ah->caps.tx_chainmask & BIT(chain))) 279 246 continue; 280 247 281 - ar9003_paprd_populate_single_table(ah, ah->curchan, chain); 248 + ar9003_paprd_populate_single_table(ah, caldata, chain); 282 249 } 283 250 284 251 ar9003_paprd_enable(ah, true); ··· 298 261 int band = hw->conf.channel->band; 299 262 struct ieee80211_supported_band *sband = &sc->sbands[band]; 300 
263 struct ath_tx_control txctl; 264 + struct ath9k_hw_cal_data *caldata = ah->caldata; 301 265 int qnum, ftype; 302 266 int chain_ok = 0; 303 267 int chain; 304 268 int len = 1800; 305 269 int time_left; 306 270 int i; 271 + 272 + if (!caldata) 273 + return; 307 274 308 275 skb = alloc_skb(len, GFP_KERNEL); 309 276 if (!skb) ··· 363 322 if (!ar9003_paprd_is_done(ah)) 364 323 break; 365 324 366 - if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0) 325 + if (ar9003_paprd_create_curve(ah, caldata, chain) != 0) 367 326 break; 368 327 369 328 chain_ok = 1; ··· 371 330 kfree_skb(skb); 372 331 373 332 if (chain_ok) { 374 - ah->curchan->paprd_done = true; 333 + caldata->paprd_done = true; 375 334 ath_paprd_activate(sc); 376 335 } 377 336 ··· 480 439 cal_interval = min(cal_interval, (u32)short_cal_interval); 481 440 482 441 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 483 - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && 484 - !(sc->sc_flags & SC_OP_SCANNING)) { 485 - if (!sc->sc_ah->curchan->paprd_done) 442 + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) { 443 + if (!ah->caldata->paprd_done) 486 444 ieee80211_queue_work(sc->hw, &sc->paprd_work); 487 445 else 488 446 ath_paprd_activate(sc); 489 447 } 490 - } 491 - 492 - static void ath_start_ani(struct ath_common *common) 493 - { 494 - struct ath_hw *ah = common->ah; 495 - unsigned long timestamp = jiffies_to_msecs(jiffies); 496 - struct ath_softc *sc = (struct ath_softc *) common->priv; 497 - 498 - if (!(sc->sc_flags & SC_OP_ANI_RUN)) 499 - return; 500 - 501 - common->ani.longcal_timer = timestamp; 502 - common->ani.shortcal_timer = timestamp; 503 - common->ani.checkani_timer = timestamp; 504 - 505 - mod_timer(&common->ani.timer, 506 - jiffies + 507 - msecs_to_jiffies((u32)ah->config.ani_poll_interval)); 508 448 } 509 449 510 450 /* ··· 499 477 struct ath_hw *ah = sc->sc_ah; 500 478 struct ath_common *common = ath9k_hw_common(ah); 501 479 502 - if 
((sc->sc_flags & SC_OP_SCANNING) || is_ht || 480 + if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht || 503 481 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) { 504 482 common->tx_chainmask = ah->caps.tx_chainmask; 505 483 common->rx_chainmask = ah->caps.rx_chainmask; ··· 839 817 ah->curchan = ath_get_curchannel(sc, sc->hw); 840 818 841 819 spin_lock_bh(&sc->sc_resetlock); 842 - r = ath9k_hw_reset(ah, ah->curchan, false); 820 + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 843 821 if (r) { 844 822 ath_print(common, ATH_DBG_FATAL, 845 823 "Unable to reset channel (%u MHz), " ··· 899 877 ah->curchan = ath_get_curchannel(sc, hw); 900 878 901 879 spin_lock_bh(&sc->sc_resetlock); 902 - r = ath9k_hw_reset(ah, ah->curchan, false); 880 + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 903 881 if (r) { 904 882 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 905 883 "Unable to reset channel (%u MHz), " ··· 932 910 ath_flushrecv(sc); 933 911 934 912 spin_lock_bh(&sc->sc_resetlock); 935 - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 913 + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 936 914 if (r) 937 915 ath_print(common, ATH_DBG_FATAL, 938 916 "Unable to reset hardware; reset status %d\n", r); ··· 1107 1085 * and then setup of the interrupt mask. 
1108 1086 */ 1109 1087 spin_lock_bh(&sc->sc_resetlock); 1110 - r = ath9k_hw_reset(ah, init_channel, false); 1088 + r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1111 1089 if (r) { 1112 1090 ath_print(common, ATH_DBG_FATAL, 1113 1091 "Unable to reset hardware; reset status %d " ··· 1601 1579 1602 1580 aphy->chan_idx = pos; 1603 1581 aphy->chan_is_ht = conf_is_ht(conf); 1582 + if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 1583 + sc->sc_flags |= SC_OP_OFFCHANNEL; 1584 + else 1585 + sc->sc_flags &= ~SC_OP_OFFCHANNEL; 1604 1586 1605 1587 if (aphy->state == ATH_WIPHY_SCAN || 1606 1588 aphy->state == ATH_WIPHY_ACTIVE) ··· 2016 1990 { 2017 1991 struct ath_wiphy *aphy = hw->priv; 2018 1992 struct ath_softc *sc = aphy->sc; 2019 - struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2020 1993 2021 1994 mutex_lock(&sc->mutex); 2022 1995 if (ath9k_wiphy_scanning(sc)) { ··· 2033 2008 aphy->state = ATH_WIPHY_SCAN; 2034 2009 ath9k_wiphy_pause_all_forced(sc, aphy); 2035 2010 sc->sc_flags |= SC_OP_SCANNING; 2036 - del_timer_sync(&common->ani.timer); 2037 - cancel_work_sync(&sc->paprd_work); 2038 - cancel_work_sync(&sc->hw_check_work); 2039 - cancel_delayed_work_sync(&sc->tx_complete_work); 2040 2011 mutex_unlock(&sc->mutex); 2041 2012 } 2042 2013 ··· 2044 2023 { 2045 2024 struct ath_wiphy *aphy = hw->priv; 2046 2025 struct ath_softc *sc = aphy->sc; 2047 - struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2048 2026 2049 2027 mutex_lock(&sc->mutex); 2050 2028 aphy->state = ATH_WIPHY_ACTIVE; 2051 2029 sc->sc_flags &= ~SC_OP_SCANNING; 2052 - sc->sc_flags |= SC_OP_FULL_RESET; 2053 - ath_start_ani(common); 2054 - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 2055 - ath_beacon_config(sc, NULL); 2056 2030 mutex_unlock(&sc->mutex); 2057 2031 } 2058 2032
+5 -5
drivers/net/wireless/ath/ath9k/recv.c
··· 1140 1140 if (flush) 1141 1141 goto requeue; 1142 1142 1143 + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1144 + rxs, &decrypt_error); 1145 + if (retval) 1146 + goto requeue; 1147 + 1143 1148 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1144 1149 if (rs.rs_tstamp > tsf_lower && 1145 1150 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) ··· 1153 1148 if (rs.rs_tstamp < tsf_lower && 1154 1149 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1155 1150 rxs->mactime += 0x100000000ULL; 1156 - 1157 - retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1158 - rxs, &decrypt_error); 1159 - if (retval) 1160 - goto requeue; 1161 1151 1162 1152 /* Ensure we always have an skb to requeue once we are done 1163 1153 * processing the current buffer's skb */
+9 -27
drivers/net/wireless/ath/ath9k/xmit.c
··· 120 120 list_add_tail(&ac->list, &txq->axq_acq); 121 121 } 122 122 123 - static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 124 - { 125 - struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 126 - 127 - spin_lock_bh(&txq->axq_lock); 128 - tid->paused++; 129 - spin_unlock_bh(&txq->axq_lock); 130 - } 131 - 132 123 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 133 124 { 134 125 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 135 126 136 - BUG_ON(tid->paused <= 0); 127 + WARN_ON(!tid->paused); 128 + 137 129 spin_lock_bh(&txq->axq_lock); 138 - 139 - tid->paused--; 140 - 141 - if (tid->paused > 0) 142 - goto unlock; 130 + tid->paused = false; 143 131 144 132 if (list_empty(&tid->buf_q)) 145 133 goto unlock; ··· 145 157 struct list_head bf_head; 146 158 INIT_LIST_HEAD(&bf_head); 147 159 148 - BUG_ON(tid->paused <= 0); 160 + WARN_ON(!tid->paused); 161 + 149 162 spin_lock_bh(&txq->axq_lock); 150 - 151 - tid->paused--; 152 - 153 - if (tid->paused > 0) { 154 - spin_unlock_bh(&txq->axq_lock); 155 - return; 156 - } 163 + tid->paused = false; 157 164 158 165 while (!list_empty(&tid->buf_q)) { 159 166 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); ··· 794 811 an = (struct ath_node *)sta->drv_priv; 795 812 txtid = ATH_AN_2_TID(an, tid); 796 813 txtid->state |= AGGR_ADDBA_PROGRESS; 797 - ath_tx_pause_tid(sc, txtid); 814 + txtid->paused = true; 798 815 *ssn = txtid->seq_start; 799 816 } 800 817 ··· 818 835 return; 819 836 } 820 837 821 - ath_tx_pause_tid(sc, txtid); 822 - 823 838 /* drop all software retried frames and mark this TID */ 824 839 spin_lock_bh(&txq->axq_lock); 840 + txtid->paused = true; 825 841 while (!list_empty(&txtid->buf_q)) { 826 842 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 827 843 if (!bf_isretried(bf)) { ··· 1163 1181 "Failed to stop TX DMA. 
Resetting hardware!\n"); 1164 1182 1165 1183 spin_lock_bh(&sc->sc_resetlock); 1166 - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 1184 + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 1167 1185 if (r) 1168 1186 ath_print(common, ATH_DBG_FATAL, 1169 1187 "Unable to reset hardware; reset status %d\n",
+4
drivers/net/wireless/ipw2x00/ipw2100.c
··· 1924 1924 bg_band->channels = 1925 1925 kzalloc(geo->bg_channels * 1926 1926 sizeof(struct ieee80211_channel), GFP_KERNEL); 1927 + if (!bg_band->channels) { 1928 + ipw2100_down(priv); 1929 + return -ENOMEM; 1930 + } 1927 1931 /* translate geo->bg to bg_band.channels */ 1928 1932 for (i = 0; i < geo->bg_channels; i++) { 1929 1933 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
··· 980 980 le32_to_cpu(bt->lo_priority_tx_req_cnt), 981 981 accum_bt->lo_priority_tx_req_cnt); 982 982 pos += scnprintf(buf + pos, bufsz - pos, 983 - "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", 983 + "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n", 984 984 le32_to_cpu(bt->lo_priority_tx_denied_cnt), 985 985 accum_bt->lo_priority_tx_denied_cnt); 986 986 pos += scnprintf(buf + pos, bufsz - pos,
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 1429 1429 void iwl_free_tfds_in_queue(struct iwl_priv *priv, 1430 1430 int sta_id, int tid, int freed) 1431 1431 { 1432 - WARN_ON(!spin_is_locked(&priv->sta_lock)); 1432 + lockdep_assert_held(&priv->sta_lock); 1433 1433 1434 1434 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) 1435 1435 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+7 -4
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
··· 300 300 struct ieee80211_sta *sta) 301 301 { 302 302 int ret = -EAGAIN; 303 + u32 load = rs_tl_get_load(lq_data, tid); 303 304 304 - if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 305 + if (load > IWL_AGG_LOAD_THRESHOLD) { 305 306 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 306 307 sta->addr, tid); 307 308 ret = ieee80211_start_tx_ba_session(sta, tid); ··· 312 311 * this might be cause by reloading firmware 313 312 * stop the tx ba session here 314 313 */ 315 - IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", 314 + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", 316 315 tid); 317 316 ieee80211_stop_tx_ba_session(sta, tid); 318 317 } 319 - } else 320 - IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid); 318 + } else { 319 + IWL_ERR(priv, "Aggregation not enabled for tid %d " 320 + "because load = %u\n", tid, load); 321 + } 321 322 return ret; 322 323 } 323 324
+9 -2
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
··· 1117 1117 u8 *addr = priv->stations[sta_id].sta.sta.addr; 1118 1118 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; 1119 1119 1120 - WARN_ON(!spin_is_locked(&priv->sta_lock)); 1120 + lockdep_assert_held(&priv->sta_lock); 1121 1121 1122 1122 switch (priv->stations[sta_id].tid[tid].agg.state) { 1123 1123 case IWL_EMPTYING_HW_QUEUE_DELBA: ··· 1331 1331 tid = ba_resp->tid; 1332 1332 agg = &priv->stations[sta_id].tid[tid].agg; 1333 1333 if (unlikely(agg->txq_id != scd_flow)) { 1334 - IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n", 1334 + /* 1335 + * FIXME: this is a uCode bug which need to be addressed, 1336 + * log the information and return for now! 1337 + * since it is possible happen very often and in order 1338 + * not to fill the syslog, don't enable the logging by default 1339 + */ 1340 + IWL_DEBUG_TX_REPLY(priv, 1341 + "BA scd_flow %d does not match txq_id %d\n", 1335 1342 scd_flow, agg->txq_id); 1336 1343 return; 1337 1344 }
+5 -1
drivers/net/wireless/iwlwifi/iwl-core.c
··· 2000 2000 struct ieee80211_vif *vif) 2001 2001 { 2002 2002 struct iwl_priv *priv = hw->priv; 2003 + bool scan_completed = false; 2003 2004 2004 2005 IWL_DEBUG_MAC80211(priv, "enter\n"); 2005 2006 ··· 2014 2013 if (priv->vif == vif) { 2015 2014 priv->vif = NULL; 2016 2015 if (priv->scan_vif == vif) { 2017 - ieee80211_scan_completed(priv->hw, true); 2016 + scan_completed = true; 2018 2017 priv->scan_vif = NULL; 2019 2018 priv->scan_request = NULL; 2020 2019 } 2021 2020 memset(priv->bssid, 0, ETH_ALEN); 2022 2021 } 2023 2022 mutex_unlock(&priv->mutex); 2023 + 2024 + if (scan_completed) 2025 + ieee80211_scan_completed(priv->hw, true); 2024 2026 2025 2027 IWL_DEBUG_MAC80211(priv, "leave\n"); 2026 2028
+1 -1
drivers/net/wireless/iwlwifi/iwl-debug.h
··· 71 71 #define IWL_DEBUG(__priv, level, fmt, args...) 72 72 #define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 73 73 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, 74 - void *p, u32 len) 74 + const void *p, u32 len) 75 75 {} 76 76 #endif /* CONFIG_IWLWIFI_DEBUG */ 77 77
+1 -1
drivers/net/wireless/iwlwifi/iwl-devtrace.h
··· 193 193 __entry->framelen = buf0_len + buf1_len; 194 194 memcpy(__get_dynamic_array(tfd), tfd, tfdlen); 195 195 memcpy(__get_dynamic_array(buf0), buf0, buf0_len); 196 - memcpy(__get_dynamic_array(buf1), buf1, buf0_len); 196 + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); 197 197 ), 198 198 TP_printk("[%p] TX %.2x (%zu bytes)", 199 199 __entry->priv,
+1 -1
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 298 298 299 299 static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif) 300 300 { 301 - WARN_ON(!mutex_is_locked(&priv->mutex)); 301 + lockdep_assert_held(&priv->mutex); 302 302 303 303 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 304 304 set_bit(STATUS_SCANNING, &priv->status);
+3 -3
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 773 773 774 774 int iwl_restore_default_wep_keys(struct iwl_priv *priv) 775 775 { 776 - WARN_ON(!mutex_is_locked(&priv->mutex)); 776 + lockdep_assert_held(&priv->mutex); 777 777 778 778 return iwl_send_static_wepkey_cmd(priv, 0); 779 779 } ··· 784 784 { 785 785 int ret; 786 786 787 - WARN_ON(!mutex_is_locked(&priv->mutex)); 787 + lockdep_assert_held(&priv->mutex); 788 788 789 789 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 790 790 keyconf->keyidx); ··· 808 808 { 809 809 int ret; 810 810 811 - WARN_ON(!mutex_is_locked(&priv->mutex)); 811 + lockdep_assert_held(&priv->mutex); 812 812 813 813 if (keyconf->keylen != WEP_KEY_LEN_128 && 814 814 keyconf->keylen != WEP_KEY_LEN_64) {
+164 -50
drivers/net/wireless/libertas/cfg.c
··· 257 257 return sizeof(rate_tlv->header) + i; 258 258 } 259 259 260 + /* Add common rates from a TLV and return the new end of the TLV */ 261 + static u8 * 262 + add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) 263 + { 264 + int hw, ap, ap_max = ie[1]; 265 + u8 hw_rate; 266 + 267 + /* Advance past IE header */ 268 + ie += 2; 269 + 270 + lbs_deb_hex(LBS_DEB_ASSOC, "AP IE Rates", (u8 *) ie, ap_max); 271 + 272 + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { 273 + hw_rate = lbs_rates[hw].bitrate / 5; 274 + for (ap = 0; ap < ap_max; ap++) { 275 + if (hw_rate == (ie[ap] & 0x7f)) { 276 + *tlv++ = ie[ap]; 277 + *nrates = *nrates + 1; 278 + } 279 + } 280 + } 281 + return tlv; 282 + } 260 283 261 284 /* 262 285 * Adds a TLV with all rates the hardware *and* BSS supports. ··· 287 264 static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) 288 265 { 289 266 struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; 290 - const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); 291 - int n; 267 + const u8 *rates_eid, *ext_rates_eid; 268 + int n = 0; 269 + 270 + rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); 271 + ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES); 292 272 293 273 /* 294 274 * 01 00 TLV_TYPE_RATES ··· 301 275 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); 302 276 tlv += sizeof(rate_tlv->header); 303 277 304 - if (!rates_eid) { 278 + /* Add basic rates */ 279 + if (rates_eid) { 280 + tlv = add_ie_rates(tlv, rates_eid, &n); 281 + 282 + /* Add extended rates, if any */ 283 + if (ext_rates_eid) 284 + tlv = add_ie_rates(tlv, ext_rates_eid, &n); 285 + } else { 286 + lbs_deb_assoc("assoc: bss had no basic rate IE\n"); 305 287 /* Fallback: add basic 802.11b rates */ 306 288 *tlv++ = 0x82; 307 289 *tlv++ = 0x84; 308 290 *tlv++ = 0x8b; 309 291 *tlv++ = 0x96; 310 292 n = 4; 311 - } else { 312 - int hw, ap; 313 - u8 ap_max = rates_eid[1]; 314 - n = 0; 315 - for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { 316 
- u8 hw_rate = lbs_rates[hw].bitrate / 5; 317 - for (ap = 0; ap < ap_max; ap++) { 318 - if (hw_rate == (rates_eid[ap+2] & 0x7f)) { 319 - *tlv++ = rates_eid[ap+2]; 320 - n++; 321 - } 322 - } 323 - } 324 293 } 325 294 326 295 rate_tlv->header.len = cpu_to_le16(n); ··· 486 465 lbs_deb_enter(LBS_DEB_CFG80211); 487 466 488 467 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); 489 - nr_sets = le16_to_cpu(resp->size); 468 + nr_sets = le16_to_cpu(scanresp->nr_sets); 469 + 470 + lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n", 471 + nr_sets, bsssize, le16_to_cpu(resp->size)); 472 + 473 + if (nr_sets == 0) { 474 + ret = 0; 475 + goto done; 476 + } 490 477 491 478 /* 492 479 * The general layout of the scan response is described in chapter ··· 699 670 700 671 if (priv->scan_channel >= priv->scan_req->n_channels) { 701 672 /* Mark scan done */ 702 - cfg80211_scan_done(priv->scan_req, false); 673 + if (priv->internal_scan) 674 + kfree(priv->scan_req); 675 + else 676 + cfg80211_scan_done(priv->scan_req, false); 677 + 703 678 priv->scan_req = NULL; 679 + priv->last_scan = jiffies; 704 680 } 705 681 706 682 /* Restart network */ ··· 716 682 717 683 kfree(scan_cmd); 718 684 685 + /* Wake up anything waiting on scan completion */ 686 + if (priv->scan_req == NULL) { 687 + lbs_deb_scan("scan: waking up waiters\n"); 688 + wake_up_all(&priv->scan_q); 689 + } 690 + 719 691 out_no_scan_cmd: 720 692 lbs_deb_leave(LBS_DEB_SCAN); 721 693 } 722 694 695 + static void _internal_start_scan(struct lbs_private *priv, bool internal, 696 + struct cfg80211_scan_request *request) 697 + { 698 + lbs_deb_enter(LBS_DEB_CFG80211); 699 + 700 + lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", 701 + request->n_ssids, request->n_channels, request->ie_len); 702 + 703 + priv->scan_channel = 0; 704 + queue_delayed_work(priv->work_thread, &priv->scan_work, 705 + msecs_to_jiffies(50)); 706 + 707 + priv->scan_req = request; 708 + priv->internal_scan = internal; 709 + 710 + 
lbs_deb_leave(LBS_DEB_CFG80211); 711 + } 723 712 724 713 static int lbs_cfg_scan(struct wiphy *wiphy, 725 714 struct net_device *dev, ··· 759 702 goto out; 760 703 } 761 704 762 - lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", 763 - request->n_ssids, request->n_channels, request->ie_len); 764 - 765 - priv->scan_channel = 0; 766 - queue_delayed_work(priv->work_thread, &priv->scan_work, 767 - msecs_to_jiffies(50)); 705 + _internal_start_scan(priv, false, request); 768 706 769 707 if (priv->surpriseremoved) 770 708 ret = -EIO; 771 - 772 - priv->scan_req = request; 773 709 774 710 out: 775 711 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); ··· 1050 1000 int status; 1051 1001 int ret; 1052 1002 u8 *pos = &(cmd->iebuf[0]); 1003 + u8 *tmp; 1053 1004 1054 1005 lbs_deb_enter(LBS_DEB_CFG80211); 1055 1006 ··· 1095 1044 pos += lbs_add_cf_param_tlv(pos); 1096 1045 1097 1046 /* add rates TLV */ 1047 + tmp = pos + 4; /* skip Marvell IE header */ 1098 1048 pos += lbs_add_common_rates_tlv(pos, bss); 1049 + lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp); 1099 1050 1100 1051 /* add auth type TLV */ 1101 1052 if (priv->fwrelease >= 0x09000000) ··· 1177 1124 return ret; 1178 1125 } 1179 1126 1127 + static struct cfg80211_scan_request * 1128 + _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme) 1129 + { 1130 + struct cfg80211_scan_request *creq = NULL; 1131 + int i, n_channels = 0; 1132 + enum ieee80211_band band; 1180 1133 1134 + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1135 + if (wiphy->bands[band]) 1136 + n_channels += wiphy->bands[band]->n_channels; 1137 + } 1138 + 1139 + creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + 1140 + n_channels * sizeof(void *), 1141 + GFP_ATOMIC); 1142 + if (!creq) 1143 + return NULL; 1144 + 1145 + /* SSIDs come after channels */ 1146 + creq->ssids = (void *)&creq->channels[n_channels]; 1147 + creq->n_channels = n_channels; 1148 + creq->n_ssids = 1; 1149 + 1150 + /* Scan all 
available channels */ 1151 + i = 0; 1152 + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1153 + int j; 1154 + 1155 + if (!wiphy->bands[band]) 1156 + continue; 1157 + 1158 + for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 1159 + /* ignore disabled channels */ 1160 + if (wiphy->bands[band]->channels[j].flags & 1161 + IEEE80211_CHAN_DISABLED) 1162 + continue; 1163 + 1164 + creq->channels[i] = &wiphy->bands[band]->channels[j]; 1165 + i++; 1166 + } 1167 + } 1168 + if (i) { 1169 + /* Set real number of channels specified in creq->channels[] */ 1170 + creq->n_channels = i; 1171 + 1172 + /* Scan for the SSID we're going to connect to */ 1173 + memcpy(creq->ssids[0].ssid, sme->ssid, sme->ssid_len); 1174 + creq->ssids[0].ssid_len = sme->ssid_len; 1175 + } else { 1176 + /* No channels found... */ 1177 + kfree(creq); 1178 + creq = NULL; 1179 + } 1180 + 1181 + return creq; 1182 + } 1181 1183 1182 1184 static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, 1183 1185 struct cfg80211_connect_params *sme) ··· 1244 1136 1245 1137 lbs_deb_enter(LBS_DEB_CFG80211); 1246 1138 1247 - if (sme->bssid) { 1248 - bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, 1249 - sme->ssid, sme->ssid_len, 1250 - WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 1251 - } else { 1252 - /* 1253 - * Here we have an impedance mismatch. The firmware command 1254 - * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot 1255 - * connect otherwise. However, for the connect-API of 1256 - * cfg80211 the bssid is purely optional. We don't get one, 1257 - * except the user specifies one on the "iw" command line. 1258 - * 1259 - * If we don't got one, we could initiate a scan and look 1260 - * for the best matching cfg80211_bss entry. 1261 - * 1262 - * Or, better yet, net/wireless/sme.c get's rewritten into 1263 - * something more generally useful. 
1139 + if (!sme->bssid) { 1140 + /* Run a scan if one isn't in-progress already and if the last 1141 + * scan was done more than 2 seconds ago. 1264 1142 */ 1265 - lbs_pr_err("TODO: no BSS specified\n"); 1266 - ret = -ENOTSUPP; 1267 - goto done; 1143 + if (priv->scan_req == NULL && 1144 + time_after(jiffies, priv->last_scan + (2 * HZ))) { 1145 + struct cfg80211_scan_request *creq; 1146 + 1147 + creq = _new_connect_scan_req(wiphy, sme); 1148 + if (!creq) { 1149 + ret = -EINVAL; 1150 + goto done; 1151 + } 1152 + 1153 + lbs_deb_assoc("assoc: scanning for compatible AP\n"); 1154 + _internal_start_scan(priv, true, creq); 1155 + } 1156 + 1157 + /* Wait for any in-progress scan to complete */ 1158 + lbs_deb_assoc("assoc: waiting for scan to complete\n"); 1159 + wait_event_interruptible_timeout(priv->scan_q, 1160 + (priv->scan_req == NULL), 1161 + (15 * HZ)); 1162 + lbs_deb_assoc("assoc: scanning competed\n"); 1268 1163 } 1269 1164 1270 - 1165 + /* Find the BSS we want using available scan results */ 1166 + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, 1167 + sme->ssid, sme->ssid_len, 1168 + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 1271 1169 if (!bss) { 1272 - lbs_pr_err("assicate: bss %pM not in scan results\n", 1170 + lbs_pr_err("assoc: bss %pM not in scan results\n", 1273 1171 sme->bssid); 1274 1172 ret = -ENOENT; 1275 1173 goto done; 1276 1174 } 1277 - lbs_deb_assoc("trying %pM", sme->bssid); 1175 + lbs_deb_assoc("trying %pM\n", bss->bssid); 1278 1176 lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n", 1279 1177 sme->crypto.cipher_group, 1280 1178 sme->key_idx, sme->key_len); ··· 1343 1229 lbs_set_radio(priv, preamble, 1); 1344 1230 1345 1231 /* Do the actual association */ 1346 - lbs_associate(priv, bss, sme); 1232 + ret = lbs_associate(priv, bss, sme); 1347 1233 1348 1234 done: 1349 1235 if (bss)
+5
drivers/net/wireless/libertas/dev.h
··· 161 161 /** Scanning */ 162 162 struct delayed_work scan_work; 163 163 int scan_channel; 164 + /* Queue of things waiting for scan completion */ 165 + wait_queue_head_t scan_q; 166 + /* Whether the scan was initiated internally and not by cfg80211 */ 167 + bool internal_scan; 168 + unsigned long last_scan; 164 169 }; 165 170 166 171 extern struct cmd_confirm_sleep confirm_sleep;
+1
drivers/net/wireless/libertas/main.c
··· 719 719 priv->deep_sleep_required = 0; 720 720 priv->wakeup_dev_required = 0; 721 721 init_waitqueue_head(&priv->ds_awake_q); 722 + init_waitqueue_head(&priv->scan_q); 722 723 priv->authtype_auto = 1; 723 724 priv->is_host_sleep_configured = 0; 724 725 priv->is_host_sleep_activated = 0;
+2
drivers/net/wireless/p54/p54pci.c
··· 43 43 { PCI_DEVICE(0x1260, 0x3886) }, 44 44 /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ 45 45 { PCI_DEVICE(0x1260, 0xffff) }, 46 + /* Standard Microsystems Corp SMC2802W Wireless PCI */ 47 + { PCI_DEVICE(0x10b8, 0x2802) }, 46 48 { }, 47 49 }; 48 50
+12 -13
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 240 240 struct rt2x00_dev *rt2x00dev; 241 241 int retval; 242 242 243 - retval = pci_request_regions(pci_dev, pci_name(pci_dev)); 244 - if (retval) { 245 - ERROR_PROBE("PCI request regions failed.\n"); 246 - return retval; 247 - } 248 - 249 243 retval = pci_enable_device(pci_dev); 250 244 if (retval) { 251 245 ERROR_PROBE("Enable device failed.\n"); 252 - goto exit_release_regions; 246 + return retval; 247 + } 248 + 249 + retval = pci_request_regions(pci_dev, pci_name(pci_dev)); 250 + if (retval) { 251 + ERROR_PROBE("PCI request regions failed.\n"); 252 + goto exit_disable_device; 253 253 } 254 254 255 255 pci_set_master(pci_dev); ··· 260 260 if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { 261 261 ERROR_PROBE("PCI DMA not supported.\n"); 262 262 retval = -EIO; 263 - goto exit_disable_device; 263 + goto exit_release_regions; 264 264 } 265 265 266 266 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 267 267 if (!hw) { 268 268 ERROR_PROBE("Failed to allocate hardware.\n"); 269 269 retval = -ENOMEM; 270 - goto exit_disable_device; 270 + goto exit_release_regions; 271 271 } 272 272 273 273 pci_set_drvdata(pci_dev, hw); ··· 300 300 exit_free_device: 301 301 ieee80211_free_hw(hw); 302 302 303 - exit_disable_device: 304 - if (retval != -EBUSY) 305 - pci_disable_device(pci_dev); 306 - 307 303 exit_release_regions: 308 304 pci_release_regions(pci_dev); 305 + 306 + exit_disable_device: 307 + pci_disable_device(pci_dev); 309 308 310 309 pci_set_drvdata(pci_dev, NULL); 311 310
+2
drivers/net/wireless/rtl818x/rtl8180_dev.c
··· 695 695 696 696 /* grab a fresh beacon */ 697 697 skb = ieee80211_beacon_get(dev, vif); 698 + if (!skb) 699 + goto resched; 698 700 699 701 /* 700 702 * update beacon timestamp w/ TSF value
+1 -2
drivers/net/wireless/wl12xx/wl1271_spi.c
··· 160 160 spi_message_add_tail(&t, &m); 161 161 162 162 spi_sync(wl_to_spi(wl), &m); 163 - kfree(cmd); 164 - 165 163 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 164 + kfree(cmd); 166 165 } 167 166 168 167 #define WL1271_BUSY_WORD_TIMEOUT 1000
+1 -1
include/linux/ppp_channel.h
··· 36 36 37 37 struct ppp_channel { 38 38 void *private; /* channel private data */ 39 - struct ppp_channel_ops *ops; /* operations for this channel */ 39 + const struct ppp_channel_ops *ops; /* operations for this channel */ 40 40 int mtu; /* max transmit packet size */ 41 41 int hdrlen; /* amount of headroom channel needs */ 42 42 void *ppp; /* opaque to channel */
+5
include/linux/skbuff.h
··· 1379 1379 return skb_network_header(skb) - skb->data; 1380 1380 } 1381 1381 1382 + static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) 1383 + { 1384 + return pskb_may_pull(skb, skb_network_offset(skb) + len); 1385 + } 1386 + 1382 1387 /* 1383 1388 * CPUs often take a performance hit when accessing unaligned memory 1384 1389 * locations. The actual performance hit varies, it can be small if the
+1 -1
include/net/bluetooth/hci_core.h
··· 132 132 133 133 struct inquiry_cache inq_cache; 134 134 struct hci_conn_hash conn_hash; 135 - struct bdaddr_list blacklist; 135 + struct list_head blacklist; 136 136 137 137 struct hci_dev_stats stat; 138 138
+1 -1
net/atm/pppoatm.c
··· 260 260 return -ENOTTY; 261 261 } 262 262 263 - static /*const*/ struct ppp_channel_ops pppoatm_ops = { 263 + static const struct ppp_channel_ops pppoatm_ops = { 264 264 .start_xmit = pppoatm_send, 265 265 .ioctl = pppoatm_devppp_ioctl, 266 266 };
+1 -1
net/bluetooth/hci_core.c
··· 924 924 925 925 hci_conn_hash_init(hdev); 926 926 927 - INIT_LIST_HEAD(&hdev->blacklist.list); 927 + INIT_LIST_HEAD(&hdev->blacklist); 928 928 929 929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 930 930
+3 -5
net/bluetooth/hci_sock.c
··· 168 168 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 169 169 { 170 170 struct list_head *p; 171 - struct bdaddr_list *blacklist = &hdev->blacklist; 172 171 173 - list_for_each(p, &blacklist->list) { 172 + list_for_each(p, &hdev->blacklist) { 174 173 struct bdaddr_list *b; 175 174 176 175 b = list_entry(p, struct bdaddr_list, list); ··· 201 202 202 203 bacpy(&entry->bdaddr, &bdaddr); 203 204 204 - list_add(&entry->list, &hdev->blacklist.list); 205 + list_add(&entry->list, &hdev->blacklist); 205 206 206 207 return 0; 207 208 } ··· 209 210 int hci_blacklist_clear(struct hci_dev *hdev) 210 211 { 211 212 struct list_head *p, *n; 212 - struct bdaddr_list *blacklist = &hdev->blacklist; 213 213 214 - list_for_each_safe(p, n, &blacklist->list) { 214 + list_for_each_safe(p, n, &hdev->blacklist) { 215 215 struct bdaddr_list *b; 216 216 217 217 b = list_entry(p, struct bdaddr_list, list);
+1 -2
net/bluetooth/hci_sysfs.c
··· 439 439 static int blacklist_show(struct seq_file *f, void *p) 440 440 { 441 441 struct hci_dev *hdev = f->private; 442 - struct bdaddr_list *blacklist = &hdev->blacklist; 443 442 struct list_head *l; 444 443 445 444 hci_dev_lock_bh(hdev); 446 445 447 - list_for_each(l, &blacklist->list) { 446 + list_for_each(l, &hdev->blacklist) { 448 447 struct bdaddr_list *b; 449 448 bdaddr_t bdaddr; 450 449
+21 -3
net/bluetooth/l2cap.c
··· 2527 2527 if (pi->imtu != L2CAP_DEFAULT_MTU) 2528 2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 2529 2529 2530 + if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 2531 + !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 2532 + break; 2533 + 2530 2534 rfc.mode = L2CAP_MODE_BASIC; 2531 2535 rfc.txwin_size = 0; 2532 2536 rfc.max_transmit = 0; ··· 2538 2534 rfc.monitor_timeout = 0; 2539 2535 rfc.max_pdu_size = 0; 2540 2536 2537 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2538 + (unsigned long) &rfc); 2541 2539 break; 2542 2540 2543 2541 case L2CAP_MODE_ERTM: ··· 2551 2545 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2552 2546 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2553 2547 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2548 + 2549 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2550 + (unsigned long) &rfc); 2554 2551 2555 2552 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2556 2553 break; ··· 2575 2566 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2576 2567 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2577 2568 2569 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2570 + (unsigned long) &rfc); 2571 + 2578 2572 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2579 2573 break; 2580 2574 ··· 2588 2576 } 2589 2577 break; 2590 2578 } 2591 - 2592 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2593 - (unsigned long) &rfc); 2594 2579 2595 2580 /* FIXME: Need actual value of the flush timeout */ 2596 2581 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) ··· 3347 3338 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 3348 3339 3349 3340 del_timer(&conn->info_timer); 3341 + 3342 + if (result != L2CAP_IR_SUCCESS) { 3343 + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3344 + conn->info_ident = 0; 3345 + 3346 + l2cap_conn_start(conn); 3347 + 3348 + return 0; 3349 + } 3350 3350 3351 3351 if (type == L2CAP_IT_FEAT_MASK) { 3352 3352 conn->feat_mask = get_unaligned_le32(rsp->data);
+1 -1
net/bluetooth/rfcomm/tty.c
··· 1183 1183 return 0; 1184 1184 } 1185 1185 1186 - void __exit rfcomm_cleanup_ttys(void) 1186 + void rfcomm_cleanup_ttys(void) 1187 1187 { 1188 1188 tty_unregister_driver(rfcomm_tty_driver); 1189 1189 put_tty_driver(rfcomm_tty_driver);
+3 -4
net/core/dev.c
··· 2517 2517 struct rps_dev_flow voidflow, *rflow = &voidflow; 2518 2518 int cpu; 2519 2519 2520 + preempt_disable(); 2520 2521 rcu_read_lock(); 2521 2522 2522 2523 cpu = get_rps_cpu(skb->dev, skb, &rflow); ··· 2527 2526 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2528 2527 2529 2528 rcu_read_unlock(); 2529 + preempt_enable(); 2530 2530 } 2531 2531 #else 2532 2532 { ··· 3074 3072 int mac_len; 3075 3073 enum gro_result ret; 3076 3074 3077 - if (!(skb->dev->features & NETIF_F_GRO)) 3075 + if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3078 3076 goto normal; 3079 3077 3080 3078 if (skb_is_gso(skb) || skb_has_frags(skb)) ··· 3160 3158 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3161 3159 { 3162 3160 struct sk_buff *p; 3163 - 3164 - if (netpoll_rx_on(skb)) 3165 - return GRO_NORMAL; 3166 3161 3167 3162 for (p = napi->gro_list; p; p = p->next) { 3168 3163 NAPI_GRO_CB(p)->same_flow =
+1 -1
net/ipv4/tcp_input.c
··· 3930 3930 if (opsize < 2 || opsize > length) 3931 3931 return NULL; 3932 3932 if (opcode == TCPOPT_MD5SIG) 3933 - return ptr; 3933 + return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 3934 3934 } 3935 3935 ptr += opsize - 2; 3936 3936 length -= opsize;
+1 -1
net/irda/irnet/irnet_ppp.c
··· 20 20 /* Please put other headers in irnet.h - Thanks */ 21 21 22 22 /* Generic PPP callbacks (to call us) */ 23 - static struct ppp_channel_ops irnet_ppp_ops = { 23 + static const struct ppp_channel_ops irnet_ppp_ops = { 24 24 .start_xmit = ppp_irnet_send, 25 25 .ioctl = ppp_irnet_ioctl 26 26 };
+4 -1
net/l2tp/l2tp_ppp.c
··· 135 135 136 136 static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); 137 137 138 - static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; 138 + static const struct ppp_channel_ops pppol2tp_chan_ops = { 139 + .start_xmit = pppol2tp_xmit, 140 + }; 141 + 139 142 static const struct proto_ops pppol2tp_ops; 140 143 141 144 /* Helpers to obtain tunnel/session contexts from sockets.
+2
net/mac80211/main.c
··· 685 685 686 686 return 0; 687 687 688 + #ifdef CONFIG_INET 688 689 fail_ifa: 689 690 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, 690 691 &local->network_latency_notifier); 691 692 rtnl_lock(); 693 + #endif 692 694 fail_pm_qos: 693 695 ieee80211_led_exit(local); 694 696 ieee80211_remove_interfaces(local);
-14
net/mac80211/scan.c
··· 400 400 else 401 401 __set_bit(SCAN_SW_SCANNING, &local->scanning); 402 402 403 - /* 404 - * Kicking off the scan need not be protected, 405 - * only the scan variable stuff, since now 406 - * local->scan_req is assigned and other callers 407 - * will abort their scan attempts. 408 - * 409 - * This avoids too many locking dependencies 410 - * so that the scan completed calls have more 411 - * locking freedom. 412 - */ 413 - 414 403 ieee80211_recalc_idle(local); 415 - mutex_unlock(&local->scan_mtx); 416 404 417 405 if (local->ops->hw_scan) { 418 406 WARN_ON(!ieee80211_prep_hw_scan(local)); 419 407 rc = drv_hw_scan(local, sdata, local->hw_scan_req); 420 408 } else 421 409 rc = ieee80211_start_sw_scan(local); 422 - 423 - mutex_lock(&local->scan_mtx); 424 410 425 411 if (rc) { 426 412 kfree(local->hw_scan_req);
+3
net/rxrpc/ar-ack.c
··· 245 245 _enter("%d,%d,%d", 246 246 call->acks_tail, call->acks_unacked, call->acks_head); 247 247 248 + if (call->state >= RXRPC_CALL_COMPLETE) 249 + return; 250 + 248 251 resend = 0; 249 252 resend_at = 0; 250 253
+2 -4
net/rxrpc/ar-call.c
··· 786 786 787 787 /* 788 788 * handle resend timer expiry 789 + * - may not take call->state_lock as this can deadlock against del_timer_sync() 789 790 */ 790 791 static void rxrpc_resend_time_expired(unsigned long _call) 791 792 { ··· 797 796 if (call->state >= RXRPC_CALL_COMPLETE) 798 797 return; 799 798 800 - read_lock_bh(&call->state_lock); 801 799 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); 802 - if (call->state < RXRPC_CALL_COMPLETE && 803 - !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) 800 + if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) 804 801 rxrpc_queue_call(call); 805 - read_unlock_bh(&call->state_lock); 806 802 } 807 803 808 804 /*
+13 -10
net/sched/act_nat.c
··· 114 114 int egress; 115 115 int action; 116 116 int ihl; 117 + int noff; 117 118 118 119 spin_lock(&p->tcf_lock); 119 120 ··· 133 132 if (unlikely(action == TC_ACT_SHOT)) 134 133 goto drop; 135 134 136 - if (!pskb_may_pull(skb, sizeof(*iph))) 135 + noff = skb_network_offset(skb); 136 + if (!pskb_may_pull(skb, sizeof(*iph) + noff)) 137 137 goto drop; 138 138 139 139 iph = ip_hdr(skb); ··· 146 144 147 145 if (!((old_addr ^ addr) & mask)) { 148 146 if (skb_cloned(skb) && 149 - !skb_clone_writable(skb, sizeof(*iph)) && 147 + !skb_clone_writable(skb, sizeof(*iph) + noff) && 150 148 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 151 149 goto drop; 152 150 ··· 174 172 { 175 173 struct tcphdr *tcph; 176 174 177 - if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) || 175 + if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) || 178 176 (skb_cloned(skb) && 179 - !skb_clone_writable(skb, ihl + sizeof(*tcph)) && 177 + !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) && 180 178 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 181 179 goto drop; 182 180 ··· 188 186 { 189 187 struct udphdr *udph; 190 188 191 - if (!pskb_may_pull(skb, ihl + sizeof(*udph)) || 189 + if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) || 192 190 (skb_cloned(skb) && 193 - !skb_clone_writable(skb, ihl + sizeof(*udph)) && 191 + !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) && 194 192 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 195 193 goto drop; 196 194 ··· 207 205 { 208 206 struct icmphdr *icmph; 209 207 210 - if (!pskb_may_pull(skb, ihl + sizeof(*icmph))) 208 + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff)) 211 209 goto drop; 212 210 213 211 icmph = (void *)(skb_network_header(skb) + ihl); ··· 217 215 (icmph->type != ICMP_PARAMETERPROB)) 218 216 break; 219 217 220 - if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) 218 + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) + 219 + noff)) 221 220 goto drop; 222 221 223 222 icmph = (void *)(skb_network_header(skb) + ihl); ··· 232 229 
break; 233 230 234 231 if (skb_cloned(skb) && 235 - !skb_clone_writable(skb, 236 - ihl + sizeof(*icmph) + sizeof(*iph)) && 232 + !skb_clone_writable(skb, ihl + sizeof(*icmph) + 233 + sizeof(*iph) + noff) && 237 234 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 238 235 goto drop; 239 236
+56 -40
net/sched/cls_flow.c
··· 65 65 return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); 66 66 } 67 67 68 - static u32 flow_get_src(const struct sk_buff *skb) 68 + static u32 flow_get_src(struct sk_buff *skb) 69 69 { 70 70 switch (skb->protocol) { 71 71 case htons(ETH_P_IP): 72 - return ntohl(ip_hdr(skb)->saddr); 72 + if (pskb_network_may_pull(skb, sizeof(struct iphdr))) 73 + return ntohl(ip_hdr(skb)->saddr); 74 + break; 73 75 case htons(ETH_P_IPV6): 74 - return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 75 - default: 76 - return addr_fold(skb->sk); 76 + if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) 77 + return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 78 + break; 77 79 } 80 + 81 + return addr_fold(skb->sk); 78 82 } 79 83 80 - static u32 flow_get_dst(const struct sk_buff *skb) 84 + static u32 flow_get_dst(struct sk_buff *skb) 81 85 { 82 86 switch (skb->protocol) { 83 87 case htons(ETH_P_IP): 84 - return ntohl(ip_hdr(skb)->daddr); 88 + if (pskb_network_may_pull(skb, sizeof(struct iphdr))) 89 + return ntohl(ip_hdr(skb)->daddr); 90 + break; 85 91 case htons(ETH_P_IPV6): 86 - return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 87 - default: 88 - return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 92 + if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) 93 + return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 94 + break; 89 95 } 96 + 97 + return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 90 98 } 91 99 92 - static u32 flow_get_proto(const struct sk_buff *skb) 100 + static u32 flow_get_proto(struct sk_buff *skb) 93 101 { 94 102 switch (skb->protocol) { 95 103 case htons(ETH_P_IP): 96 - return ip_hdr(skb)->protocol; 104 + return pskb_network_may_pull(skb, sizeof(struct iphdr)) ? 105 + ip_hdr(skb)->protocol : 0; 97 106 case htons(ETH_P_IPV6): 98 - return ipv6_hdr(skb)->nexthdr; 107 + return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ? 
108 + ipv6_hdr(skb)->nexthdr : 0; 99 109 default: 100 110 return 0; 101 111 } ··· 126 116 } 127 117 } 128 118 129 - static u32 flow_get_proto_src(const struct sk_buff *skb) 119 + static u32 flow_get_proto_src(struct sk_buff *skb) 130 120 { 131 - u32 res = 0; 132 - 133 121 switch (skb->protocol) { 134 122 case htons(ETH_P_IP): { 135 - struct iphdr *iph = ip_hdr(skb); 123 + struct iphdr *iph; 136 124 125 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 126 + break; 127 + iph = ip_hdr(skb); 137 128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 138 - has_ports(iph->protocol)) 139 - res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 129 + has_ports(iph->protocol) && 130 + pskb_network_may_pull(skb, iph->ihl * 4 + 2)) 131 + return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 140 132 break; 141 133 } 142 134 case htons(ETH_P_IPV6): { 143 - struct ipv6hdr *iph = ipv6_hdr(skb); 135 + struct ipv6hdr *iph; 144 136 137 + if (!pskb_network_may_pull(skb, sizeof(*iph) + 2)) 138 + break; 139 + iph = ipv6_hdr(skb); 145 140 if (has_ports(iph->nexthdr)) 146 - res = ntohs(*(__be16 *)&iph[1]); 141 + return ntohs(*(__be16 *)&iph[1]); 147 142 break; 148 143 } 149 - default: 150 - res = addr_fold(skb->sk); 151 144 } 152 145 153 - return res; 146 + return addr_fold(skb->sk); 154 147 } 155 148 156 - static u32 flow_get_proto_dst(const struct sk_buff *skb) 149 + static u32 flow_get_proto_dst(struct sk_buff *skb) 157 150 { 158 - u32 res = 0; 159 - 160 151 switch (skb->protocol) { 161 152 case htons(ETH_P_IP): { 162 - struct iphdr *iph = ip_hdr(skb); 153 + struct iphdr *iph; 163 154 155 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 156 + break; 157 + iph = ip_hdr(skb); 164 158 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 165 - has_ports(iph->protocol)) 166 - res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 159 + has_ports(iph->protocol) && 160 + pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 161 + return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 167 162 break; 168 
163 } 169 164 case htons(ETH_P_IPV6): { 170 - struct ipv6hdr *iph = ipv6_hdr(skb); 165 + struct ipv6hdr *iph; 171 166 167 + if (!pskb_network_may_pull(skb, sizeof(*iph) + 4)) 168 + break; 169 + iph = ipv6_hdr(skb); 172 170 if (has_ports(iph->nexthdr)) 173 - res = ntohs(*(__be16 *)((void *)&iph[1] + 2)); 171 + return ntohs(*(__be16 *)((void *)&iph[1] + 2)); 174 172 break; 175 173 } 176 - default: 177 - res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 178 174 } 179 175 180 - return res; 176 + return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 181 177 } 182 178 183 179 static u32 flow_get_iif(const struct sk_buff *skb) ··· 227 211 }) 228 212 #endif 229 213 230 - static u32 flow_get_nfct_src(const struct sk_buff *skb) 214 + static u32 flow_get_nfct_src(struct sk_buff *skb) 231 215 { 232 216 switch (skb->protocol) { 233 217 case htons(ETH_P_IP): ··· 239 223 return flow_get_src(skb); 240 224 } 241 225 242 - static u32 flow_get_nfct_dst(const struct sk_buff *skb) 226 + static u32 flow_get_nfct_dst(struct sk_buff *skb) 243 227 { 244 228 switch (skb->protocol) { 245 229 case htons(ETH_P_IP): ··· 251 235 return flow_get_dst(skb); 252 236 } 253 237 254 - static u32 flow_get_nfct_proto_src(const struct sk_buff *skb) 238 + static u32 flow_get_nfct_proto_src(struct sk_buff *skb) 255 239 { 256 240 return ntohs(CTTUPLE(skb, src.u.all)); 257 241 fallback: 258 242 return flow_get_proto_src(skb); 259 243 } 260 244 261 - static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb) 245 + static u32 flow_get_nfct_proto_dst(struct sk_buff *skb) 262 246 { 263 247 return ntohs(CTTUPLE(skb, dst.u.all)); 264 248 fallback: ··· 297 281 return tag & VLAN_VID_MASK; 298 282 } 299 283 300 - static u32 flow_key_get(const struct sk_buff *skb, int key) 284 + static u32 flow_key_get(struct sk_buff *skb, int key) 301 285 { 302 286 switch (key) { 303 287 case FLOW_KEY_SRC:
+10 -2
net/sched/cls_rsvp.h
··· 143 143 u8 tunnelid = 0; 144 144 u8 *xprt; 145 145 #if RSVP_DST_LEN == 4 146 - struct ipv6hdr *nhptr = ipv6_hdr(skb); 146 + struct ipv6hdr *nhptr; 147 + 148 + if (!pskb_network_may_pull(skb, sizeof(*nhptr))) 149 + return -1; 150 + nhptr = ipv6_hdr(skb); 147 151 #else 148 - struct iphdr *nhptr = ip_hdr(skb); 152 + struct iphdr *nhptr; 153 + 154 + if (!pskb_network_may_pull(skb, sizeof(*nhptr))) 155 + return -1; 156 + nhptr = ip_hdr(skb); 149 157 #endif 150 158 151 159 restart:
+27 -9
net/sched/sch_sfq.c
··· 122 122 switch (skb->protocol) { 123 123 case htons(ETH_P_IP): 124 124 { 125 - const struct iphdr *iph = ip_hdr(skb); 125 + const struct iphdr *iph; 126 + 127 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 128 + goto err; 129 + iph = ip_hdr(skb); 126 130 h = (__force u32)iph->daddr; 127 131 h2 = (__force u32)iph->saddr ^ iph->protocol; 128 132 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && ··· 135 131 iph->protocol == IPPROTO_UDPLITE || 136 132 iph->protocol == IPPROTO_SCTP || 137 133 iph->protocol == IPPROTO_DCCP || 138 - iph->protocol == IPPROTO_ESP)) 134 + iph->protocol == IPPROTO_ESP) && 135 + pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 139 136 h2 ^= *(((u32*)iph) + iph->ihl); 140 137 break; 141 138 } 142 139 case htons(ETH_P_IPV6): 143 140 { 144 - struct ipv6hdr *iph = ipv6_hdr(skb); 141 + struct ipv6hdr *iph; 142 + 143 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 144 + goto err; 145 + iph = ipv6_hdr(skb); 145 146 h = (__force u32)iph->daddr.s6_addr32[3]; 146 147 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; 147 - if (iph->nexthdr == IPPROTO_TCP || 148 - iph->nexthdr == IPPROTO_UDP || 149 - iph->nexthdr == IPPROTO_UDPLITE || 150 - iph->nexthdr == IPPROTO_SCTP || 151 - iph->nexthdr == IPPROTO_DCCP || 152 - iph->nexthdr == IPPROTO_ESP) 148 + if ((iph->nexthdr == IPPROTO_TCP || 149 + iph->nexthdr == IPPROTO_UDP || 150 + iph->nexthdr == IPPROTO_UDPLITE || 151 + iph->nexthdr == IPPROTO_SCTP || 152 + iph->nexthdr == IPPROTO_DCCP || 153 + iph->nexthdr == IPPROTO_ESP) && 154 + pskb_network_may_pull(skb, sizeof(*iph) + 4)) 153 155 h2 ^= *(u32*)&iph[1]; 154 156 break; 155 157 } 156 158 default: 159 + err: 157 160 h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; 158 161 h2 = (unsigned long)skb->sk; 159 162 } ··· 513 502 return 0; 514 503 } 515 504 505 + static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, 506 + u32 classid) 507 + { 508 + return 0; 509 + } 510 + 516 511 static struct tcf_proto **sfq_find_tcf(struct 
Qdisc *sch, unsigned long cl) 517 512 { 518 513 struct sfq_sched_data *q = qdisc_priv(sch); ··· 573 556 static const struct Qdisc_class_ops sfq_class_ops = { 574 557 .get = sfq_get, 575 558 .tcf_chain = sfq_find_tcf, 559 + .bind_tcf = sfq_bind, 576 560 .dump = sfq_dump_class, 577 561 .dump_stats = sfq_dump_class_stats, 578 562 .walk = sfq_walk,