Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (59 commits)
igbvf.txt: Add igbvf Documentation
igb.txt: Add igb documentation
e100/e1000*/igb*/ixgb*: Add missing read memory barrier
ixgbe: fix build error with FCOE_CONFIG without DCB_CONFIG
netxen: protect tx timeout recovery by rtnl lock
isdn: gigaset: use after free
isdn: gigaset: add missing unlock
solos-pci: Fix race condition in tasklet RX handling
pkt_sched: Fix sch_sfq vs tcf_bind_filter oops
net: disable preemption before calling smp_processor_id()
tcp: no md5sig option size check bug
iwlwifi: fix locking assertions
iwlwifi: fix TX tracer
isdn: fix information leak
net: Fix napi_gro_frags vs netpoll path
usbnet: remove noisy and hardly useful printk
rtl8180: avoid potential NULL deref in rtl8180_beacon_work
ath9k: Remove myself from the MAINTAINERS list
libertas: scan before association if no BSSID was given
libertas: fix association with some APs by using extended rates
...

+1319 -458
+132
Documentation/networking/igb.txt
···
··· 1 + Linux* Base Driver for Intel(R) Network Connection 2 + ================================================== 3 + 4 + Intel Gigabit Linux driver. 5 + Copyright(c) 1999 - 2010 Intel Corporation. 6 + 7 + Contents 8 + ======== 9 + 10 + - Identifying Your Adapter 11 + - Additional Configurations 12 + - Support 13 + 14 + Identifying Your Adapter 15 + ======================== 16 + 17 + This driver supports all 82575, 82576 and 82580-based Intel(R) gigabit network 18 + connections. 19 + 20 + For specific information on how to identify your adapter, go to the Adapter & 21 + Driver ID Guide at: 22 + 23 + http://support.intel.com/support/go/network/adapter/idguide.htm 24 + 25 + Command Line Parameters 26 + ======================= 27 + 28 + The default value for each parameter is generally the recommended setting, 29 + unless otherwise noted. 30 + 31 + max_vfs 32 + ------- 33 + Valid Range: 0-7 34 + Default Value: 0 35 + 36 + This parameter adds support for SR-IOV. It causes the driver to spawn up to 37 + max_vfs worth of virtual functions. 38 + 39 + Additional Configurations 40 + ========================= 41 + 42 + Jumbo Frames 43 + ------------ 44 + Jumbo Frames support is enabled by changing the MTU to a value larger than 45 + the default of 1500. Use the ifconfig command to increase the MTU size. 46 + For example: 47 + 48 + ifconfig eth<x> mtu 9000 up 49 + 50 + This setting is not saved across reboots. 51 + 52 + Notes: 53 + 54 + - The maximum MTU setting for Jumbo Frames is 9216. This value coincides 55 + with the maximum Jumbo Frames size of 9234 bytes. 56 + 57 + - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or 58 + loss of link. 59 + 60 + Ethtool 61 + ------- 62 + The driver utilizes the ethtool interface for driver configuration and 63 + diagnostics, as well as displaying statistical information. 64 + 65 + http://sourceforge.net/projects/gkernel. 66 + 67 + Enabling Wake on LAN* (WoL) 68 + --------------------------- 69 + WoL is configured through the Ethtool* utility. 70 + 71 + For instructions on enabling WoL with Ethtool, refer to the Ethtool man page. 72 + 73 + WoL will be enabled on the system during the next shut down or reboot. 74 + For this driver version, in order to enable WoL, the igb driver must be 75 + loaded when shutting down or rebooting the system. 76 + 77 + Wake On LAN is only supported on port A of multi-port adapters. 78 + 79 + Wake On LAN is not supported for the Intel(R) Gigabit VT Quad Port Server 80 + Adapter. 81 + 82 + Multiqueue 83 + ---------- 84 + In this mode, a separate MSI-X vector is allocated for each queue and one 85 + for "other" interrupts such as link status change and errors. All 86 + interrupts are throttled via interrupt moderation. Interrupt moderation 87 + must be used to avoid interrupt storms while the driver is processing one 88 + interrupt. The moderation value should be at least as large as the expected 89 + time for the driver to process an interrupt. Multiqueue is off by default. 90 + 91 + REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not 92 + found, the system will fall back to MSI or to Legacy interrupts. 93 + 94 + LRO 95 + --- 96 + Large Receive Offload (LRO) is a technique for increasing inbound throughput 97 + of high-bandwidth network connections by reducing CPU overhead. 
It works by 98 + aggregating multiple incoming packets from a single stream into a larger 99 + buffer before they are passed higher up the networking stack, thus reducing 100 + the number of packets that have to be processed. LRO combines multiple 101 + Ethernet frames into a single receive in the stack, thereby potentially 102 + decreasing CPU utilization for receives. 103 + 104 + NOTE: You need to have inet_lro enabled via either the CONFIG_INET_LRO or 105 + CONFIG_INET_LRO_MODULE kernel config option. Additionally, if 106 + CONFIG_INET_LRO_MODULE is used, the inet_lro module needs to be loaded 107 + before the igb driver. 108 + 109 + You can verify that the driver is using LRO by looking at these counters in 110 + Ethtool: 111 + 112 + lro_aggregated - count of total packets that were combined 113 + lro_flushed - counts the number of packets flushed out of LRO 114 + lro_no_desc - counts the number of times an LRO descriptor was not available 115 + for the LRO packet 116 + 117 + NOTE: IPv6 and UDP are not supported by LRO. 118 + 119 + Support 120 + ======= 121 + 122 + For general information, go to the Intel support website at: 123 + 124 + www.intel.com/support/ 125 + 126 + or the Intel Wired Networking project hosted by Sourceforge at: 127 + 128 + http://sourceforge.net/projects/e1000 129 + 130 + If an issue is identified with the released source code on the supported 131 + kernel with a supported adapter, email the specific information related 132 + to the issue to e1000-devel@lists.sf.net
+78
Documentation/networking/igbvf.txt
···
··· 1 + Linux* Base Driver for Intel(R) Network Connection 2 + ================================================== 3 + 4 + Intel Gigabit Linux driver. 5 + Copyright(c) 1999 - 2010 Intel Corporation. 6 + 7 + Contents 8 + ======== 9 + 10 + - Identifying Your Adapter 11 + - Additional Configurations 12 + - Support 13 + 14 + This file describes the igbvf Linux* Base Driver for Intel Network Connection. 15 + 16 + The igbvf driver supports 82576-based virtual function devices that can only 17 + be activated on kernels that support SR-IOV. SR-IOV requires the correct 18 + platform and OS support. 19 + 20 + The igbvf driver requires the igb driver, version 2.0 or later. The igbvf 21 + driver supports virtual functions generated by the igb driver with a max_vfs 22 + value of 1 or greater. For more information on the max_vfs parameter refer 23 + to the README included with the igb driver. 24 + 25 + The guest OS loading the igbvf driver must support MSI-X interrupts. 26 + 27 + This driver is only supported as a loadable module at this time. Intel is 28 + not supplying patches against the kernel source to allow for static linking 29 + of the driver. For questions related to hardware requirements, refer to the 30 + documentation supplied with your Intel Gigabit adapter. All hardware 31 + requirements listed apply to use with Linux. 32 + 33 + Instructions on updating ethtool can be found in the section "Additional 34 + Configurations" later in this document. 35 + 36 + VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs. 37 + 38 + Identifying Your Adapter 39 + ======================== 40 + 41 + The igbvf driver supports 82576-based virtual function devices that can only 42 + be activated on kernels that support SR-IOV. 43 + 44 + For more information on how to identify your adapter, go to the Adapter & 45 + Driver ID Guide at: 46 + 47 + http://support.intel.com/support/go/network/adapter/idguide.htm 48 + 49 + For the latest Intel network drivers for Linux, refer to the following 50 + website. In the search field, enter your adapter name or type, or use the 51 + networking link on the left to search for your adapter: 52 + 53 + http://downloadcenter.intel.com/scripts-df-external/Support_Intel.aspx 54 + 55 + Additional Configurations 56 + ========================= 57 + 58 + Ethtool 59 + ------- 60 + The driver utilizes the ethtool interface for driver configuration and 61 + diagnostics, as well as displaying statistical information. 62 + 63 + http://sourceforge.net/projects/gkernel. 64 + 65 + Support 66 + ======= 67 + 68 + For general information, go to the Intel support website at: 69 + 70 + http://support.intel.com 71 + 72 + or the Intel Wired Networking project hosted by Sourceforge at: 73 + 74 + http://sourceforge.net/projects/e1000 75 + 76 + If an issue is identified with the released source code on the supported 77 + kernel with a supported adapter, email the specific information related 78 + to the issue to e1000-devel@lists.sf.net
-1
MAINTAINERS
··· 1085 ATHEROS ATH9K WIRELESS DRIVER 1086 M: "Luis R. Rodriguez" <lrodriguez@atheros.com> 1087 M: Jouni Malinen <jmalinen@atheros.com> 1088 - M: Sujith Manoharan <Sujith.Manoharan@atheros.com> 1089 M: Vasanthakumar Thiagarajan <vasanth@atheros.com> 1090 M: Senthil Balasubramanian <senthilkumar@atheros.com> 1091 L: linux-wireless@vger.kernel.org
··· 1085 ATHEROS ATH9K WIRELESS DRIVER 1086 M: "Luis R. Rodriguez" <lrodriguez@atheros.com> 1087 M: Jouni Malinen <jmalinen@atheros.com> 1088 M: Vasanthakumar Thiagarajan <vasanth@atheros.com> 1089 M: Senthil Balasubramanian <senthilkumar@atheros.com> 1090 L: linux-wireless@vger.kernel.org
+6 -1
drivers/atm/solos-pci.c
··· 781 sk_for_each(s, node, head) { 782 vcc = atm_sk(s); 783 if (vcc->dev == dev && vcc->vci == vci && 784 - vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE) 785 goto out; 786 } 787 vcc = NULL; ··· 908 clear_bit(ATM_VF_ADDR, &vcc->flags); 909 clear_bit(ATM_VF_READY, &vcc->flags); 910 911 return; 912 } 913
··· 781 sk_for_each(s, node, head) { 782 vcc = atm_sk(s); 783 if (vcc->dev == dev && vcc->vci == vci && 784 + vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && 785 + test_bit(ATM_VF_READY, &vcc->flags)) 786 goto out; 787 } 788 vcc = NULL; ··· 907 clear_bit(ATM_VF_ADDR, &vcc->flags); 908 clear_bit(ATM_VF_READY, &vcc->flags); 909 910 + /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the 911 + tasklet has finished processing any incoming packets (and, more to 912 + the point, using the vcc pointer). */ 913 + tasklet_unlock_wait(&card->tlet); 914 return; 915 } 916
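
The race fixed here is tasklet-vs-teardown: close could return (and the socket be freed) while the RX tasklet was still dereferencing the vcc, and find_vcc() could hand the tasklet a vcc already being torn down. A minimal sketch of the teardown side of that pattern, with hypothetical my_* names standing in for the driver's real state:

    #include <linux/interrupt.h>
    #include <linux/bitops.h>

    #define MY_VCC_READY 0

    struct my_vcc  { unsigned long flags; /* ... */ };
    struct my_card { struct tasklet_struct rx_tasklet; /* ... */ };

    /* Teardown must wait out any tasklet run that may still be using the
     * object being released; tasklet_unlock_wait() spins until the
     * currently executing run (if any) has finished. */
    static void my_close(struct my_card *card, struct my_vcc *vcc)
    {
            clear_bit(MY_VCC_READY, &vcc->flags);   /* RX lookups now skip it */
            tasklet_unlock_wait(&card->rx_tasklet); /* drain the in-flight run */
            /* only now may the caller free vcc */
    }
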
+1 -1
drivers/char/pcmcia/ipwireless/network.c
··· 239 return err; 240 } 241 242 - static struct ppp_channel_ops ipwireless_ppp_channel_ops = { 243 .start_xmit = ipwireless_ppp_start_xmit, 244 .ioctl = ipwireless_ppp_ioctl 245 };
··· 239 return err; 240 } 241 242 + static const struct ppp_channel_ops ipwireless_ppp_channel_ops = { 243 .start_xmit = ipwireless_ppp_start_xmit, 244 .ioctl = ipwireless_ppp_ioctl 245 };
+4 -2
drivers/isdn/gigaset/bas-gigaset.c
··· 1914 * The next command will reopen the AT channel automatically. 1915 */ 1916 if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { 1917 - kfree(cb); 1918 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); 1919 if (cb->wake_tasklet) 1920 tasklet_schedule(cb->wake_tasklet); 1921 - return rc < 0 ? rc : cb->len; 1922 } 1923 1924 spin_lock_irqsave(&cs->cmdlock, flags);
··· 1914 * The next command will reopen the AT channel automatically. 1915 */ 1916 if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { 1917 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); 1918 if (cb->wake_tasklet) 1919 tasklet_schedule(cb->wake_tasklet); 1920 + if (!rc) 1921 + rc = cb->len; 1922 + kfree(cb); 1923 + return rc; 1924 } 1925 1926 spin_lock_irqsave(&cs->cmdlock, flags);
+1
drivers/isdn/gigaset/capi.c
··· 1052 do { 1053 if (bcap->bcnext == ap) { 1054 bcap->bcnext = bcap->bcnext->bcnext; 1055 return; 1056 } 1057 bcap = bcap->bcnext;
··· 1052 do { 1053 if (bcap->bcnext == ap) { 1054 bcap->bcnext = bcap->bcnext->bcnext; 1055 + spin_unlock_irqrestore(&bcs->aplock, flags); 1056 return; 1057 } 1058 bcap = bcap->bcnext;
+5 -5
drivers/isdn/sc/ioctl.c
··· 174 pr_debug("%s: SCIOGETSPID: ioctl received\n", 175 sc_adapter[card]->devicename); 176 177 - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); 178 if (!spid) { 179 kfree(rcvmsg); 180 return -ENOMEM; ··· 194 kfree(rcvmsg); 195 return status; 196 } 197 - strcpy(spid, rcvmsg->msg_data.byte_array); 198 199 /* 200 * Package the switch type and send to user space ··· 266 return status; 267 } 268 269 - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); 270 if (!dn) { 271 kfree(rcvmsg); 272 return -ENOMEM; 273 } 274 - strcpy(dn, rcvmsg->msg_data.byte_array); 275 kfree(rcvmsg); 276 277 /* ··· 337 pr_debug("%s: SCIOSTAT: ioctl received\n", 338 sc_adapter[card]->devicename); 339 340 - bi = kmalloc (sizeof(boardInfo), GFP_KERNEL); 341 if (!bi) { 342 kfree(rcvmsg); 343 return -ENOMEM;
··· 174 pr_debug("%s: SCIOGETSPID: ioctl received\n", 175 sc_adapter[card]->devicename); 176 177 + spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL); 178 if (!spid) { 179 kfree(rcvmsg); 180 return -ENOMEM; ··· 194 kfree(rcvmsg); 195 return status; 196 } 197 + strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE); 198 199 /* 200 * Package the switch type and send to user space ··· 266 return status; 267 } 268 269 + dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL); 270 if (!dn) { 271 kfree(rcvmsg); 272 return -ENOMEM; 273 } 274 + strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE); 275 kfree(rcvmsg); 276 277 /* ··· 337 pr_debug("%s: SCIOSTAT: ioctl received\n", 338 sc_adapter[card]->devicename); 339 340 + bi = kzalloc(sizeof(boardInfo), GFP_KERNEL); 341 if (!bi) { 342 kfree(rcvmsg); 343 return -ENOMEM;
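
Both halves of this ioctl fix matter because the full fixed-size buffer is later copied to userspace: kmalloc()+strcpy() left uninitialized heap bytes after the NUL, which then leaked; kzalloc() zeroes the buffer and strlcpy() bounds the copy. A generic sketch of the safe shape (hypothetical names and size; the driver's real sizes are SCIOC_SPIDSIZE and SCIOC_DNSIZE):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    #define MY_BUFSIZE 49   /* hypothetical; all of it reaches userspace */

    static int my_get_string(void __user *uptr, const char *src)
    {
            char *buf = kzalloc(MY_BUFSIZE, GFP_KERNEL); /* zeroed buffer */
            int err = 0;

            if (!buf)
                    return -ENOMEM;
            strlcpy(buf, src, MY_BUFSIZE);           /* bounded, NUL-terminated */
            if (copy_to_user(uptr, buf, MY_BUFSIZE)) /* copies all MY_BUFSIZE bytes */
                    err = -EFAULT;
            kfree(buf);
            return err;
    }
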
+13 -12
drivers/net/cxgb3/cxgb3_main.c
··· 3198 } 3199 } 3200 3201 err = pci_request_regions(pdev, DRV_NAME); 3202 if (err) { 3203 /* Just info, some other driver may have claimed the device. */ 3204 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); 3205 - return err; 3206 - } 3207 - 3208 - err = pci_enable_device(pdev); 3209 - if (err) { 3210 - dev_err(&pdev->dev, "cannot enable PCI device\n"); 3211 - goto out_release_regions; 3212 } 3213 3214 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { ··· 3217 if (err) { 3218 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 3219 "coherent allocations\n"); 3220 - goto out_disable_device; 3221 } 3222 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { 3223 dev_err(&pdev->dev, "no usable DMA configuration\n"); 3224 - goto out_disable_device; 3225 } 3226 3227 pci_set_master(pdev); ··· 3234 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3235 if (!adapter) { 3236 err = -ENOMEM; 3237 - goto out_disable_device; 3238 } 3239 3240 adapter->nofail_skb = ··· 3370 out_free_adapter: 3371 kfree(adapter); 3372 3373 - out_disable_device: 3374 - pci_disable_device(pdev); 3375 out_release_regions: 3376 pci_release_regions(pdev); 3377 pci_set_drvdata(pdev, NULL); 3378 return err; 3379 } 3380
··· 3198 } 3199 } 3200 3201 + err = pci_enable_device(pdev); 3202 + if (err) { 3203 + dev_err(&pdev->dev, "cannot enable PCI device\n"); 3204 + goto out; 3205 + } 3206 + 3207 err = pci_request_regions(pdev, DRV_NAME); 3208 if (err) { 3209 /* Just info, some other driver may have claimed the device. */ 3210 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); 3211 + goto out_disable_device; 3212 } 3213 3214 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { ··· 3217 if (err) { 3218 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 3219 "coherent allocations\n"); 3220 + goto out_release_regions; 3221 } 3222 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { 3223 dev_err(&pdev->dev, "no usable DMA configuration\n"); 3224 + goto out_release_regions; 3225 } 3226 3227 pci_set_master(pdev); ··· 3234 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3235 if (!adapter) { 3236 err = -ENOMEM; 3237 + goto out_release_regions; 3238 } 3239 3240 adapter->nofail_skb = ··· 3370 out_free_adapter: 3371 kfree(adapter); 3372 3373 out_release_regions: 3374 pci_release_regions(pdev); 3375 + out_disable_device: 3376 + pci_disable_device(pdev); 3377 pci_set_drvdata(pdev, NULL); 3378 + out: 3379 return err; 3380 } 3381
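
The reordering restores the invariant that probe acquires resources in order and the error ladder releases them in exact reverse: the old code requested regions before enabling the device, and on early failure disabled a device it had never enabled. A generic skeleton of the pattern (hypothetical driver name):

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err;

            err = pci_enable_device(pdev);            /* acquire #1 */
            if (err)
                    return err;

            err = pci_request_regions(pdev, "mydrv"); /* acquire #2 */
            if (err)
                    goto out_disable;

            /* ... DMA masks, allocations, registration: a failure at step N
             * jumps to the label that releases steps N-1 down to 1 ... */

            return 0;

    out_disable:
            pci_disable_device(pdev);                 /* release #1 */
            return err;
    }

The cxgb4vf hunk below applies the same reordering in the sibling Chelsio driver.
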
+18 -17
drivers/net/cxgb4vf/cxgb4vf_main.c
··· 2462 version_printed = 1; 2463 } 2464 2465 - /* 2466 - * Reserve PCI resources for the device. If we can't get them some 2467 - * other driver may have already claimed the device ... 2468 - */ 2469 - err = pci_request_regions(pdev, KBUILD_MODNAME); 2470 - if (err) { 2471 - dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 2472 - return err; 2473 - } 2474 2475 /* 2476 * Initialize generic PCI device state. ··· 2469 err = pci_enable_device(pdev); 2470 if (err) { 2471 dev_err(&pdev->dev, "cannot enable PCI device\n"); 2472 - goto err_release_regions; 2473 } 2474 2475 /* ··· 2492 if (err) { 2493 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" 2494 " coherent allocations\n"); 2495 - goto err_disable_device; 2496 } 2497 pci_using_dac = 1; 2498 } else { 2499 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2500 if (err != 0) { 2501 dev_err(&pdev->dev, "no usable DMA configuration\n"); 2502 - goto err_disable_device; 2503 } 2504 pci_using_dac = 0; 2505 } ··· 2515 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 2516 if (!adapter) { 2517 err = -ENOMEM; 2518 - goto err_disable_device; 2519 } 2520 pci_set_drvdata(pdev, adapter); 2521 adapter->pdev = pdev; ··· 2751 kfree(adapter); 2752 pci_set_drvdata(pdev, NULL); 2753 2754 - err_disable_device: 2755 - pci_disable_device(pdev); 2756 - pci_clear_master(pdev); 2757 - 2758 err_release_regions: 2759 pci_release_regions(pdev); 2760 pci_set_drvdata(pdev, NULL); 2761 2762 err_out: 2763 return err;
··· 2462 version_printed = 1; 2463 } 2464 2465 2466 /* 2467 * Initialize generic PCI device state. ··· 2478 err = pci_enable_device(pdev); 2479 if (err) { 2480 dev_err(&pdev->dev, "cannot enable PCI device\n"); 2481 + return err; 2482 + } 2483 + 2484 + /* 2485 + * Reserve PCI resources for the device. If we can't get them some 2486 + * other driver may have already claimed the device ... 2487 + */ 2488 + err = pci_request_regions(pdev, KBUILD_MODNAME); 2489 + if (err) { 2490 + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 2491 + goto err_disable_device; 2492 } 2493 2494 /* ··· 2491 if (err) { 2492 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" 2493 " coherent allocations\n"); 2494 + goto err_release_regions; 2495 } 2496 pci_using_dac = 1; 2497 } else { 2498 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2499 if (err != 0) { 2500 dev_err(&pdev->dev, "no usable DMA configuration\n"); 2501 + goto err_release_regions; 2502 } 2503 pci_using_dac = 0; 2504 } ··· 2514 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 2515 if (!adapter) { 2516 err = -ENOMEM; 2517 + goto err_release_regions; 2518 } 2519 pci_set_drvdata(pdev, adapter); 2520 adapter->pdev = pdev; ··· 2750 kfree(adapter); 2751 pci_set_drvdata(pdev, NULL); 2752 2753 err_release_regions: 2754 pci_release_regions(pdev); 2755 pci_set_drvdata(pdev, NULL); 2756 + pci_clear_master(pdev); 2757 + 2758 + err_disable_device: 2759 + pci_disable_device(pdev); 2760 2761 err_out: 2762 return err;
+1 -1
drivers/net/davinci_emac.c
··· 2944 release_mem_region(res->start, res->end - res->start + 1); 2945 2946 unregister_netdev(ndev); 2947 - free_netdev(ndev); 2948 iounmap(priv->remap_addr); 2949 2950 clk_disable(emac_clk); 2951 clk_put(emac_clk);
··· 2944 release_mem_region(res->start, res->end - res->start + 1); 2945 2946 unregister_netdev(ndev); 2947 iounmap(priv->remap_addr); 2948 + free_netdev(ndev); 2949 2950 clk_disable(emac_clk); 2951 clk_put(emac_clk);
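
The davinci change is subtler than a simple swap: priv comes from netdev_priv(ndev), so it lives inside the net_device allocation and free_netdev() invalidates it; the iounmap() of a pointer stored in priv therefore has to happen first. A sketch of the constraint (hypothetical priv layout):

    struct my_priv { void __iomem *remap_addr; /* ... */ };

    /* netdev_priv() memory is part of the net_device allocation and is
     * freed with it; touch priv only before free_netdev(). */
    static void my_remove(struct net_device *ndev)
    {
            struct my_priv *priv = netdev_priv(ndev);

            unregister_netdev(ndev);
            iounmap(priv->remap_addr);  /* reads priv: must precede the free */
            free_netdev(ndev);          /* frees ndev and priv together */
    }
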
+2
drivers/net/e100.c
··· 1779 for (cb = nic->cb_to_clean; 1780 cb->status & cpu_to_le16(cb_complete); 1781 cb = nic->cb_to_clean = cb->next) { 1782 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, 1783 "cb[%d]->status = 0x%04X\n", 1784 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), ··· 1928 1929 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, 1930 "status=0x%04X\n", rfd_status); 1931 1932 /* If data isn't ready, nothing to indicate */ 1933 if (unlikely(!(rfd_status & cb_complete))) {
··· 1779 for (cb = nic->cb_to_clean; 1780 cb->status & cpu_to_le16(cb_complete); 1781 cb = nic->cb_to_clean = cb->next) { 1782 + rmb(); /* read skb after status */ 1783 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, 1784 "cb[%d]->status = 0x%04X\n", 1785 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), ··· 1927 1928 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, 1929 "status=0x%04X\n", rfd_status); 1930 + rmb(); /* read size after status bit */ 1931 1932 /* If data isn't ready, nothing to indicate */ 1933 if (unlikely(!(rfd_status & cb_complete))) {
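
The same rmb() lands in e1000, e1000e, igb, igbvf, ixgb, ixgbe and ixgbevf below, all for one reason: the device writes the descriptor body first and its done/DD status bit last, but without a barrier the CPU may reorder the dependent loads ahead of the status load and see stale descriptor fields (or stale buffer_info). The generic shape, with a hypothetical ring layout and helpers:

    /* The device sets DESC_DONE only after the rest of the descriptor is
     * written; rmb() keeps our later loads from moving before the status
     * load that proved the descriptor complete. */
    while (le16_to_cpu(desc->status) & DESC_DONE) {
            rmb();                           /* status first, then the rest */
            len = le16_to_cpu(desc->length);
            my_deliver(ring, desc, len);     /* hypothetical hand-off */
            desc = my_next_desc(ring, desc); /* hypothetical advance */
    }
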
+3
drivers/net/e1000/e1000_main.c
··· 3454 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3455 (count < tx_ring->count)) { 3456 bool cleaned = false; 3457 for ( ; !cleaned; count++) { 3458 tx_desc = E1000_TX_DESC(*tx_ring, i); 3459 buffer_info = &tx_ring->buffer_info[i]; ··· 3644 if (*work_done >= work_to_do) 3645 break; 3646 (*work_done)++; 3647 3648 status = rx_desc->status; 3649 skb = buffer_info->skb; ··· 3851 if (*work_done >= work_to_do) 3852 break; 3853 (*work_done)++; 3854 3855 status = rx_desc->status; 3856 skb = buffer_info->skb;
··· 3454 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3455 (count < tx_ring->count)) { 3456 bool cleaned = false; 3457 + rmb(); /* read buffer_info after eop_desc */ 3458 for ( ; !cleaned; count++) { 3459 tx_desc = E1000_TX_DESC(*tx_ring, i); 3460 buffer_info = &tx_ring->buffer_info[i]; ··· 3643 if (*work_done >= work_to_do) 3644 break; 3645 (*work_done)++; 3646 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 3647 3648 status = rx_desc->status; 3649 skb = buffer_info->skb; ··· 3849 if (*work_done >= work_to_do) 3850 break; 3851 (*work_done)++; 3852 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 3853 3854 status = rx_desc->status; 3855 skb = buffer_info->skb;
+4
drivers/net/e1000e/netdev.c
··· 781 if (*work_done >= work_to_do) 782 break; 783 (*work_done)++; 784 785 status = rx_desc->status; 786 skb = buffer_info->skb; ··· 992 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 993 (count < tx_ring->count)) { 994 bool cleaned = false; 995 for (; !cleaned; count++) { 996 tx_desc = E1000_TX_DESC(*tx_ring, i); 997 buffer_info = &tx_ring->buffer_info[i]; ··· 1089 break; 1090 (*work_done)++; 1091 skb = buffer_info->skb; 1092 1093 /* in the packet split case this is header only */ 1094 prefetch(skb->data - NET_IP_ALIGN); ··· 1289 if (*work_done >= work_to_do) 1290 break; 1291 (*work_done)++; 1292 1293 status = rx_desc->status; 1294 skb = buffer_info->skb;
··· 781 if (*work_done >= work_to_do) 782 break; 783 (*work_done)++; 784 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 785 786 status = rx_desc->status; 787 skb = buffer_info->skb; ··· 991 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 992 (count < tx_ring->count)) { 993 bool cleaned = false; 994 + rmb(); /* read buffer_info after eop_desc */ 995 for (; !cleaned; count++) { 996 tx_desc = E1000_TX_DESC(*tx_ring, i); 997 buffer_info = &tx_ring->buffer_info[i]; ··· 1087 break; 1088 (*work_done)++; 1089 skb = buffer_info->skb; 1090 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 1091 1092 /* in the packet split case this is header only */ 1093 prefetch(skb->data - NET_IP_ALIGN); ··· 1286 if (*work_done >= work_to_do) 1287 break; 1288 (*work_done)++; 1289 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 1290 1291 status = rx_desc->status; 1292 skb = buffer_info->skb;
+2 -15
drivers/net/enic/enic_main.c
··· 1087 { 1088 struct vic_provinfo *vp; 1089 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1090 - u8 *uuid; 1091 char uuid_str[38]; 1092 - static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-" 1093 - "%02X%02X-%02X%02X%02X%02X%0X%02X"; 1094 int err; 1095 1096 err = enic_vnic_dev_deinit(enic); ··· 1118 ETH_ALEN, mac); 1119 1120 if (enic->pp.set & ENIC_SET_INSTANCE) { 1121 - uuid = enic->pp.instance_uuid; 1122 - sprintf(uuid_str, uuid_fmt, 1123 - uuid[0], uuid[1], uuid[2], uuid[3], 1124 - uuid[4], uuid[5], uuid[6], uuid[7], 1125 - uuid[8], uuid[9], uuid[10], uuid[11], 1126 - uuid[12], uuid[13], uuid[14], uuid[15]); 1127 vic_provinfo_add_tlv(vp, 1128 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1129 sizeof(uuid_str), uuid_str); 1130 } 1131 1132 if (enic->pp.set & ENIC_SET_HOST) { 1133 - uuid = enic->pp.host_uuid; 1134 - sprintf(uuid_str, uuid_fmt, 1135 - uuid[0], uuid[1], uuid[2], uuid[3], 1136 - uuid[4], uuid[5], uuid[6], uuid[7], 1137 - uuid[8], uuid[9], uuid[10], uuid[11], 1138 - uuid[12], uuid[13], uuid[14], uuid[15]); 1139 vic_provinfo_add_tlv(vp, 1140 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1141 sizeof(uuid_str), uuid_str);
··· 1087 { 1088 struct vic_provinfo *vp; 1089 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1090 char uuid_str[38]; 1091 int err; 1092 1093 err = enic_vnic_dev_deinit(enic); ··· 1121 ETH_ALEN, mac); 1122 1123 if (enic->pp.set & ENIC_SET_INSTANCE) { 1124 + sprintf(uuid_str, "%pUB", enic->pp.instance_uuid); 1125 vic_provinfo_add_tlv(vp, 1126 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1127 sizeof(uuid_str), uuid_str); 1128 } 1129 1130 if (enic->pp.set & ENIC_SET_HOST) { 1131 + sprintf(uuid_str, "%pUB", enic->pp.host_uuid); 1132 vic_provinfo_add_tlv(vp, 1133 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1134 sizeof(uuid_str), uuid_str);
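
The enic cleanup relies on the kernel's %pU printk/sprintf extension, which formats a 16-byte UUID in one step and retires the hand-rolled format string (whose removed version even carried a stray "%0X" conversion). The suffix picks case and byte order; %pUB is upper case, big-endian. A minimal sketch:

    u8 uuid[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
    char buf[38];   /* 36 formatted characters + NUL, as in the driver */

    sprintf(buf, "%pUB", uuid);
    /* buf now holds "00010203-0405-0607-0809-0A0B0C0D0E0F" */
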
+2
drivers/net/igb/igb_main.c
··· 5353 5354 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 5355 (count < tx_ring->count)) { 5356 for (cleaned = false; !cleaned; count++) { 5357 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 5358 buffer_info = &tx_ring->buffer_info[i]; ··· 5559 if (*work_done >= budget) 5560 break; 5561 (*work_done)++; 5562 5563 skb = buffer_info->skb; 5564 prefetch(skb->data - NET_IP_ALIGN);
··· 5353 5354 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 5355 (count < tx_ring->count)) { 5356 + rmb(); /* read buffer_info after eop_desc status */ 5357 for (cleaned = false; !cleaned; count++) { 5358 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 5359 buffer_info = &tx_ring->buffer_info[i]; ··· 5558 if (*work_done >= budget) 5559 break; 5560 (*work_done)++; 5561 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 5562 5563 skb = buffer_info->skb; 5564 prefetch(skb->data - NET_IP_ALIGN);
+2
drivers/net/igbvf/netdev.c
··· 248 if (*work_done >= work_to_do) 249 break; 250 (*work_done)++; 251 252 buffer_info = &rx_ring->buffer_info[i]; 253 ··· 781 782 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 783 (count < tx_ring->count)) { 784 for (cleaned = false; !cleaned; count++) { 785 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 786 buffer_info = &tx_ring->buffer_info[i];
··· 248 if (*work_done >= work_to_do) 249 break; 250 (*work_done)++; 251 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 252 253 buffer_info = &rx_ring->buffer_info[i]; 254 ··· 780 781 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 782 (count < tx_ring->count)) { 783 + rmb(); /* read buffer_info after eop_desc status */ 784 for (cleaned = false; !cleaned; count++) { 785 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 786 buffer_info = &tx_ring->buffer_info[i];
+2
drivers/net/ixgb/ixgb_main.c
··· 1816 1817 while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1818 1819 for (cleaned = false; !cleaned; ) { 1820 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1821 buffer_info = &tx_ring->buffer_info[i]; ··· 1977 break; 1978 1979 (*work_done)++; 1980 status = rx_desc->status; 1981 skb = buffer_info->skb; 1982 buffer_info->skb = NULL;
··· 1816 1817 while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1818 1819 + rmb(); /* read buffer_info after eop_desc */ 1820 for (cleaned = false; !cleaned; ) { 1821 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1822 buffer_info = &tx_ring->buffer_info[i]; ··· 1976 break; 1977 1978 (*work_done)++; 1979 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 1980 status = rx_desc->status; 1981 skb = buffer_info->skb; 1982 buffer_info->skb = NULL;
+11 -4
drivers/net/ixgbe/ixgbe_main.c
··· 748 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 749 (count < tx_ring->work_limit)) { 750 bool cleaned = false; 751 for ( ; !cleaned; count++) { 752 struct sk_buff *skb; 753 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); ··· 6156 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6157 txq += adapter->ring_feature[RING_F_FCOE].mask; 6158 return txq; 6159 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6160 txq = adapter->fcoe.up; 6161 return txq; 6162 } 6163 } 6164 #endif ··· 6219 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6220 (skb->protocol == htons(ETH_P_FCOE) || 6221 skb->protocol == htons(ETH_P_FIP))) { 6222 - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6223 - << IXGBE_TX_FLAGS_VLAN_SHIFT); 6224 - tx_flags |= ((adapter->fcoe.up << 13) 6225 - << IXGBE_TX_FLAGS_VLAN_SHIFT); 6226 /* flag for FCoE offloads */ 6227 if (skb->protocol == htons(ETH_P_FCOE)) 6228 tx_flags |= IXGBE_TX_FLAGS_FCOE;
··· 748 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 749 (count < tx_ring->work_limit)) { 750 bool cleaned = false; 751 + rmb(); /* read buffer_info after eop_desc */ 752 for ( ; !cleaned; count++) { 753 struct sk_buff *skb; 754 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); ··· 6155 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6156 txq += adapter->ring_feature[RING_F_FCOE].mask; 6157 return txq; 6158 + #ifdef CONFIG_IXGBE_DCB 6159 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6160 txq = adapter->fcoe.up; 6161 return txq; 6162 + #endif 6163 } 6164 } 6165 #endif ··· 6216 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6217 (skb->protocol == htons(ETH_P_FCOE) || 6218 skb->protocol == htons(ETH_P_FIP))) { 6219 + #ifdef CONFIG_IXGBE_DCB 6220 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6221 + tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6222 + << IXGBE_TX_FLAGS_VLAN_SHIFT); 6223 + tx_flags |= ((adapter->fcoe.up << 13) 6224 + << IXGBE_TX_FLAGS_VLAN_SHIFT); 6225 + } 6226 + #endif 6227 /* flag for FCoE offloads */ 6228 if (skb->protocol == htons(ETH_P_FCOE)) 6229 tx_flags |= IXGBE_TX_FLAGS_FCOE;
+2
drivers/net/ixgbevf/ixgbevf_main.c
··· 231 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 232 (count < tx_ring->work_limit)) { 233 bool cleaned = false; 234 for ( ; !cleaned; count++) { 235 struct sk_buff *skb; 236 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); ··· 519 break; 520 (*work_done)++; 521 522 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 523 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); 524 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
··· 231 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 232 (count < tx_ring->work_limit)) { 233 bool cleaned = false; 234 + rmb(); /* read buffer_info after eop_desc */ 235 for ( ; !cleaned; count++) { 236 struct sk_buff *skb; 237 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); ··· 518 break; 519 (*work_done)++; 520 521 + rmb(); /* read descriptor and rx_buffer_info after status DD */ 522 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 523 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); 524 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+7 -8
drivers/net/netxen/netxen_nic_main.c
··· 2001 if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) 2002 goto request_reset; 2003 2004 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 2005 /* try to scrub interrupt */ 2006 netxen_napi_disable(adapter); 2007 - 2008 - adapter->netdev->trans_start = jiffies; 2009 2010 netxen_napi_enable(adapter); 2011 2012 netif_wake_queue(adapter->netdev); 2013 2014 clear_bit(__NX_RESETTING, &adapter->state); 2015 - return; 2016 } else { 2017 clear_bit(__NX_RESETTING, &adapter->state); 2018 - if (!netxen_nic_reset_context(adapter)) { 2019 - adapter->netdev->trans_start = jiffies; 2020 - return; 2021 } 2022 - 2023 - /* context reset failed, fall through for fw reset */ 2024 } 2025 2026 request_reset: 2027 adapter->need_fw_reset = 1;
··· 2001 if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) 2002 goto request_reset; 2003 2004 + rtnl_lock(); 2005 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 2006 /* try to scrub interrupt */ 2007 netxen_napi_disable(adapter); 2008 2009 netxen_napi_enable(adapter); 2010 2011 netif_wake_queue(adapter->netdev); 2012 2013 clear_bit(__NX_RESETTING, &adapter->state); 2014 } else { 2015 clear_bit(__NX_RESETTING, &adapter->state); 2016 + if (netxen_nic_reset_context(adapter)) { 2017 + rtnl_unlock(); 2018 + goto request_reset; 2019 } 2020 } 2021 + adapter->netdev->trans_start = jiffies; 2022 + rtnl_unlock(); 2023 + return; 2024 2025 request_reset: 2026 adapter->need_fw_reset = 1;
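
netxen's timeout recovery runs from a work item, where napi_disable()/napi_enable() and the queue wakeup can race an administrator's ifdown/ifup; taking the rtnl lock serializes it against ndo_open/ndo_stop, which run under the same lock. The shape of the pattern (hypothetical adapter layout and reset helper):

    struct my_adapter {
            struct net_device *netdev;
            struct napi_struct napi;
            struct work_struct tx_timeout_task;
    };

    static void my_tx_timeout_task(struct work_struct *work)
    {
            struct my_adapter *adapter =
                    container_of(work, struct my_adapter, tx_timeout_task);

            rtnl_lock();            /* excludes concurrent dev open/close */
            napi_disable(&adapter->napi);
            my_reset_hw(adapter);   /* hypothetical hardware scrub */
            napi_enable(&adapter->napi);
            netif_wake_queue(adapter->netdev);
            rtnl_unlock();
    }
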
+3 -3
drivers/net/ppp_async.c
··· 108 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, 109 int len, int inbound); 110 111 - static struct ppp_channel_ops async_ops = { 112 - ppp_async_send, 113 - ppp_async_ioctl 114 }; 115 116 /*
··· 108 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, 109 int len, int inbound); 110 111 + static const struct ppp_channel_ops async_ops = { 112 + .start_xmit = ppp_async_send, 113 + .ioctl = ppp_async_ioctl, 114 }; 115 116 /*
+3 -3
drivers/net/ppp_synctty.c
··· 97 static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, 98 char *flags, int count); 99 100 - static struct ppp_channel_ops sync_ops = { 101 - ppp_sync_send, 102 - ppp_sync_ioctl 103 }; 104 105 /*
··· 97 static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, 98 char *flags, int count); 99 100 + static const struct ppp_channel_ops sync_ops = { 101 + .start_xmit = ppp_sync_send, 102 + .ioctl = ppp_sync_ioctl, 103 }; 104 105 /*
+2 -2
drivers/net/pppoe.c
··· 92 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 93 94 static const struct proto_ops pppoe_ops; 95 - static struct ppp_channel_ops pppoe_chan_ops; 96 97 /* per-net private data for this module */ 98 static int pppoe_net_id __read_mostly; ··· 963 return __pppoe_xmit(sk, skb); 964 } 965 966 - static struct ppp_channel_ops pppoe_chan_ops = { 967 .start_xmit = pppoe_xmit, 968 }; 969
··· 92 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 93 94 static const struct proto_ops pppoe_ops; 95 + static const struct ppp_channel_ops pppoe_chan_ops; 96 97 /* per-net private data for this module */ 98 static int pppoe_net_id __read_mostly; ··· 963 return __pppoe_xmit(sk, skb); 964 } 965 966 + static const struct ppp_channel_ops pppoe_chan_ops = { 967 .start_xmit = pppoe_xmit, 968 }; 969
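
The three ppp hunks (ppp_async, ppp_synctty, pppoe) are one cleanup: designated initializers bind handlers to named ops slots instead of relying on field order, and const lets the table live in read-only data, with the pppoe forward declaration gaining the same qualifier. A minimal sketch with hypothetical handlers:

    static int my_start_xmit(struct ppp_channel *chan, struct sk_buff *skb);
    static int my_ioctl(struct ppp_channel *chan, unsigned int cmd,
                        unsigned long arg);

    /* Named fields survive struct reordering; const places this in .rodata. */
    static const struct ppp_channel_ops my_chan_ops = {
            .start_xmit = my_start_xmit,
            .ioctl      = my_ioctl,
    };
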
-1
drivers/net/usb/usbnet.c
··· 1457 spin_lock_irq(&dev->txq.lock); 1458 while ((res = usb_get_from_anchor(&dev->deferred))) { 1459 1460 - printk(KERN_INFO"%s has delayed data\n", __func__); 1461 skb = (struct sk_buff *)res->context; 1462 retval = usb_submit_urb(res, GFP_ATOMIC); 1463 if (retval < 0) {
··· 1457 spin_lock_irq(&dev->txq.lock); 1458 while ((res = usb_get_from_anchor(&dev->deferred))) { 1459 1460 skb = (struct sk_buff *)res->context; 1461 retval = usb_submit_urb(res, GFP_ATOMIC); 1462 if (retval < 0) {
+2 -2
drivers/net/via-velocity.c
··· 2763 2764 vptr->dev = dev; 2765 2766 - dev->irq = pdev->irq; 2767 - 2768 ret = pci_enable_device(pdev); 2769 if (ret < 0) 2770 goto err_free_dev; 2771 2772 ret = velocity_get_pci_info(vptr, pdev); 2773 if (ret < 0) {
··· 2763 2764 vptr->dev = dev; 2765 2766 ret = pci_enable_device(pdev); 2767 if (ret < 0) 2768 goto err_free_dev; 2769 + 2770 + dev->irq = pdev->irq; 2771 2772 ret = velocity_get_pci_info(vptr, pdev); 2773 if (ret < 0) {
+14
drivers/net/virtio_net.c
··· 705 return 0; 706 } 707 708 static int virtnet_set_tx_csum(struct net_device *dev, u32 data) 709 { 710 struct virtnet_info *vi = netdev_priv(dev); ··· 830 } 831 832 static const struct ethtool_ops virtnet_ethtool_ops = { 833 .set_tx_csum = virtnet_set_tx_csum, 834 .set_sg = ethtool_op_set_sg, 835 .set_tso = ethtool_op_set_tso,
··· 705 return 0; 706 } 707 708 + static void virtnet_get_drvinfo(struct net_device *dev, 709 + struct ethtool_drvinfo *drvinfo) 710 + { 711 + struct virtnet_info *vi = netdev_priv(dev); 712 + struct virtio_device *vdev = vi->vdev; 713 + 714 + strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver)); 715 + strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version)); 716 + strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version)); 717 + strncpy(drvinfo->bus_info, dev_name(&vdev->dev), 718 + ARRAY_SIZE(drvinfo->bus_info)); 719 + } 720 + 721 static int virtnet_set_tx_csum(struct net_device *dev, u32 data) 722 { 723 struct virtnet_info *vi = netdev_priv(dev); ··· 817 } 818 819 static const struct ethtool_ops virtnet_ethtool_ops = { 820 + .get_drvinfo = virtnet_get_drvinfo, 821 .set_tx_csum = virtnet_set_tx_csum, 822 .set_sg = ethtool_op_set_sg, 823 .set_tso = ethtool_op_set_tso,
+27 -16
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 63 u8 rxchainmask, 64 struct ath9k_cal_list *currCal) 65 { 66 bool iscaldone = false; 67 68 if (currCal->calState == CAL_RUNNING) { ··· 82 } 83 84 currCal->calData->calPostProc(ah, numChains); 85 - ichan->CalValid |= currCal->calData->calType; 86 currCal->calState = CAL_DONE; 87 iscaldone = true; 88 } else { 89 ar9002_hw_setup_calibration(ah, currCal); 90 } 91 } 92 - } else if (!(ichan->CalValid & currCal->calData->calType)) { 93 ath9k_hw_reset_calibration(ah, currCal); 94 } 95 ··· 687 { 688 bool iscaldone = true; 689 struct ath9k_cal_list *currCal = ah->cal_list_curr; 690 691 - if (currCal && 692 (currCal->calState == CAL_RUNNING || 693 currCal->calState == CAL_WAITING)) { 694 iscaldone = ar9002_hw_per_calibration(ah, chan, ··· 709 } 710 711 /* Do NF cal only at longer intervals */ 712 - if (longcal) { 713 /* Do periodic PAOffset Cal */ 714 ar9002_hw_pa_cal(ah, false); 715 ar9002_hw_olc_temp_compensation(ah); ··· 718 * Get the value from the previous NF cal and update 719 * history buffer. 720 */ 721 - ath9k_hw_getnf(ah, chan); 722 723 - /* 724 - * Load the NF from history buffer of the current channel. 725 - * NF is slow time-variant, so it is OK to use a historical 726 - * value. 727 - */ 728 - ath9k_hw_loadnf(ah, ah->curchan); 729 - 730 - ath9k_hw_start_nfcal(ah); 731 } 732 733 return iscaldone; ··· 877 ar9002_hw_pa_cal(ah, true); 878 879 /* Do NF Calibration after DC offset and other calibrations */ 880 - REG_WRITE(ah, AR_PHY_AGC_CONTROL, 881 - REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF); 882 883 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 884 ··· 911 ath9k_hw_reset_calibration(ah, ah->cal_list_curr); 912 } 913 914 - chan->CalValid = 0; 915 916 return true; 917 }
··· 63 u8 rxchainmask, 64 struct ath9k_cal_list *currCal) 65 { 66 + struct ath9k_hw_cal_data *caldata = ah->caldata; 67 bool iscaldone = false; 68 69 if (currCal->calState == CAL_RUNNING) { ··· 81 } 82 83 currCal->calData->calPostProc(ah, numChains); 84 + caldata->CalValid |= currCal->calData->calType; 85 currCal->calState = CAL_DONE; 86 iscaldone = true; 87 } else { 88 ar9002_hw_setup_calibration(ah, currCal); 89 } 90 } 91 + } else if (!(caldata->CalValid & currCal->calData->calType)) { 92 ath9k_hw_reset_calibration(ah, currCal); 93 } 94 ··· 686 { 687 bool iscaldone = true; 688 struct ath9k_cal_list *currCal = ah->cal_list_curr; 689 + bool nfcal, nfcal_pending = false; 690 691 + nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF); 692 + if (ah->caldata) 693 + nfcal_pending = ah->caldata->nfcal_pending; 694 + 695 + if (currCal && !nfcal && 696 (currCal->calState == CAL_RUNNING || 697 currCal->calState == CAL_WAITING)) { 698 iscaldone = ar9002_hw_per_calibration(ah, chan, ··· 703 } 704 705 /* Do NF cal only at longer intervals */ 706 + if (longcal || nfcal_pending) { 707 /* Do periodic PAOffset Cal */ 708 ar9002_hw_pa_cal(ah, false); 709 ar9002_hw_olc_temp_compensation(ah); ··· 712 * Get the value from the previous NF cal and update 713 * history buffer. 714 */ 715 + if (ath9k_hw_getnf(ah, chan)) { 716 + /* 717 + * Load the NF from history buffer of the current 718 + * channel. 719 + * NF is slow time-variant, so it is OK to use a 720 + * historical value. 721 + */ 722 + ath9k_hw_loadnf(ah, ah->curchan); 723 + } 724 725 + if (longcal) 726 + ath9k_hw_start_nfcal(ah, false); 727 } 728 729 return iscaldone; ··· 869 ar9002_hw_pa_cal(ah, true); 870 871 /* Do NF Calibration after DC offset and other calibrations */ 872 + ath9k_hw_start_nfcal(ah, true); 873 + 874 + if (ah->caldata) 875 + ah->caldata->nfcal_pending = true; 876 877 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 878 ··· 901 ath9k_hw_reset_calibration(ah, ah->cal_list_curr); 902 } 903 904 + if (ah->caldata) 905 + ah->caldata->CalValid = 0; 906 907 return true; 908 }
+14 -4
drivers/net/wireless/ath/ath9k/ar9003_calib.c
··· 68 u8 rxchainmask, 69 struct ath9k_cal_list *currCal) 70 { 71 /* Cal is assumed not done until explicitly set below */ 72 bool iscaldone = false; 73 ··· 96 currCal->calData->calPostProc(ah, numChains); 97 98 /* Calibration has finished. */ 99 - ichan->CalValid |= currCal->calData->calType; 100 currCal->calState = CAL_DONE; 101 iscaldone = true; 102 } else { ··· 107 ar9003_hw_setup_calibration(ah, currCal); 108 } 109 } 110 - } else if (!(ichan->CalValid & currCal->calData->calType)) { 111 /* If current cal is marked invalid in channel, kick it off */ 112 ath9k_hw_reset_calibration(ah, currCal); 113 } ··· 150 /* Do NF cal only at longer intervals */ 151 if (longcal) { 152 /* 153 * Load the NF from history buffer of the current channel. 154 * NF is slow time-variant, so it is OK to use a historical 155 * value. ··· 163 ath9k_hw_loadnf(ah, ah->curchan); 164 165 /* start NF calibration, without updating BB NF register */ 166 - ath9k_hw_start_nfcal(ah); 167 } 168 169 return iscaldone; ··· 769 /* Revert chainmasks to their original values before NF cal */ 770 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 771 772 /* Initialize list pointers */ 773 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 774 ··· 794 if (ah->cal_list_curr) 795 ath9k_hw_reset_calibration(ah, ah->cal_list_curr); 796 797 - chan->CalValid = 0; 798 799 return true; 800 }
··· 68 u8 rxchainmask, 69 struct ath9k_cal_list *currCal) 70 { 71 + struct ath9k_hw_cal_data *caldata = ah->caldata; 72 /* Cal is assumed not done until explicitly set below */ 73 bool iscaldone = false; 74 ··· 95 currCal->calData->calPostProc(ah, numChains); 96 97 /* Calibration has finished. */ 98 + caldata->CalValid |= currCal->calData->calType; 99 currCal->calState = CAL_DONE; 100 iscaldone = true; 101 } else { ··· 106 ar9003_hw_setup_calibration(ah, currCal); 107 } 108 } 109 + } else if (!(caldata->CalValid & currCal->calData->calType)) { 110 /* If current cal is marked invalid in channel, kick it off */ 111 ath9k_hw_reset_calibration(ah, currCal); 112 } ··· 149 /* Do NF cal only at longer intervals */ 150 if (longcal) { 151 /* 152 + * Get the value from the previous NF cal and update 153 + * history buffer. 154 + */ 155 + ath9k_hw_getnf(ah, chan); 156 + 157 + /* 158 * Load the NF from history buffer of the current channel. 159 * NF is slow time-variant, so it is OK to use a historical 160 * value. ··· 156 ath9k_hw_loadnf(ah, ah->curchan); 157 158 /* start NF calibration, without updating BB NF register */ 159 + ath9k_hw_start_nfcal(ah, false); 160 } 161 162 return iscaldone; ··· 762 /* Revert chainmasks to their original values before NF cal */ 763 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 764 765 + ath9k_hw_start_nfcal(ah, true); 766 + 767 /* Initialize list pointers */ 768 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 769 ··· 785 if (ah->cal_list_curr) 786 ath9k_hw_reset_calibration(ah, ah->cal_list_curr); 787 788 + if (ah->caldata) 789 + ah->caldata->CalValid = 0; 790 791 return true; 792 }
+381 -7
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
··· 41 #define LE16(x) __constant_cpu_to_le16(x) 42 #define LE32(x) __constant_cpu_to_le32(x) 43 44 static const struct ar9300_eeprom ar9300_default = { 45 .eepromVersion = 2, 46 .templateVersion = 2, ··· 622 }, 623 } 624 }; 625 626 static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) 627 { ··· 1439 #undef POW_SM 1440 } 1441 1442 - static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq) 1443 { 1444 - u8 targetPowerValT2[ar9300RateSize]; 1445 /* XXX: hard code for now, need to get from eeprom struct */ 1446 u8 ht40PowerIncForPdadc = 0; 1447 bool is2GHz = false; ··· 1575 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); 1576 i++; 1577 } 1578 - 1579 - /* Write target power array to registers */ 1580 - ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 1581 } 1582 1583 static int ar9003_hw_cal_pier_get(struct ath_hw *ah, ··· 1818 return 0; 1819 } 1820 1821 static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, 1822 struct ath9k_channel *chan, u16 cfgCtl, 1823 u8 twiceAntennaReduction, 1824 u8 twiceMaxRegulatoryPower, 1825 u8 powerLimit) 1826 { 1827 - ah->txpower_limit = powerLimit; 1828 - ar9003_hw_set_target_power_eeprom(ah, chan->channel); 1829 ar9003_hw_calibration_apply(ah, chan->channel); 1830 } 1831
··· 41 #define LE16(x) __constant_cpu_to_le16(x) 42 #define LE32(x) __constant_cpu_to_le32(x) 43 44 + /* Local defines to distinguish between extension and control CTL's */ 45 + #define EXT_ADDITIVE (0x8000) 46 + #define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) 47 + #define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) 48 + #define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) 49 + #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ 50 + #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ 51 + #define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */ 52 + #define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */ 53 + #define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */ 54 + 55 + #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 56 + #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 57 + 58 static const struct ar9300_eeprom ar9300_default = { 59 .eepromVersion = 2, 60 .templateVersion = 2, ··· 608 }, 609 } 610 }; 611 + 612 + static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) 613 + { 614 + if (fbin == AR9300_BCHAN_UNUSED) 615 + return fbin; 616 + 617 + return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); 618 + } 619 620 static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) 621 { ··· 1417 #undef POW_SM 1418 } 1419 1420 + static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, 1421 + u8 *targetPowerValT2) 1422 { 1423 /* XXX: hard code for now, need to get from eeprom struct */ 1424 u8 ht40PowerIncForPdadc = 0; 1425 bool is2GHz = false; ··· 1553 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); 1554 i++; 1555 } 1556 } 1557 1558 static int ar9003_hw_cal_pier_get(struct ath_hw *ah, ··· 1799 return 0; 1800 } 1801 1802 + static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep, 1803 + int idx, 1804 + int edge, 1805 + bool is2GHz) 1806 + { 1807 + struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; 1808 + struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; 1809 + 1810 + if (is2GHz) 1811 + return ctl_2g[idx].ctlEdges[edge].tPower; 1812 + else 1813 + return ctl_5g[idx].ctlEdges[edge].tPower; 1814 + } 1815 + 1816 + static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, 1817 + int idx, 1818 + unsigned int edge, 1819 + u16 freq, 1820 + bool is2GHz) 1821 + { 1822 + struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; 1823 + struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; 1824 + 1825 + u8 *ctl_freqbin = is2GHz ? 1826 + &eep->ctl_freqbin_2G[idx][0] : 1827 + &eep->ctl_freqbin_5G[idx][0]; 1828 + 1829 + if (is2GHz) { 1830 + if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && 1831 + ctl_2g[idx].ctlEdges[edge - 1].flag) 1832 + return ctl_2g[idx].ctlEdges[edge - 1].tPower; 1833 + } else { 1834 + if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && 1835 + ctl_5g[idx].ctlEdges[edge - 1].flag) 1836 + return ctl_5g[idx].ctlEdges[edge - 1].tPower; 1837 + } 1838 + 1839 + return AR9300_MAX_RATE_POWER; 1840 + } 1841 + 1842 + /* 1843 + * Find the maximum conformance test limit for the given channel and CTL info 1844 + */ 1845 + static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep, 1846 + u16 freq, int idx, bool is2GHz) 1847 + { 1848 + u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; 1849 + u8 *ctl_freqbin = is2GHz ? 1850 + &eep->ctl_freqbin_2G[idx][0] : 1851 + &eep->ctl_freqbin_5G[idx][0]; 1852 + u16 num_edges = is2GHz ? 
1853 + AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G; 1854 + unsigned int edge; 1855 + 1856 + /* Get the edge power */ 1857 + for (edge = 0; 1858 + (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED); 1859 + edge++) { 1860 + /* 1861 + * If there's an exact channel match or an inband flag set 1862 + * on the lower channel use the given rdEdgePower 1863 + */ 1864 + if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) { 1865 + twiceMaxEdgePower = 1866 + ar9003_hw_get_direct_edge_power(eep, idx, 1867 + edge, is2GHz); 1868 + break; 1869 + } else if ((edge > 0) && 1870 + (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge], 1871 + is2GHz))) { 1872 + twiceMaxEdgePower = 1873 + ar9003_hw_get_indirect_edge_power(eep, idx, 1874 + edge, freq, 1875 + is2GHz); 1876 + /* 1877 + * Leave loop - no more affecting edges possible in 1878 + * this monotonic increasing list 1879 + */ 1880 + break; 1881 + } 1882 + } 1883 + return twiceMaxEdgePower; 1884 + } 1885 + 1886 + static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, 1887 + struct ath9k_channel *chan, 1888 + u8 *pPwrArray, u16 cfgCtl, 1889 + u8 twiceAntennaReduction, 1890 + u8 twiceMaxRegulatoryPower, 1891 + u16 powerLimit) 1892 + { 1893 + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1894 + struct ath_common *common = ath9k_hw_common(ah); 1895 + struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; 1896 + u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; 1897 + static const u16 tpScaleReductionTable[5] = { 1898 + 0, 3, 6, 9, AR9300_MAX_RATE_POWER 1899 + }; 1900 + int i; 1901 + int16_t twiceLargestAntenna; 1902 + u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 1903 + u16 ctlModesFor11a[] = { 1904 + CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 1905 + }; 1906 + u16 ctlModesFor11g[] = { 1907 + CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, 1908 + CTL_11G_EXT, CTL_2GHT40 1909 + }; 1910 + u16 numCtlModes, *pCtlMode, ctlMode, freq; 1911 + struct chan_centers centers; 1912 + u8 *ctlIndex; 1913 + u8 ctlNum; 1914 + u16 twiceMinEdgePower; 1915 + bool is2ghz = IS_CHAN_2GHZ(chan); 1916 + 1917 + ath9k_hw_get_channel_centers(ah, chan, &centers); 1918 + 1919 + /* Compute TxPower reduction due to Antenna Gain */ 1920 + if (is2ghz) 1921 + twiceLargestAntenna = pEepData->modalHeader2G.antennaGain; 1922 + else 1923 + twiceLargestAntenna = pEepData->modalHeader5G.antennaGain; 1924 + 1925 + twiceLargestAntenna = (int16_t)min((twiceAntennaReduction) - 1926 + twiceLargestAntenna, 0); 1927 + 1928 + /* 1929 + * scaledPower is the minimum of the user input power level 1930 + * and the regulatory allowed power level 1931 + */ 1932 + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; 1933 + 1934 + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) { 1935 + maxRegAllowedPower -= 1936 + (tpScaleReductionTable[(regulatory->tp_scale)] * 2); 1937 + } 1938 + 1939 + scaledPower = min(powerLimit, maxRegAllowedPower); 1940 + 1941 + /* 1942 + * Reduce scaled Power by number of chains active to get 1943 + * to per chain tx power level 1944 + */ 1945 + switch (ar5416_get_ntxchains(ah->txchainmask)) { 1946 + case 1: 1947 + break; 1948 + case 2: 1949 + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 1950 + break; 1951 + case 3: 1952 + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 1953 + break; 1954 + } 1955 + 1956 + scaledPower = max((u16)0, scaledPower); 1957 + 1958 + /* 1959 + * Get target powers from EEPROM - our baseline for TX Power 1960 + */ 1961 + if (is2ghz) { 1962 + /* Setup for CTL modes */ 1963 + /* CTL_11B, CTL_11G, 
CTL_2GHT20 */ 1964 + numCtlModes = 1965 + ARRAY_SIZE(ctlModesFor11g) - 1966 + SUB_NUM_CTL_MODES_AT_2G_40; 1967 + pCtlMode = ctlModesFor11g; 1968 + if (IS_CHAN_HT40(chan)) 1969 + /* All 2G CTL's */ 1970 + numCtlModes = ARRAY_SIZE(ctlModesFor11g); 1971 + } else { 1972 + /* Setup for CTL modes */ 1973 + /* CTL_11A, CTL_5GHT20 */ 1974 + numCtlModes = ARRAY_SIZE(ctlModesFor11a) - 1975 + SUB_NUM_CTL_MODES_AT_5G_40; 1976 + pCtlMode = ctlModesFor11a; 1977 + if (IS_CHAN_HT40(chan)) 1978 + /* All 5G CTL's */ 1979 + numCtlModes = ARRAY_SIZE(ctlModesFor11a); 1980 + } 1981 + 1982 + /* 1983 + * For MIMO, need to apply regulatory caps individually across 1984 + * dynamically running modes: CCK, OFDM, HT20, HT40 1985 + * 1986 + * The outer loop walks through each possible applicable runtime mode. 1987 + * The inner loop walks through each ctlIndex entry in EEPROM. 1988 + * The ctl value is encoded as [7:4] == test group, [3:0] == test mode. 1989 + */ 1990 + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { 1991 + bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || 1992 + (pCtlMode[ctlMode] == CTL_2GHT40); 1993 + if (isHt40CtlMode) 1994 + freq = centers.synth_center; 1995 + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) 1996 + freq = centers.ext_center; 1997 + else 1998 + freq = centers.ctl_center; 1999 + 2000 + ath_print(common, ATH_DBG_REGULATORY, 2001 + "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, " 2002 + "EXT_ADDITIVE %d\n", 2003 + ctlMode, numCtlModes, isHt40CtlMode, 2004 + (pCtlMode[ctlMode] & EXT_ADDITIVE)); 2005 + 2006 + /* walk through each CTL index stored in EEPROM */ 2007 + if (is2ghz) { 2008 + ctlIndex = pEepData->ctlIndex_2G; 2009 + ctlNum = AR9300_NUM_CTLS_2G; 2010 + } else { 2011 + ctlIndex = pEepData->ctlIndex_5G; 2012 + ctlNum = AR9300_NUM_CTLS_5G; 2013 + } 2014 + 2015 + for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { 2016 + ath_print(common, ATH_DBG_REGULATORY, 2017 + "LOOP-Ctlidx %d: cfgCtl 0x%2.2x " 2018 + "pCtlMode 0x%2.2x ctlIndex 0x%2.2x " 2019 + "chan %dn", 2020 + i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], 2021 + chan->channel); 2022 + 2023 + /* 2024 + * compare test group from regulatory 2025 + * channel list with test mode from pCtlMode 2026 + * list 2027 + */ 2028 + if ((((cfgCtl & ~CTL_MODE_M) | 2029 + (pCtlMode[ctlMode] & CTL_MODE_M)) == 2030 + ctlIndex[i]) || 2031 + (((cfgCtl & ~CTL_MODE_M) | 2032 + (pCtlMode[ctlMode] & CTL_MODE_M)) == 2033 + ((ctlIndex[i] & CTL_MODE_M) | 2034 + SD_NO_CTL))) { 2035 + twiceMinEdgePower = 2036 + ar9003_hw_get_max_edge_power(pEepData, 2037 + freq, i, 2038 + is2ghz); 2039 + 2040 + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) 2041 + /* 2042 + * Find the minimum of all CTL 2043 + * edge powers that apply to 2044 + * this channel 2045 + */ 2046 + twiceMaxEdgePower = 2047 + min(twiceMaxEdgePower, 2048 + twiceMinEdgePower); 2049 + else { 2050 + /* specific */ 2051 + twiceMaxEdgePower = 2052 + twiceMinEdgePower; 2053 + break; 2054 + } 2055 + } 2056 + } 2057 + 2058 + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); 2059 + 2060 + ath_print(common, ATH_DBG_REGULATORY, 2061 + "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d " 2062 + "sP %d minCtlPwr %d\n", 2063 + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, 2064 + scaledPower, minCtlPower); 2065 + 2066 + /* Apply ctl mode to correct target power set */ 2067 + switch (pCtlMode[ctlMode]) { 2068 + case CTL_11B: 2069 + for (i = ALL_TARGET_LEGACY_1L_5L; 2070 + i <= ALL_TARGET_LEGACY_11S; i++) 2071 + pPwrArray[i] = 2072 + (u8)min((u16)pPwrArray[i], 2073 + minCtlPower); 2074 + break; 2075 + case CTL_11A: 
2076 + case CTL_11G: 2077 + for (i = ALL_TARGET_LEGACY_6_24; 2078 + i <= ALL_TARGET_LEGACY_54; i++) 2079 + pPwrArray[i] = 2080 + (u8)min((u16)pPwrArray[i], 2081 + minCtlPower); 2082 + break; 2083 + case CTL_5GHT20: 2084 + case CTL_2GHT20: 2085 + for (i = ALL_TARGET_HT20_0_8_16; 2086 + i <= ALL_TARGET_HT20_21; i++) 2087 + pPwrArray[i] = 2088 + (u8)min((u16)pPwrArray[i], 2089 + minCtlPower); 2090 + pPwrArray[ALL_TARGET_HT20_22] = 2091 + (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], 2092 + minCtlPower); 2093 + pPwrArray[ALL_TARGET_HT20_23] = 2094 + (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], 2095 + minCtlPower); 2096 + break; 2097 + case CTL_5GHT40: 2098 + case CTL_2GHT40: 2099 + for (i = ALL_TARGET_HT40_0_8_16; 2100 + i <= ALL_TARGET_HT40_23; i++) 2101 + pPwrArray[i] = 2102 + (u8)min((u16)pPwrArray[i], 2103 + minCtlPower); 2104 + break; 2105 + default: 2106 + break; 2107 + } 2108 + } /* end ctl mode checking */ 2109 + } 2110 + 2111 static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, 2112 struct ath9k_channel *chan, u16 cfgCtl, 2113 u8 twiceAntennaReduction, 2114 u8 twiceMaxRegulatoryPower, 2115 u8 powerLimit) 2116 { 2117 + struct ath_common *common = ath9k_hw_common(ah); 2118 + u8 targetPowerValT2[ar9300RateSize]; 2119 + unsigned int i = 0; 2120 + 2121 + ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); 2122 + ar9003_hw_set_power_per_rate_table(ah, chan, 2123 + targetPowerValT2, cfgCtl, 2124 + twiceAntennaReduction, 2125 + twiceMaxRegulatoryPower, 2126 + powerLimit); 2127 + 2128 + while (i < ar9300RateSize) { 2129 + ath_print(common, ATH_DBG_EEPROM, 2130 + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 2131 + i++; 2132 + ath_print(common, ATH_DBG_EEPROM, 2133 + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 2134 + i++; 2135 + ath_print(common, ATH_DBG_EEPROM, 2136 + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 2137 + i++; 2138 + ath_print(common, ATH_DBG_EEPROM, 2139 + "TPC[%02d] 0x%08x\n\n", i, targetPowerValT2[i]); 2140 + i++; 2141 + } 2142 + 2143 + /* Write target power array to registers */ 2144 + ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 2145 + 2146 + /* 2147 + * This is the TX power we send back to driver core, 2148 + * and it can use to pass to userspace to display our 2149 + * currently configured TX power setting. 2150 + * 2151 + * Since power is rate dependent, use one of the indices 2152 + * from the AR9300_Rates enum to select an entry from 2153 + * targetPowerValT2[] to report. Currently returns the 2154 + * power for HT40 MCS 0, HT20 MCS 0, or OFDM 6 Mbps 2155 + * as CCK power is less interesting (?). 2156 + */ 2157 + i = ALL_TARGET_LEGACY_6_24; /* legacy */ 2158 + if (IS_CHAN_HT40(chan)) 2159 + i = ALL_TARGET_HT40_0_8_16; /* ht40 */ 2160 + else if (IS_CHAN_HT20(chan)) 2161 + i = ALL_TARGET_HT20_0_8_16; /* ht20 */ 2162 + 2163 + ah->txpower_limit = targetPowerValT2[i]; 2164 + 2165 ar9003_hw_calibration_apply(ah, chan->channel); 2166 } 2167
+9 -8
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
··· 577 } 578 579 void ar9003_paprd_populate_single_table(struct ath_hw *ah, 580 - struct ath9k_channel *chan, int chain) 581 { 582 - u32 *paprd_table_val = chan->pa_table[chain]; 583 - u32 small_signal_gain = chan->small_signal_gain[chain]; 584 u32 training_power; 585 u32 reg = 0; 586 int i; ··· 655 } 656 EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); 657 658 - int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, 659 - int chain) 660 { 661 - u16 *small_signal_gain = &chan->small_signal_gain[chain]; 662 - u32 *pa_table = chan->pa_table[chain]; 663 u32 *data_L, *data_U; 664 int i, status = 0; 665 u32 *buf; 666 u32 reg; 667 668 - memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain])); 669 670 buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC); 671 if (!buf)
··· 577 } 578 579 void ar9003_paprd_populate_single_table(struct ath_hw *ah, 580 + struct ath9k_hw_cal_data *caldata, 581 + int chain) 582 { 583 + u32 *paprd_table_val = caldata->pa_table[chain]; 584 + u32 small_signal_gain = caldata->small_signal_gain[chain]; 585 u32 training_power; 586 u32 reg = 0; 587 int i; ··· 654 } 655 EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); 656 657 + int ar9003_paprd_create_curve(struct ath_hw *ah, 658 + struct ath9k_hw_cal_data *caldata, int chain) 659 { 660 + u16 *small_signal_gain = &caldata->small_signal_gain[chain]; 661 + u32 *pa_table = caldata->pa_table[chain]; 662 u32 *data_L, *data_U; 663 int i, status = 0; 664 u32 *buf; 665 u32 reg; 666 667 + memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain])); 668 669 buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC); 670 if (!buf)
+5 -1
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 542 u32 reg = INI_RA(iniArr, i, 0); 543 u32 val = INI_RA(iniArr, i, column); 544 545 - REG_WRITE(ah, reg, val); 546 DO_DELAY(regWrites); 547 } 548 }
··· 542 u32 reg = INI_RA(iniArr, i, 0); 543 u32 val = INI_RA(iniArr, i, column); 544 545 + if (reg >= 0x16000 && reg < 0x17000) 546 + ath9k_hw_analog_shift_regwrite(ah, reg, val); 547 + else 548 + REG_WRITE(ah, reg, val); 549 + 550 DO_DELAY(regWrites); 551 } 552 }
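
Context for the new branch: the 0x16000-0x17000 window holds the chip's analog shift registers, whose writes need time to latch before the next register access. The helper routed to above behaves roughly like this (a from-memory sketch of the ath9k helper, not a verbatim quote):

    /* Rough shape of ath9k_hw_analog_shift_regwrite(): a normal write
     * plus a settling delay when the per-chip config asks for the
     * analog shift-register workaround.
     */
    void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
    {
            REG_WRITE(ah, reg, val);

            if (ah->config.analog_shiftreg)
                    udelay(100);
    }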
+2 -1
drivers/net/wireless/ath/ath9k/ath9k.h
··· 510 #define SC_OP_BEACONS BIT(1) 511 #define SC_OP_RXAGGR BIT(2) 512 #define SC_OP_TXAGGR BIT(3) 513 - #define SC_OP_FULL_RESET BIT(4) 514 #define SC_OP_PREAMBLE_SHORT BIT(5) 515 #define SC_OP_PROTECT_ENABLE BIT(6) 516 #define SC_OP_RXFLUSH BIT(7) ··· 609 struct ath_wiphy { 610 struct ath_softc *sc; /* shared for all virtual wiphys */ 611 struct ieee80211_hw *hw; 612 enum ath_wiphy_state { 613 ATH_WIPHY_INACTIVE, 614 ATH_WIPHY_ACTIVE,
··· 510 #define SC_OP_BEACONS BIT(1) 511 #define SC_OP_RXAGGR BIT(2) 512 #define SC_OP_TXAGGR BIT(3) 513 + #define SC_OP_OFFCHANNEL BIT(4) 514 #define SC_OP_PREAMBLE_SHORT BIT(5) 515 #define SC_OP_PROTECT_ENABLE BIT(6) 516 #define SC_OP_RXFLUSH BIT(7) ··· 609 struct ath_wiphy { 610 struct ath_softc *sc; /* shared for all virtual wiphys */ 611 struct ieee80211_hw *hw; 612 + struct ath9k_hw_cal_data caldata; 613 enum ath_wiphy_state { 614 ATH_WIPHY_INACTIVE, 615 ATH_WIPHY_ACTIVE,
+64 -54
drivers/net/wireless/ath/ath9k/calib.c
··· 22 /* We can tune this as we go by monitoring really low values */ 23 #define ATH9K_NF_TOO_LOW -60 24 25 - /* AR5416 may return very high value (like -31 dBm), in those cases the nf 26 - * is incorrect and we should use the static NF value. Later we can try to 27 - * find out why they are reporting these values */ 28 - 29 - static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf) 30 - { 31 - if (nf > ATH9K_NF_TOO_LOW) { 32 - ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 33 - "noise floor value detected (%d) is " 34 - "lower than what we think is a " 35 - "reasonable value (%d)\n", 36 - nf, ATH9K_NF_TOO_LOW); 37 - return false; 38 - } 39 - return true; 40 - } 41 - 42 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) 43 { 44 int16_t nfval; ··· 104 ah->cal_samples = 0; 105 } 106 107 /* This is done for the currently configured channel */ 108 bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 109 { ··· 124 struct ieee80211_conf *conf = &common->hw->conf; 125 struct ath9k_cal_list *currCal = ah->cal_list_curr; 126 127 - if (!ah->curchan) 128 return true; 129 130 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) ··· 147 "Resetting Cal %d state for channel %u\n", 148 currCal->calData->calType, conf->channel->center_freq); 149 150 - ah->curchan->CalValid &= ~currCal->calData->calType; 151 currCal->calState = CAL_WAITING; 152 153 return false; 154 } 155 EXPORT_SYMBOL(ath9k_hw_reset_calvalid); 156 157 - void ath9k_hw_start_nfcal(struct ath_hw *ah) 158 { 159 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 160 AR_PHY_AGC_CONTROL_ENABLE_NF); 161 - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 162 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 163 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 164 } 165 166 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) 167 { 168 - struct ath9k_nfcal_hist *h; 169 unsigned i, j; 170 int32_t val; 171 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 172 struct ath_common *common = ath9k_hw_common(ah); 173 174 - h = ah->nfCalHist; 175 176 for (i = 0; i < NUM_NF_READINGS; i++) { 177 if (chainmask & (1 << i)) { 178 val = REG_READ(ah, ah->nf_regs[i]); 179 val &= 0xFFFFFE00; 180 - val |= (((u32) (h[i].privNF) << 1) & 0x1ff); 181 REG_WRITE(ah, ah->nf_regs[i], val); 182 } 183 } ··· 291 } 292 } 293 294 - int16_t ath9k_hw_getnf(struct ath_hw *ah, 295 - struct ath9k_channel *chan) 296 { 297 struct ath_common *common = ath9k_hw_common(ah); 298 int16_t nf, nfThresh; 299 int16_t nfarray[NUM_NF_READINGS] = { 0 }; 300 struct ath9k_nfcal_hist *h; 301 struct ieee80211_channel *c = chan->chan; 302 303 chan->channelFlags &= (~CHANNEL_CW_INT); 304 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 305 ath_print(common, ATH_DBG_CALIBRATE, 306 "NF did not complete in calibration window\n"); 307 nf = 0; 308 - chan->rawNoiseFloor = nf; 309 - return chan->rawNoiseFloor; 310 } else { 311 ath9k_hw_do_getnf(ah, nfarray); 312 ath9k_hw_nf_sanitize(ah, nfarray); ··· 324 } 325 } 326 327 - h = ah->nfCalHist; 328 - 329 ath9k_hw_update_nfcal_hist_buffer(h, nfarray); 330 - chan->rawNoiseFloor = h[0].privNF; 331 - 332 - return chan->rawNoiseFloor; 333 } 334 335 - void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah) 336 { 337 - struct ath_nf_limits *limit; 338 int i, j; 339 340 - if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan)) 341 - limit = &ah->nf_2g; 342 - else 343 - limit = &ah->nf_5g; 344 345 for (i = 0; i < NUM_NF_READINGS; i++) { 346 - ah->nfCalHist[i].currIndex = 0; 347 - ah->nfCalHist[i].privNF = limit->nominal; 348 - ah->nfCalHist[i].invalidNFcount = 349 - 
AR_PHY_CCA_FILTERWINDOW_LENGTH; 350 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { 351 - ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal; 352 } 353 } 354 } 355 356 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) 357 { 358 - s16 nf; 359 360 - if (chan->rawNoiseFloor == 0) 361 - nf = -96; 362 - else 363 - nf = chan->rawNoiseFloor; 364 - 365 - if (!ath9k_hw_nf_in_range(ah, nf)) 366 - nf = ATH_DEFAULT_NOISE_FLOOR; 367 - 368 - return nf; 369 } 370 EXPORT_SYMBOL(ath9k_hw_getchan_noise);
··· 22 /* We can tune this as we go by monitoring really low values */ 23 #define ATH9K_NF_TOO_LOW -60 24 25 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) 26 { 27 int16_t nfval; ··· 121 ah->cal_samples = 0; 122 } 123 124 + static s16 ath9k_hw_get_default_nf(struct ath_hw *ah, 125 + struct ath9k_channel *chan) 126 + { 127 + struct ath_nf_limits *limit; 128 + 129 + if (!chan || IS_CHAN_2GHZ(chan)) 130 + limit = &ah->nf_2g; 131 + else 132 + limit = &ah->nf_5g; 133 + 134 + return limit->nominal; 135 + } 136 + 137 /* This is done for the currently configured channel */ 138 bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 139 { ··· 128 struct ieee80211_conf *conf = &common->hw->conf; 129 struct ath9k_cal_list *currCal = ah->cal_list_curr; 130 131 + if (!ah->caldata) 132 return true; 133 134 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) ··· 151 "Resetting Cal %d state for channel %u\n", 152 currCal->calData->calType, conf->channel->center_freq); 153 154 + ah->caldata->CalValid &= ~currCal->calData->calType; 155 currCal->calState = CAL_WAITING; 156 157 return false; 158 } 159 EXPORT_SYMBOL(ath9k_hw_reset_calvalid); 160 161 + void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update) 162 { 163 + if (ah->caldata) 164 + ah->caldata->nfcal_pending = true; 165 + 166 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 167 AR_PHY_AGC_CONTROL_ENABLE_NF); 168 + 169 + if (update) 170 + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 171 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 172 + else 173 + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 174 + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 175 + 176 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 177 } 178 179 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) 180 { 181 + struct ath9k_nfcal_hist *h = NULL; 182 unsigned i, j; 183 int32_t val; 184 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 185 struct ath_common *common = ath9k_hw_common(ah); 186 + s16 default_nf = ath9k_hw_get_default_nf(ah, chan); 187 188 + if (ah->caldata) 189 + h = ah->caldata->nfCalHist; 190 191 for (i = 0; i < NUM_NF_READINGS; i++) { 192 if (chainmask & (1 << i)) { 193 + s16 nfval; 194 + 195 + if (h) 196 + nfval = h[i].privNF; 197 + else 198 + nfval = default_nf; 199 + 200 val = REG_READ(ah, ah->nf_regs[i]); 201 val &= 0xFFFFFE00; 202 + val |= (((u32) nfval << 1) & 0x1ff); 203 REG_WRITE(ah, ah->nf_regs[i], val); 204 } 205 } ··· 277 } 278 } 279 280 + bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) 281 { 282 struct ath_common *common = ath9k_hw_common(ah); 283 int16_t nf, nfThresh; 284 int16_t nfarray[NUM_NF_READINGS] = { 0 }; 285 struct ath9k_nfcal_hist *h; 286 struct ieee80211_channel *c = chan->chan; 287 + struct ath9k_hw_cal_data *caldata = ah->caldata; 288 + 289 + if (!caldata) 290 + return false; 291 292 chan->channelFlags &= (~CHANNEL_CW_INT); 293 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 294 ath_print(common, ATH_DBG_CALIBRATE, 295 "NF did not complete in calibration window\n"); 296 nf = 0; 297 + caldata->rawNoiseFloor = nf; 298 + return false; 299 } else { 300 ath9k_hw_do_getnf(ah, nfarray); 301 ath9k_hw_nf_sanitize(ah, nfarray); ··· 307 } 308 } 309 310 + h = caldata->nfCalHist; 311 + caldata->nfcal_pending = false; 312 ath9k_hw_update_nfcal_hist_buffer(h, nfarray); 313 + caldata->rawNoiseFloor = h[0].privNF; 314 + return true; 315 } 316 317 + void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 318 + struct ath9k_channel *chan) 319 { 320 + struct ath9k_nfcal_hist *h; 321 + s16 default_nf; 322 int i, j; 323 324 + if (!ah->caldata) 
325 + return; 326 327 + h = ah->caldata->nfCalHist; 328 + default_nf = ath9k_hw_get_default_nf(ah, chan); 329 for (i = 0; i < NUM_NF_READINGS; i++) { 330 + h[i].currIndex = 0; 331 + h[i].privNF = default_nf; 332 + h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH; 333 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { 334 + h[i].nfCalBuffer[j] = default_nf; 335 } 336 } 337 } 338 339 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) 340 { 341 + if (!ah->caldata || !ah->caldata->rawNoiseFloor) 342 + return ath9k_hw_get_default_nf(ah, chan); 343 344 + return ah->caldata->rawNoiseFloor; 345 } 346 EXPORT_SYMBOL(ath9k_hw_getchan_noise);
+4 -4
drivers/net/wireless/ath/ath9k/calib.h
··· 108 }; 109 110 bool ath9k_hw_reset_calvalid(struct ath_hw *ah); 111 - void ath9k_hw_start_nfcal(struct ath_hw *ah); 112 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); 113 - int16_t ath9k_hw_getnf(struct ath_hw *ah, 114 - struct ath9k_channel *chan); 115 - void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); 116 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 117 void ath9k_hw_reset_calibration(struct ath_hw *ah, 118 struct ath9k_cal_list *currCal);
··· 108 }; 109 110 bool ath9k_hw_reset_calvalid(struct ath_hw *ah); 111 + void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update); 112 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); 113 + bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); 114 + void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 115 + struct ath9k_channel *chan); 116 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 117 void ath9k_hw_reset_calibration(struct ath_hw *ah, 118 struct ath9k_cal_list *currCal);
+2
drivers/net/wireless/ath/ath9k/htc.h
··· 353 u16 seq_no; 354 u32 bmiss_cnt; 355 356 spinlock_t beacon_lock; 357 358 bool tx_queues_stop;
··· 353 u16 seq_no; 354 u32 bmiss_cnt; 355 356 + struct ath9k_hw_cal_data caldata[38]; 357 + 358 spinlock_t beacon_lock; 359 360 bool tx_queues_stop;
+6 -4
drivers/net/wireless/ath/ath9k/htc_drv_main.c
··· 125 struct ieee80211_conf *conf = &common->hw->conf; 126 bool fastcc = true; 127 struct ieee80211_channel *channel = hw->conf.channel; 128 enum htc_phymode mode; 129 __be16 htc_mode; 130 u8 cmd_rsp; ··· 150 priv->ah->curchan->channel, 151 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf)); 152 153 - ret = ath9k_hw_reset(ah, hchan, fastcc); 154 if (ret) { 155 ath_print(common, ATH_DBG_FATAL, 156 "Unable to reset channel (%u Mhz) " ··· 1030 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 1031 1032 /* Reset the HW */ 1033 - ret = ath9k_hw_reset(ah, ah->curchan, false); 1034 if (ret) { 1035 ath_print(common, ATH_DBG_FATAL, 1036 "Unable to reset hardware; reset status %d " ··· 1093 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 1094 1095 /* Reset the HW */ 1096 - ret = ath9k_hw_reset(ah, ah->curchan, false); 1097 if (ret) { 1098 ath_print(common, ATH_DBG_FATAL, 1099 "Unable to reset hardware; reset status %d " ··· 1181 ath9k_hw_configpcipowersave(ah, 0, 0); 1182 1183 ath9k_hw_htc_resetinit(ah); 1184 - ret = ath9k_hw_reset(ah, init_channel, false); 1185 if (ret) { 1186 ath_print(common, ATH_DBG_FATAL, 1187 "Unable to reset hardware; reset status %d "
··· 125 struct ieee80211_conf *conf = &common->hw->conf; 126 bool fastcc = true; 127 struct ieee80211_channel *channel = hw->conf.channel; 128 + struct ath9k_hw_cal_data *caldata; 129 enum htc_phymode mode; 130 __be16 htc_mode; 131 u8 cmd_rsp; ··· 149 priv->ah->curchan->channel, 150 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf)); 151 152 + caldata = &priv->caldata[channel->hw_value]; 153 + ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 154 if (ret) { 155 ath_print(common, ATH_DBG_FATAL, 156 "Unable to reset channel (%u Mhz) " ··· 1028 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 1029 1030 /* Reset the HW */ 1031 + ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 1032 if (ret) { 1033 ath_print(common, ATH_DBG_FATAL, 1034 "Unable to reset hardware; reset status %d " ··· 1091 ah->curchan = ath9k_cmn_get_curchannel(hw, ah); 1092 1093 /* Reset the HW */ 1094 + ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 1095 if (ret) { 1096 ath_print(common, ATH_DBG_FATAL, 1097 "Unable to reset hardware; reset status %d " ··· 1179 ath9k_hw_configpcipowersave(ah, 0, 0); 1180 1181 ath9k_hw_htc_resetinit(ah); 1182 + ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1183 if (ret) { 1184 ath_print(common, ATH_DBG_FATAL, 1185 "Unable to reset hardware; reset status %d "
+14 -11
drivers/net/wireless/ath/ath9k/hw.c
··· 610 else 611 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 612 613 - ath9k_init_nfcal_hist_buffer(ah); 614 ah->bb_watchdog_timeout_ms = 25; 615 616 common->state = ATH_HW_INITIALIZED; ··· 1182 1183 ath9k_hw_spur_mitigate_freq(ah, chan); 1184 1185 - if (!chan->oneTimeCalsDone) 1186 - chan->oneTimeCalsDone = true; 1187 - 1188 return true; 1189 } 1190 ··· 1214 EXPORT_SYMBOL(ath9k_hw_check_alive); 1215 1216 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1217 - bool bChannelChange) 1218 { 1219 struct ath_common *common = ath9k_hw_common(ah); 1220 u32 saveLedState; ··· 1239 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1240 return -EIO; 1241 1242 - if (curchan && !ah->chip_fullsleep) 1243 ath9k_hw_getnf(ah, curchan); 1244 1245 if (bChannelChange && 1246 (ah->chip_fullsleep != true) && ··· 1262 1263 if (ath9k_hw_channel_change(ah, chan)) { 1264 ath9k_hw_loadnf(ah, ah->curchan); 1265 - ath9k_hw_start_nfcal(ah); 1266 return 0; 1267 } 1268 } ··· 1467 if (ah->btcoex_hw.enabled) 1468 ath9k_hw_btcoex_enable(ah); 1469 1470 - if (AR_SREV_9300_20_OR_LATER(ah)) { 1471 - ath9k_hw_loadnf(ah, curchan); 1472 - ath9k_hw_start_nfcal(ah); 1473 ar9003_hw_bb_watchdog_config(ah); 1474 - } 1475 1476 return 0; 1477 }
··· 610 else 611 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 612 613 ah->bb_watchdog_timeout_ms = 25; 614 615 common->state = ATH_HW_INITIALIZED; ··· 1183 1184 ath9k_hw_spur_mitigate_freq(ah, chan); 1185 1186 return true; 1187 } 1188 ··· 1218 EXPORT_SYMBOL(ath9k_hw_check_alive); 1219 1220 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1221 + struct ath9k_hw_cal_data *caldata, bool bChannelChange) 1222 { 1223 struct ath_common *common = ath9k_hw_common(ah); 1224 u32 saveLedState; ··· 1243 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1244 return -EIO; 1245 1246 + if (curchan && !ah->chip_fullsleep && ah->caldata) 1247 ath9k_hw_getnf(ah, curchan); 1248 + 1249 + ah->caldata = caldata; 1250 + if (caldata && 1251 + (chan->channel != caldata->channel || 1252 + (chan->channelFlags & ~CHANNEL_CW_INT) != 1253 + (caldata->channelFlags & ~CHANNEL_CW_INT))) { 1254 + /* Operating channel changed, reset channel calibration data */ 1255 + memset(caldata, 0, sizeof(*caldata)); 1256 + ath9k_init_nfcal_hist_buffer(ah, chan); 1257 + } 1258 1259 if (bChannelChange && 1260 (ah->chip_fullsleep != true) && ··· 1256 1257 if (ath9k_hw_channel_change(ah, chan)) { 1258 ath9k_hw_loadnf(ah, ah->curchan); 1259 + ath9k_hw_start_nfcal(ah, true); 1260 return 0; 1261 } 1262 } ··· 1461 if (ah->btcoex_hw.enabled) 1462 ath9k_hw_btcoex_enable(ah); 1463 1464 + if (AR_SREV_9300_20_OR_LATER(ah)) 1465 ar9003_hw_bb_watchdog_config(ah); 1466 1467 return 0; 1468 }
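
One subtlety in the caldata check added above: CHANNEL_CW_INT is masked out of the flag comparison because the noise-floor code sets and clears that bit at runtime when it sees CW interference, so a flip of that bit alone must not be treated as a channel change that wipes calibration state. The test, rewritten as a standalone predicate (a sketch, not driver code):

    /* Sketch: the channel counts as unchanged for calibration purposes
     * when the frequency and every flag except the transient
     * CW-interference marker are identical.
     */
    static bool same_cal_channel(const struct ath9k_channel *chan,
                                 const struct ath9k_hw_cal_data *cal)
    {
            return chan->channel == cal->channel &&
                   !((chan->channelFlags ^ cal->channelFlags) &
                     ~CHANNEL_CW_INT);
    }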
+20 -13
drivers/net/wireless/ath/ath9k/hw.h
··· 346 CHANNEL_HT40PLUS | \ 347 CHANNEL_HT40MINUS) 348 349 struct ath9k_channel { 350 struct ieee80211_channel *chan; 351 u16 channel; 352 u32 channelFlags; 353 u32 chanmode; 354 - int32_t CalValid; 355 - bool oneTimeCalsDone; 356 - int8_t iCoff; 357 - int8_t qCoff; 358 - int16_t rawNoiseFloor; 359 - bool paprd_done; 360 - u16 small_signal_gain[AR9300_MAX_CHAINS]; 361 - u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 362 }; 363 364 #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ ··· 675 enum nl80211_iftype opmode; 676 enum ath9k_power_mode power_mode; 677 678 - struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 679 struct ath9k_pacal_info pacal_info; 680 struct ar5416Stats stats; 681 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; ··· 869 void ath9k_hw_deinit(struct ath_hw *ah); 870 int ath9k_hw_init(struct ath_hw *ah); 871 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 872 - bool bChannelChange); 873 int ath9k_hw_fill_cap_info(struct ath_hw *ah); 874 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 875 ··· 964 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); 965 void ar9003_paprd_enable(struct ath_hw *ah, bool val); 966 void ar9003_paprd_populate_single_table(struct ath_hw *ah, 967 - struct ath9k_channel *chan, int chain); 968 - int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, 969 - int chain); 970 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); 971 int ar9003_paprd_init_table(struct ath_hw *ah); 972 bool ar9003_paprd_is_done(struct ath_hw *ah);
··· 346 CHANNEL_HT40PLUS | \ 347 CHANNEL_HT40MINUS) 348 349 + struct ath9k_hw_cal_data { 350 + u16 channel; 351 + u32 channelFlags; 352 + int32_t CalValid; 353 + int8_t iCoff; 354 + int8_t qCoff; 355 + int16_t rawNoiseFloor; 356 + bool paprd_done; 357 + bool nfcal_pending; 358 + u16 small_signal_gain[AR9300_MAX_CHAINS]; 359 + u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 360 + struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 361 + }; 362 + 363 struct ath9k_channel { 364 struct ieee80211_channel *chan; 365 u16 channel; 366 u32 channelFlags; 367 u32 chanmode; 368 }; 369 370 #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ ··· 669 enum nl80211_iftype opmode; 670 enum ath9k_power_mode power_mode; 671 672 + struct ath9k_hw_cal_data *caldata; 673 struct ath9k_pacal_info pacal_info; 674 struct ar5416Stats stats; 675 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; ··· 863 void ath9k_hw_deinit(struct ath_hw *ah); 864 int ath9k_hw_init(struct ath_hw *ah); 865 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 866 + struct ath9k_hw_cal_data *caldata, bool bChannelChange); 867 int ath9k_hw_fill_cap_info(struct ath_hw *ah); 868 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 869 ··· 958 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); 959 void ar9003_paprd_enable(struct ath_hw *ah, bool val); 960 void ar9003_paprd_populate_single_table(struct ath_hw *ah, 961 + struct ath9k_hw_cal_data *caldata, 962 + int chain); 963 + int ar9003_paprd_create_curve(struct ath_hw *ah, 964 + struct ath9k_hw_cal_data *caldata, int chain); 965 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); 966 int ar9003_paprd_init_table(struct ath_hw *ah); 967 bool ar9003_paprd_is_done(struct ath_hw *ah);
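
With the new signature, each ath9k_hw_reset() caller decides which calibration state the reset uses, and NULL is a legal choice: the calib.c hunks above bail out early when ah->caldata is absent, so a NULL reset simply runs without persistent NF history, IQ coefficients or PAPRD tables. A condensed sketch of the convention (the offchannel flag and aphy variable are illustrative):

    /* Sketch: off-channel resets pass NULL so scan hops never disturb
     * the home channel's accumulated calibration data.
     */
    struct ath9k_hw_cal_data *caldata = NULL;

    if (!offchannel)
            caldata = &aphy->caldata;       /* per-wiphy cal state */

    r = ath9k_hw_reset(ah, hchan, caldata, fastcc);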
+60 -44
drivers/net/wireless/ath/ath9k/main.c
··· 154 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 155 } 156 157 /* 158 * Set/change channels. If the channel is really being changed, it's done 159 * by reseting the chip. To accomplish this we must first cleanup any pending ··· 183 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 184 struct ath9k_channel *hchan) 185 { 186 struct ath_hw *ah = sc->sc_ah; 187 struct ath_common *common = ath9k_hw_common(ah); 188 struct ieee80211_conf *conf = &common->hw->conf; 189 bool fastcc = true, stopped; 190 struct ieee80211_channel *channel = hw->conf.channel; 191 int r; 192 193 if (sc->sc_flags & SC_OP_INVALID) 194 return -EIO; 195 196 ath9k_ps_wakeup(sc); 197 ··· 219 * to flush data frames already in queue because of 220 * changing channel. */ 221 222 - if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) 223 fastcc = false; 224 225 ath_print(common, ATH_DBG_CONFIG, 226 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", ··· 232 233 spin_lock_bh(&sc->sc_resetlock); 234 235 - r = ath9k_hw_reset(ah, hchan, fastcc); 236 if (r) { 237 ath_print(common, ATH_DBG_FATAL, 238 "Unable to reset channel (%u MHz), " ··· 242 goto ps_restore; 243 } 244 spin_unlock_bh(&sc->sc_resetlock); 245 - 246 - sc->sc_flags &= ~SC_OP_FULL_RESET; 247 248 if (ath_startrecv(sc) != 0) { 249 ath_print(common, ATH_DBG_FATAL, ··· 254 ath_update_txpow(sc); 255 ath9k_hw_set_interrupts(ah, ah->imask); 256 257 ps_restore: 258 ath9k_ps_restore(sc); 259 return r; ··· 268 static void ath_paprd_activate(struct ath_softc *sc) 269 { 270 struct ath_hw *ah = sc->sc_ah; 271 int chain; 272 273 - if (!ah->curchan->paprd_done) 274 return; 275 276 ath9k_ps_wakeup(sc); 277 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 278 if (!(ah->caps.tx_chainmask & BIT(chain))) 279 continue; 280 281 - ar9003_paprd_populate_single_table(ah, ah->curchan, chain); 282 } 283 284 ar9003_paprd_enable(ah, true); ··· 298 int band = hw->conf.channel->band; 299 struct ieee80211_supported_band *sband = &sc->sbands[band]; 300 struct ath_tx_control txctl; 301 int qnum, ftype; 302 int chain_ok = 0; 303 int chain; 304 int len = 1800; 305 int time_left; 306 int i; 307 308 skb = alloc_skb(len, GFP_KERNEL); 309 if (!skb) ··· 363 if (!ar9003_paprd_is_done(ah)) 364 break; 365 366 - if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0) 367 break; 368 369 chain_ok = 1; ··· 371 kfree_skb(skb); 372 373 if (chain_ok) { 374 - ah->curchan->paprd_done = true; 375 ath_paprd_activate(sc); 376 } 377 ··· 480 cal_interval = min(cal_interval, (u32)short_cal_interval); 481 482 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 483 - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && 484 - !(sc->sc_flags & SC_OP_SCANNING)) { 485 - if (!sc->sc_ah->curchan->paprd_done) 486 ieee80211_queue_work(sc->hw, &sc->paprd_work); 487 else 488 ath_paprd_activate(sc); 489 } 490 - } 491 - 492 - static void ath_start_ani(struct ath_common *common) 493 - { 494 - struct ath_hw *ah = common->ah; 495 - unsigned long timestamp = jiffies_to_msecs(jiffies); 496 - struct ath_softc *sc = (struct ath_softc *) common->priv; 497 - 498 - if (!(sc->sc_flags & SC_OP_ANI_RUN)) 499 - return; 500 - 501 - common->ani.longcal_timer = timestamp; 502 - common->ani.shortcal_timer = timestamp; 503 - common->ani.checkani_timer = timestamp; 504 - 505 - mod_timer(&common->ani.timer, 506 - jiffies + 507 - msecs_to_jiffies((u32)ah->config.ani_poll_interval)); 508 } 509 510 /* ··· 499 struct ath_hw *ah = sc->sc_ah; 500 struct ath_common *common = ath9k_hw_common(ah); 501 502 - if ((sc->sc_flags & 
SC_OP_SCANNING) || is_ht || 503 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) { 504 common->tx_chainmask = ah->caps.tx_chainmask; 505 common->rx_chainmask = ah->caps.rx_chainmask; ··· 839 ah->curchan = ath_get_curchannel(sc, sc->hw); 840 841 spin_lock_bh(&sc->sc_resetlock); 842 - r = ath9k_hw_reset(ah, ah->curchan, false); 843 if (r) { 844 ath_print(common, ATH_DBG_FATAL, 845 "Unable to reset channel (%u MHz), " ··· 899 ah->curchan = ath_get_curchannel(sc, hw); 900 901 spin_lock_bh(&sc->sc_resetlock); 902 - r = ath9k_hw_reset(ah, ah->curchan, false); 903 if (r) { 904 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 905 "Unable to reset channel (%u MHz), " ··· 932 ath_flushrecv(sc); 933 934 spin_lock_bh(&sc->sc_resetlock); 935 - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 936 if (r) 937 ath_print(common, ATH_DBG_FATAL, 938 "Unable to reset hardware; reset status %d\n", r); ··· 1107 * and then setup of the interrupt mask. 1108 */ 1109 spin_lock_bh(&sc->sc_resetlock); 1110 - r = ath9k_hw_reset(ah, init_channel, false); 1111 if (r) { 1112 ath_print(common, ATH_DBG_FATAL, 1113 "Unable to reset hardware; reset status %d " ··· 1601 1602 aphy->chan_idx = pos; 1603 aphy->chan_is_ht = conf_is_ht(conf); 1604 1605 if (aphy->state == ATH_WIPHY_SCAN || 1606 aphy->state == ATH_WIPHY_ACTIVE) ··· 2016 { 2017 struct ath_wiphy *aphy = hw->priv; 2018 struct ath_softc *sc = aphy->sc; 2019 - struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2020 2021 mutex_lock(&sc->mutex); 2022 if (ath9k_wiphy_scanning(sc)) { ··· 2033 aphy->state = ATH_WIPHY_SCAN; 2034 ath9k_wiphy_pause_all_forced(sc, aphy); 2035 sc->sc_flags |= SC_OP_SCANNING; 2036 - del_timer_sync(&common->ani.timer); 2037 - cancel_work_sync(&sc->paprd_work); 2038 - cancel_work_sync(&sc->hw_check_work); 2039 - cancel_delayed_work_sync(&sc->tx_complete_work); 2040 mutex_unlock(&sc->mutex); 2041 } 2042 ··· 2044 { 2045 struct ath_wiphy *aphy = hw->priv; 2046 struct ath_softc *sc = aphy->sc; 2047 - struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2048 2049 mutex_lock(&sc->mutex); 2050 aphy->state = ATH_WIPHY_ACTIVE; 2051 sc->sc_flags &= ~SC_OP_SCANNING; 2052 - sc->sc_flags |= SC_OP_FULL_RESET; 2053 - ath_start_ani(common); 2054 - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 2055 - ath_beacon_config(sc, NULL); 2056 mutex_unlock(&sc->mutex); 2057 } 2058
··· 154 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 155 } 156 157 + static void ath_start_ani(struct ath_common *common) 158 + { 159 + struct ath_hw *ah = common->ah; 160 + unsigned long timestamp = jiffies_to_msecs(jiffies); 161 + struct ath_softc *sc = (struct ath_softc *) common->priv; 162 + 163 + if (!(sc->sc_flags & SC_OP_ANI_RUN)) 164 + return; 165 + 166 + if (sc->sc_flags & SC_OP_OFFCHANNEL) 167 + return; 168 + 169 + common->ani.longcal_timer = timestamp; 170 + common->ani.shortcal_timer = timestamp; 171 + common->ani.checkani_timer = timestamp; 172 + 173 + mod_timer(&common->ani.timer, 174 + jiffies + 175 + msecs_to_jiffies((u32)ah->config.ani_poll_interval)); 176 + } 177 + 178 /* 179 * Set/change channels. If the channel is really being changed, it's done 180 * by reseting the chip. To accomplish this we must first cleanup any pending ··· 162 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 163 struct ath9k_channel *hchan) 164 { 165 + struct ath_wiphy *aphy = hw->priv; 166 struct ath_hw *ah = sc->sc_ah; 167 struct ath_common *common = ath9k_hw_common(ah); 168 struct ieee80211_conf *conf = &common->hw->conf; 169 bool fastcc = true, stopped; 170 struct ieee80211_channel *channel = hw->conf.channel; 171 + struct ath9k_hw_cal_data *caldata = NULL; 172 int r; 173 174 if (sc->sc_flags & SC_OP_INVALID) 175 return -EIO; 176 + 177 + del_timer_sync(&common->ani.timer); 178 + cancel_work_sync(&sc->paprd_work); 179 + cancel_work_sync(&sc->hw_check_work); 180 + cancel_delayed_work_sync(&sc->tx_complete_work); 181 182 ath9k_ps_wakeup(sc); 183 ··· 191 * to flush data frames already in queue because of 192 * changing channel. */ 193 194 + if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL)) 195 fastcc = false; 196 + 197 + if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) 198 + caldata = &aphy->caldata; 199 200 ath_print(common, ATH_DBG_CONFIG, 201 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", ··· 201 202 spin_lock_bh(&sc->sc_resetlock); 203 204 + r = ath9k_hw_reset(ah, hchan, caldata, fastcc); 205 if (r) { 206 ath_print(common, ATH_DBG_FATAL, 207 "Unable to reset channel (%u MHz), " ··· 211 goto ps_restore; 212 } 213 spin_unlock_bh(&sc->sc_resetlock); 214 215 if (ath_startrecv(sc) != 0) { 216 ath_print(common, ATH_DBG_FATAL, ··· 225 ath_update_txpow(sc); 226 ath9k_hw_set_interrupts(ah, ah->imask); 227 228 + if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) { 229 + ath_start_ani(common); 230 + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 231 + ath_beacon_config(sc, NULL); 232 + } 233 + 234 ps_restore: 235 ath9k_ps_restore(sc); 236 return r; ··· 233 static void ath_paprd_activate(struct ath_softc *sc) 234 { 235 struct ath_hw *ah = sc->sc_ah; 236 + struct ath9k_hw_cal_data *caldata = ah->caldata; 237 int chain; 238 239 + if (!caldata || !caldata->paprd_done) 240 return; 241 242 ath9k_ps_wakeup(sc); 243 + ar9003_paprd_enable(ah, false); 244 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 245 if (!(ah->caps.tx_chainmask & BIT(chain))) 246 continue; 247 248 + ar9003_paprd_populate_single_table(ah, caldata, chain); 249 } 250 251 ar9003_paprd_enable(ah, true); ··· 261 int band = hw->conf.channel->band; 262 struct ieee80211_supported_band *sband = &sc->sbands[band]; 263 struct ath_tx_control txctl; 264 + struct ath9k_hw_cal_data *caldata = ah->caldata; 265 int qnum, ftype; 266 int chain_ok = 0; 267 int chain; 268 int len = 1800; 269 int time_left; 270 int i; 271 + 272 + if (!caldata) 273 + return; 274 275 skb = alloc_skb(len, GFP_KERNEL); 276 if (!skb) ··· 322 if 
(!ar9003_paprd_is_done(ah)) 323 break; 324 325 + if (ar9003_paprd_create_curve(ah, caldata, chain) != 0) 326 break; 327 328 chain_ok = 1; ··· 330 kfree_skb(skb); 331 332 if (chain_ok) { 333 + caldata->paprd_done = true; 334 ath_paprd_activate(sc); 335 } 336 ··· 439 cal_interval = min(cal_interval, (u32)short_cal_interval); 440 441 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 442 + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) { 443 + if (!ah->caldata->paprd_done) 444 ieee80211_queue_work(sc->hw, &sc->paprd_work); 445 else 446 ath_paprd_activate(sc); 447 } 448 } 449 450 /* ··· 477 struct ath_hw *ah = sc->sc_ah; 478 struct ath_common *common = ath9k_hw_common(ah); 479 480 + if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht || 481 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) { 482 common->tx_chainmask = ah->caps.tx_chainmask; 483 common->rx_chainmask = ah->caps.rx_chainmask; ··· 817 ah->curchan = ath_get_curchannel(sc, sc->hw); 818 819 spin_lock_bh(&sc->sc_resetlock); 820 + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 821 if (r) { 822 ath_print(common, ATH_DBG_FATAL, 823 "Unable to reset channel (%u MHz), " ··· 877 ah->curchan = ath_get_curchannel(sc, hw); 878 879 spin_lock_bh(&sc->sc_resetlock); 880 + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 881 if (r) { 882 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 883 "Unable to reset channel (%u MHz), " ··· 910 ath_flushrecv(sc); 911 912 spin_lock_bh(&sc->sc_resetlock); 913 + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 914 if (r) 915 ath_print(common, ATH_DBG_FATAL, 916 "Unable to reset hardware; reset status %d\n", r); ··· 1085 * and then setup of the interrupt mask. 1086 */ 1087 spin_lock_bh(&sc->sc_resetlock); 1088 + r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1089 if (r) { 1090 ath_print(common, ATH_DBG_FATAL, 1091 "Unable to reset hardware; reset status %d " ··· 1579 1580 aphy->chan_idx = pos; 1581 aphy->chan_is_ht = conf_is_ht(conf); 1582 + if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 1583 + sc->sc_flags |= SC_OP_OFFCHANNEL; 1584 + else 1585 + sc->sc_flags &= ~SC_OP_OFFCHANNEL; 1586 1587 if (aphy->state == ATH_WIPHY_SCAN || 1588 aphy->state == ATH_WIPHY_ACTIVE) ··· 1990 { 1991 struct ath_wiphy *aphy = hw->priv; 1992 struct ath_softc *sc = aphy->sc; 1993 1994 mutex_lock(&sc->mutex); 1995 if (ath9k_wiphy_scanning(sc)) { ··· 2008 aphy->state = ATH_WIPHY_SCAN; 2009 ath9k_wiphy_pause_all_forced(sc, aphy); 2010 sc->sc_flags |= SC_OP_SCANNING; 2011 mutex_unlock(&sc->mutex); 2012 } 2013 ··· 2023 { 2024 struct ath_wiphy *aphy = hw->priv; 2025 struct ath_softc *sc = aphy->sc; 2026 2027 mutex_lock(&sc->mutex); 2028 aphy->state = ATH_WIPHY_ACTIVE; 2029 sc->sc_flags &= ~SC_OP_SCANNING; 2030 mutex_unlock(&sc->mutex); 2031 } 2032
+5 -5
drivers/net/wireless/ath/ath9k/recv.c
··· 1140 if (flush) 1141 goto requeue; 1142 1143 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1144 if (rs.rs_tstamp > tsf_lower && 1145 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) ··· 1153 if (rs.rs_tstamp < tsf_lower && 1154 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1155 rxs->mactime += 0x100000000ULL; 1156 - 1157 - retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1158 - rxs, &decrypt_error); 1159 - if (retval) 1160 - goto requeue; 1161 1162 /* Ensure we always have an skb to requeue once we are done 1163 * processing the current buffer's skb */
··· 1140 if (flush) 1141 goto requeue; 1142 1143 + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1144 + rxs, &decrypt_error); 1145 + if (retval) 1146 + goto requeue; 1147 + 1148 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1149 if (rs.rs_tstamp > tsf_lower && 1150 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) ··· 1148 if (rs.rs_tstamp < tsf_lower && 1149 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1150 rxs->mactime += 0x100000000ULL; 1151 1152 /* Ensure we always have an skb to requeue once we are done 1153 * processing the current buffer's skb */
+9 -27
drivers/net/wireless/ath/ath9k/xmit.c
··· 120 list_add_tail(&ac->list, &txq->axq_acq); 121 } 122 123 - static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 124 - { 125 - struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 126 - 127 - spin_lock_bh(&txq->axq_lock); 128 - tid->paused++; 129 - spin_unlock_bh(&txq->axq_lock); 130 - } 131 - 132 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 133 { 134 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 135 136 - BUG_ON(tid->paused <= 0); 137 spin_lock_bh(&txq->axq_lock); 138 - 139 - tid->paused--; 140 - 141 - if (tid->paused > 0) 142 - goto unlock; 143 144 if (list_empty(&tid->buf_q)) 145 goto unlock; ··· 145 struct list_head bf_head; 146 INIT_LIST_HEAD(&bf_head); 147 148 - BUG_ON(tid->paused <= 0); 149 spin_lock_bh(&txq->axq_lock); 150 - 151 - tid->paused--; 152 - 153 - if (tid->paused > 0) { 154 - spin_unlock_bh(&txq->axq_lock); 155 - return; 156 - } 157 158 while (!list_empty(&tid->buf_q)) { 159 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); ··· 794 an = (struct ath_node *)sta->drv_priv; 795 txtid = ATH_AN_2_TID(an, tid); 796 txtid->state |= AGGR_ADDBA_PROGRESS; 797 - ath_tx_pause_tid(sc, txtid); 798 *ssn = txtid->seq_start; 799 } 800 ··· 818 return; 819 } 820 821 - ath_tx_pause_tid(sc, txtid); 822 - 823 /* drop all software retried frames and mark this TID */ 824 spin_lock_bh(&txq->axq_lock); 825 while (!list_empty(&txtid->buf_q)) { 826 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 827 if (!bf_isretried(bf)) { ··· 1163 "Failed to stop TX DMA. Resetting hardware!\n"); 1164 1165 spin_lock_bh(&sc->sc_resetlock); 1166 - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 1167 if (r) 1168 ath_print(common, ATH_DBG_FATAL, 1169 "Unable to reset hardware; reset status %d\n",
··· 120 list_add_tail(&ac->list, &txq->axq_acq); 121 } 122 123 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 124 { 125 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 126 127 + WARN_ON(!tid->paused); 128 + 129 spin_lock_bh(&txq->axq_lock); 130 + tid->paused = false; 131 132 if (list_empty(&tid->buf_q)) 133 goto unlock; ··· 157 struct list_head bf_head; 158 INIT_LIST_HEAD(&bf_head); 159 160 + WARN_ON(!tid->paused); 161 + 162 spin_lock_bh(&txq->axq_lock); 163 + tid->paused = false; 164 165 while (!list_empty(&tid->buf_q)) { 166 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); ··· 811 an = (struct ath_node *)sta->drv_priv; 812 txtid = ATH_AN_2_TID(an, tid); 813 txtid->state |= AGGR_ADDBA_PROGRESS; 814 + txtid->paused = true; 815 *ssn = txtid->seq_start; 816 } 817 ··· 835 return; 836 } 837 838 /* drop all software retried frames and mark this TID */ 839 spin_lock_bh(&txq->axq_lock); 840 + txtid->paused = true; 841 while (!list_empty(&txtid->buf_q)) { 842 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 843 if (!bf_isretried(bf)) { ··· 1181 "Failed to stop TX DMA. Resetting hardware!\n"); 1182 1183 spin_lock_bh(&sc->sc_resetlock); 1184 + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 1185 if (r) 1186 ath_print(common, ATH_DBG_FATAL, 1187 "Unable to reset hardware; reset status %d\n",
+4
drivers/net/wireless/ipw2x00/ipw2100.c
··· 1924 bg_band->channels = 1925 kzalloc(geo->bg_channels * 1926 sizeof(struct ieee80211_channel), GFP_KERNEL); 1927 /* translate geo->bg to bg_band.channels */ 1928 for (i = 0; i < geo->bg_channels; i++) { 1929 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
··· 1924 bg_band->channels = 1925 kzalloc(geo->bg_channels * 1926 sizeof(struct ieee80211_channel), GFP_KERNEL); 1927 + if (!bg_band->channels) { 1928 + ipw2100_down(priv); 1929 + return -ENOMEM; 1930 + } 1931 /* translate geo->bg to bg_band.channels */ 1932 for (i = 0; i < geo->bg_channels; i++) { 1933 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
··· 980 le32_to_cpu(bt->lo_priority_tx_req_cnt), 981 accum_bt->lo_priority_tx_req_cnt); 982 pos += scnprintf(buf + pos, bufsz - pos, 983 - "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", 984 le32_to_cpu(bt->lo_priority_tx_denied_cnt), 985 accum_bt->lo_priority_tx_denied_cnt); 986 pos += scnprintf(buf + pos, bufsz - pos,
··· 980 le32_to_cpu(bt->lo_priority_tx_req_cnt), 981 accum_bt->lo_priority_tx_req_cnt); 982 pos += scnprintf(buf + pos, bufsz - pos, 983 + "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n", 984 le32_to_cpu(bt->lo_priority_tx_denied_cnt), 985 accum_bt->lo_priority_tx_denied_cnt); 986 pos += scnprintf(buf + pos, bufsz - pos,
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 1429 void iwl_free_tfds_in_queue(struct iwl_priv *priv, 1430 int sta_id, int tid, int freed) 1431 { 1432 - WARN_ON(!spin_is_locked(&priv->sta_lock)); 1433 1434 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) 1435 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
··· 1429 void iwl_free_tfds_in_queue(struct iwl_priv *priv, 1430 int sta_id, int tid, int freed) 1431 { 1432 + lockdep_assert_held(&priv->sta_lock); 1433 1434 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) 1435 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
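
The spin_is_locked() conversions here, and in the iwl-agn-tx.c, iwl-scan.c and iwl-sta.c hunks below, are more than style: on uniprocessor builds without spinlock debugging, spin_is_locked() is hard-wired to 0, so WARN_ON(!spin_is_locked(...)) fires even while the lock is held. lockdep_assert_held() instead checks that the current context owns the lock, and compiles away entirely without CONFIG_LOCKDEP. The pattern in miniature (function name illustrative):

    /* Sketch: document and verify a locking precondition. */
    static void sta_stats_update(struct iwl_priv *priv, int sta_id)
    {
            lockdep_assert_held(&priv->sta_lock);   /* no-op unless lockdep */

            /* ... safe to touch priv->stations[sta_id] here ... */
    }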
+7 -4
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
··· 300 struct ieee80211_sta *sta) 301 { 302 int ret = -EAGAIN; 303 304 - if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 305 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 306 sta->addr, tid); 307 ret = ieee80211_start_tx_ba_session(sta, tid); ··· 312 * this might be caused by reloading firmware 313 * stop the tx ba session here 314 */ 315 - IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", 316 tid); 317 ieee80211_stop_tx_ba_session(sta, tid); 318 } 319 - } else 320 - IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid); 321 return ret; 322 } 323
··· 300 struct ieee80211_sta *sta) 301 { 302 int ret = -EAGAIN; 303 + u32 load = rs_tl_get_load(lq_data, tid); 304 305 + if (load > IWL_AGG_LOAD_THRESHOLD) { 306 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 307 sta->addr, tid); 308 ret = ieee80211_start_tx_ba_session(sta, tid); ··· 311 * this might be caused by reloading firmware 312 * stop the tx ba session here 313 */ 314 + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", 315 tid); 316 ieee80211_stop_tx_ba_session(sta, tid); 317 } 318 + } else { 319 + IWL_ERR(priv, "Aggregation not enabled for tid %d " 320 + "because load = %u\n", tid, load); 321 + } 322 return ret; 323 } 324
+9 -2
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
··· 1117 u8 *addr = priv->stations[sta_id].sta.sta.addr; 1118 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; 1119 1120 - WARN_ON(!spin_is_locked(&priv->sta_lock)); 1121 1122 switch (priv->stations[sta_id].tid[tid].agg.state) { 1123 case IWL_EMPTYING_HW_QUEUE_DELBA: ··· 1331 tid = ba_resp->tid; 1332 agg = &priv->stations[sta_id].tid[tid].agg; 1333 if (unlikely(agg->txq_id != scd_flow)) { 1334 - IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n", 1335 scd_flow, agg->txq_id); 1336 return; 1337 }
··· 1117 u8 *addr = priv->stations[sta_id].sta.sta.addr; 1118 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; 1119 1120 + lockdep_assert_held(&priv->sta_lock); 1121 1122 switch (priv->stations[sta_id].tid[tid].agg.state) { 1123 case IWL_EMPTYING_HW_QUEUE_DELBA: ··· 1331 tid = ba_resp->tid; 1332 agg = &priv->stations[sta_id].tid[tid].agg; 1333 if (unlikely(agg->txq_id != scd_flow)) { 1334 + /* 1335 + * FIXME: this is a uCode bug which needs to be addressed; 1336 + * log the information and return for now. 1337 + * Since it can happen very often, and to avoid filling 1338 + * the syslog, don't enable the logging by default. 1339 + */ 1340 + IWL_DEBUG_TX_REPLY(priv, 1341 + "BA scd_flow %d does not match txq_id %d\n", 1342 scd_flow, agg->txq_id); 1343 return; 1344 }
+5 -1
drivers/net/wireless/iwlwifi/iwl-core.c
··· 2000 struct ieee80211_vif *vif) 2001 { 2002 struct iwl_priv *priv = hw->priv; 2003 2004 IWL_DEBUG_MAC80211(priv, "enter\n"); 2005 ··· 2014 if (priv->vif == vif) { 2015 priv->vif = NULL; 2016 if (priv->scan_vif == vif) { 2017 - ieee80211_scan_completed(priv->hw, true); 2018 priv->scan_vif = NULL; 2019 priv->scan_request = NULL; 2020 } 2021 memset(priv->bssid, 0, ETH_ALEN); 2022 } 2023 mutex_unlock(&priv->mutex); 2024 2025 IWL_DEBUG_MAC80211(priv, "leave\n"); 2026
··· 2000 struct ieee80211_vif *vif) 2001 { 2002 struct iwl_priv *priv = hw->priv; 2003 + bool scan_completed = false; 2004 2005 IWL_DEBUG_MAC80211(priv, "enter\n"); 2006 ··· 2013 if (priv->vif == vif) { 2014 priv->vif = NULL; 2015 if (priv->scan_vif == vif) { 2016 + scan_completed = true; 2017 priv->scan_vif = NULL; 2018 priv->scan_request = NULL; 2019 } 2020 memset(priv->bssid, 0, ETH_ALEN); 2021 } 2022 mutex_unlock(&priv->mutex); 2023 + 2024 + if (scan_completed) 2025 + ieee80211_scan_completed(priv->hw, true); 2026 2027 IWL_DEBUG_MAC80211(priv, "leave\n"); 2028
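
The reshuffle above defers ieee80211_scan_completed() until priv->mutex has been dropped. The motive is inferred here rather than stated in the hunk: calling back into mac80211 while holding a driver lock risks lock recursion if mac80211 re-enters the driver. The resulting shape:

    /* Sketch: decide under the lock, call back after releasing it. */
    bool scan_completed = false;

    mutex_lock(&priv->mutex);
    if (priv->scan_vif == vif) {
            scan_completed = true;  /* record the decision only */
            priv->scan_vif = NULL;
    }
    mutex_unlock(&priv->mutex);

    if (scan_completed)             /* mac80211 re-entry now safe */
            ieee80211_scan_completed(priv->hw, true);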
+1 -1
drivers/net/wireless/iwlwifi/iwl-debug.h
··· 71 #define IWL_DEBUG(__priv, level, fmt, args...) 72 #define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 73 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, 74 - void *p, u32 len) 75 {} 76 #endif /* CONFIG_IWLWIFI_DEBUG */ 77
··· 71 #define IWL_DEBUG(__priv, level, fmt, args...) 72 #define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 73 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, 74 + const void *p, u32 len) 75 {} 76 #endif /* CONFIG_IWLWIFI_DEBUG */ 77
+1 -1
drivers/net/wireless/iwlwifi/iwl-devtrace.h
··· 193 __entry->framelen = buf0_len + buf1_len; 194 memcpy(__get_dynamic_array(tfd), tfd, tfdlen); 195 memcpy(__get_dynamic_array(buf0), buf0, buf0_len); 196 - memcpy(__get_dynamic_array(buf1), buf1, buf0_len); 197 ), 198 TP_printk("[%p] TX %.2x (%zu bytes)", 199 __entry->priv,
··· 193 __entry->framelen = buf0_len + buf1_len; 194 memcpy(__get_dynamic_array(tfd), tfd, tfdlen); 195 memcpy(__get_dynamic_array(buf0), buf0, buf0_len); 196 + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); 197 ), 198 TP_printk("[%p] TX %.2x (%zu bytes)", 199 __entry->priv,
+1 -1
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 298 299 static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif) 300 { 301 - WARN_ON(!mutex_is_locked(&priv->mutex)); 302 303 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 304 set_bit(STATUS_SCANNING, &priv->status);
··· 298 299 static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif) 300 { 301 + lockdep_assert_held(&priv->mutex); 302 303 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 304 set_bit(STATUS_SCANNING, &priv->status);
+3 -3
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 773 774 int iwl_restore_default_wep_keys(struct iwl_priv *priv) 775 { 776 - WARN_ON(!mutex_is_locked(&priv->mutex)); 777 778 return iwl_send_static_wepkey_cmd(priv, 0); 779 } ··· 784 { 785 int ret; 786 787 - WARN_ON(!mutex_is_locked(&priv->mutex)); 788 789 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 790 keyconf->keyidx); ··· 808 { 809 int ret; 810 811 - WARN_ON(!mutex_is_locked(&priv->mutex)); 812 813 if (keyconf->keylen != WEP_KEY_LEN_128 && 814 keyconf->keylen != WEP_KEY_LEN_64) {
··· 773 774 int iwl_restore_default_wep_keys(struct iwl_priv *priv) 775 { 776 + lockdep_assert_held(&priv->mutex); 777 778 return iwl_send_static_wepkey_cmd(priv, 0); 779 } ··· 784 { 785 int ret; 786 787 + lockdep_assert_held(&priv->mutex); 788 789 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 790 keyconf->keyidx); ··· 808 { 809 int ret; 810 811 + lockdep_assert_held(&priv->mutex); 812 813 if (keyconf->keylen != WEP_KEY_LEN_128 && 814 keyconf->keylen != WEP_KEY_LEN_64) {
+164 -50
drivers/net/wireless/libertas/cfg.c
··· 257 return sizeof(rate_tlv->header) + i; 258 } 259 260 261 /* 262 * Adds a TLV with all rates the hardware *and* BSS supports. ··· 287 static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) 288 { 289 struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; 290 - const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); 291 - int n; 292 293 /* 294 * 01 00 TLV_TYPE_RATES ··· 301 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); 302 tlv += sizeof(rate_tlv->header); 303 304 - if (!rates_eid) { 305 /* Fallback: add basic 802.11b rates */ 306 *tlv++ = 0x82; 307 *tlv++ = 0x84; 308 *tlv++ = 0x8b; 309 *tlv++ = 0x96; 310 n = 4; 311 - } else { 312 - int hw, ap; 313 - u8 ap_max = rates_eid[1]; 314 - n = 0; 315 - for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { 316 - u8 hw_rate = lbs_rates[hw].bitrate / 5; 317 - for (ap = 0; ap < ap_max; ap++) { 318 - if (hw_rate == (rates_eid[ap+2] & 0x7f)) { 319 - *tlv++ = rates_eid[ap+2]; 320 - n++; 321 - } 322 - } 323 - } 324 } 325 326 rate_tlv->header.len = cpu_to_le16(n); ··· 486 lbs_deb_enter(LBS_DEB_CFG80211); 487 488 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); 489 - nr_sets = le16_to_cpu(resp->size); 490 491 /* 492 * The general layout of the scan response is described in chapter ··· 699 700 if (priv->scan_channel >= priv->scan_req->n_channels) { 701 /* Mark scan done */ 702 - cfg80211_scan_done(priv->scan_req, false); 703 priv->scan_req = NULL; 704 } 705 706 /* Restart network */ ··· 716 717 kfree(scan_cmd); 718 719 out_no_scan_cmd: 720 lbs_deb_leave(LBS_DEB_SCAN); 721 } 722 723 724 static int lbs_cfg_scan(struct wiphy *wiphy, 725 struct net_device *dev, ··· 759 goto out; 760 } 761 762 - lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", 763 - request->n_ssids, request->n_channels, request->ie_len); 764 - 765 - priv->scan_channel = 0; 766 - queue_delayed_work(priv->work_thread, &priv->scan_work, 767 - msecs_to_jiffies(50)); 768 769 if (priv->surpriseremoved) 770 ret = -EIO; 771 - 772 - priv->scan_req = request; 773 774 out: 775 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); ··· 1050 int status; 1051 int ret; 1052 u8 *pos = &(cmd->iebuf[0]); 1053 1054 lbs_deb_enter(LBS_DEB_CFG80211); 1055 ··· 1095 pos += lbs_add_cf_param_tlv(pos); 1096 1097 /* add rates TLV */ 1098 pos += lbs_add_common_rates_tlv(pos, bss); 1099 1100 /* add auth type TLV */ 1101 if (priv->fwrelease >= 0x09000000) ··· 1177 return ret; 1178 } 1179 1180 1181 1182 static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, 1183 struct cfg80211_connect_params *sme) ··· 1244 1245 lbs_deb_enter(LBS_DEB_CFG80211); 1246 1247 - if (sme->bssid) { 1248 - bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, 1249 - sme->ssid, sme->ssid_len, 1250 - WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 1251 - } else { 1252 - /* 1253 - * Here we have an impedance mismatch. The firmware command 1254 - * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot 1255 - * connect otherwise. However, for the connect-API of 1256 - * cfg80211 the bssid is purely optional. We don't get one, 1257 - * except the user specifies one on the "iw" command line. 1258 - * 1259 - * If we don't got one, we could initiate a scan and look 1260 - * for the best matching cfg80211_bss entry. 1261 - * 1262 - * Or, better yet, net/wireless/sme.c get's rewritten into 1263 - * something more generally useful. 
1264 */ 1265 - lbs_pr_err("TODO: no BSS specified\n"); 1266 - ret = -ENOTSUPP; 1267 - goto done; 1268 } 1269 1270 - 1271 if (!bss) { 1272 - lbs_pr_err("assicate: bss %pM not in scan results\n", 1273 sme->bssid); 1274 ret = -ENOENT; 1275 goto done; 1276 } 1277 - lbs_deb_assoc("trying %pM", sme->bssid); 1278 lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n", 1279 sme->crypto.cipher_group, 1280 sme->key_idx, sme->key_len); ··· 1343 lbs_set_radio(priv, preamble, 1); 1344 1345 /* Do the actual association */ 1346 - lbs_associate(priv, bss, sme); 1347 1348 done: 1349 if (bss)
··· 257 return sizeof(rate_tlv->header) + i; 258 } 259 260 + /* Add common rates from a TLV and return the new end of the TLV */ 261 + static u8 * 262 + add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) 263 + { 264 + int hw, ap, ap_max = ie[1]; 265 + u8 hw_rate; 266 + 267 + /* Advance past IE header */ 268 + ie += 2; 269 + 270 + lbs_deb_hex(LBS_DEB_ASSOC, "AP IE Rates", (u8 *) ie, ap_max); 271 + 272 + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { 273 + hw_rate = lbs_rates[hw].bitrate / 5; 274 + for (ap = 0; ap < ap_max; ap++) { 275 + if (hw_rate == (ie[ap] & 0x7f)) { 276 + *tlv++ = ie[ap]; 277 + *nrates = *nrates + 1; 278 + } 279 + } 280 + } 281 + return tlv; 282 + } 283 284 /* 285 * Adds a TLV with all rates the hardware *and* BSS supports. ··· 264 static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) 265 { 266 struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; 267 + const u8 *rates_eid, *ext_rates_eid; 268 + int n = 0; 269 + 270 + rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); 271 + ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES); 272 273 /* 274 * 01 00 TLV_TYPE_RATES ··· 275 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); 276 tlv += sizeof(rate_tlv->header); 277 278 + /* Add basic rates */ 279 + if (rates_eid) { 280 + tlv = add_ie_rates(tlv, rates_eid, &n); 281 + 282 + /* Add extended rates, if any */ 283 + if (ext_rates_eid) 284 + tlv = add_ie_rates(tlv, ext_rates_eid, &n); 285 + } else { 286 + lbs_deb_assoc("assoc: bss had no basic rate IE\n"); 287 /* Fallback: add basic 802.11b rates */ 288 *tlv++ = 0x82; 289 *tlv++ = 0x84; 290 *tlv++ = 0x8b; 291 *tlv++ = 0x96; 292 n = 4; 293 } 294 295 rate_tlv->header.len = cpu_to_le16(n); ··· 465 lbs_deb_enter(LBS_DEB_CFG80211); 466 467 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); 468 + nr_sets = le16_to_cpu(scanresp->nr_sets); 469 + 470 + lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n", 471 + nr_sets, bsssize, le16_to_cpu(resp->size)); 472 + 473 + if (nr_sets == 0) { 474 + ret = 0; 475 + goto done; 476 + } 477 478 /* 479 * The general layout of the scan response is described in chapter ··· 670 671 if (priv->scan_channel >= priv->scan_req->n_channels) { 672 /* Mark scan done */ 673 + if (priv->internal_scan) 674 + kfree(priv->scan_req); 675 + else 676 + cfg80211_scan_done(priv->scan_req, false); 677 + 678 priv->scan_req = NULL; 679 + priv->last_scan = jiffies; 680 } 681 682 /* Restart network */ ··· 682 683 kfree(scan_cmd); 684 685 + /* Wake up anything waiting on scan completion */ 686 + if (priv->scan_req == NULL) { 687 + lbs_deb_scan("scan: waking up waiters\n"); 688 + wake_up_all(&priv->scan_q); 689 + } 690 + 691 out_no_scan_cmd: 692 lbs_deb_leave(LBS_DEB_SCAN); 693 } 694 695 + static void _internal_start_scan(struct lbs_private *priv, bool internal, 696 + struct cfg80211_scan_request *request) 697 + { 698 + lbs_deb_enter(LBS_DEB_CFG80211); 699 + 700 + lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", 701 + request->n_ssids, request->n_channels, request->ie_len); 702 + 703 + priv->scan_channel = 0; 704 + queue_delayed_work(priv->work_thread, &priv->scan_work, 705 + msecs_to_jiffies(50)); 706 + 707 + priv->scan_req = request; 708 + priv->internal_scan = internal; 709 + 710 + lbs_deb_leave(LBS_DEB_CFG80211); 711 + } 712 713 static int lbs_cfg_scan(struct wiphy *wiphy, 714 struct net_device *dev, ··· 702 goto out; 703 } 704 705 + _internal_start_scan(priv, false, request); 706 707 if (priv->surpriseremoved) 708 ret = -EIO; 709 710 out: 711 
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); ··· 1000 int status; 1001 int ret; 1002 u8 *pos = &(cmd->iebuf[0]); 1003 + u8 *tmp; 1004 1005 lbs_deb_enter(LBS_DEB_CFG80211); 1006 ··· 1044 pos += lbs_add_cf_param_tlv(pos); 1045 1046 /* add rates TLV */ 1047 + tmp = pos + 4; /* skip Marvell IE header */ 1048 pos += lbs_add_common_rates_tlv(pos, bss); 1049 + lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp); 1050 1051 /* add auth type TLV */ 1052 if (priv->fwrelease >= 0x09000000) ··· 1124 return ret; 1125 } 1126 1127 + static struct cfg80211_scan_request * 1128 + _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme) 1129 + { 1130 + struct cfg80211_scan_request *creq = NULL; 1131 + int i, n_channels = 0; 1132 + enum ieee80211_band band; 1133 1134 + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1135 + if (wiphy->bands[band]) 1136 + n_channels += wiphy->bands[band]->n_channels; 1137 + } 1138 + 1139 + creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + 1140 + n_channels * sizeof(void *), 1141 + GFP_ATOMIC); 1142 + if (!creq) 1143 + return NULL; 1144 + 1145 + /* SSIDs come after channels */ 1146 + creq->ssids = (void *)&creq->channels[n_channels]; 1147 + creq->n_channels = n_channels; 1148 + creq->n_ssids = 1; 1149 + 1150 + /* Scan all available channels */ 1151 + i = 0; 1152 + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1153 + int j; 1154 + 1155 + if (!wiphy->bands[band]) 1156 + continue; 1157 + 1158 + for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 1159 + /* ignore disabled channels */ 1160 + if (wiphy->bands[band]->channels[j].flags & 1161 + IEEE80211_CHAN_DISABLED) 1162 + continue; 1163 + 1164 + creq->channels[i] = &wiphy->bands[band]->channels[j]; 1165 + i++; 1166 + } 1167 + } 1168 + if (i) { 1169 + /* Set real number of channels specified in creq->channels[] */ 1170 + creq->n_channels = i; 1171 + 1172 + /* Scan for the SSID we're going to connect to */ 1173 + memcpy(creq->ssids[0].ssid, sme->ssid, sme->ssid_len); 1174 + creq->ssids[0].ssid_len = sme->ssid_len; 1175 + } else { 1176 + /* No channels found... */ 1177 + kfree(creq); 1178 + creq = NULL; 1179 + } 1180 + 1181 + return creq; 1182 + } 1183 1184 static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, 1185 struct cfg80211_connect_params *sme) ··· 1136 1137 lbs_deb_enter(LBS_DEB_CFG80211); 1138 1139 + if (!sme->bssid) { 1140 + /* Run a scan if one isn't in-progress already and if the last 1141 + * scan was done more than 2 seconds ago. 
1142 */ 1143 + if (priv->scan_req == NULL && 1144 + time_after(jiffies, priv->last_scan + (2 * HZ))) { 1145 + struct cfg80211_scan_request *creq; 1146 + 1147 + creq = _new_connect_scan_req(wiphy, sme); 1148 + if (!creq) { 1149 + ret = -EINVAL; 1150 + goto done; 1151 + } 1152 + 1153 + lbs_deb_assoc("assoc: scanning for compatible AP\n"); 1154 + _internal_start_scan(priv, true, creq); 1155 + } 1156 + 1157 + /* Wait for any in-progress scan to complete */ 1158 + lbs_deb_assoc("assoc: waiting for scan to complete\n"); 1159 + wait_event_interruptible_timeout(priv->scan_q, 1160 + (priv->scan_req == NULL), 1161 + (15 * HZ)); 1162 + lbs_deb_assoc("assoc: scanning completed\n"); 1163 } 1164 1165 + /* Find the BSS we want using available scan results */ 1166 + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, 1167 + sme->ssid, sme->ssid_len, 1168 + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 1169 if (!bss) { 1170 + lbs_pr_err("assoc: bss %pM not in scan results\n", 1171 sme->bssid); 1172 ret = -ENOENT; 1173 goto done; 1174 } 1175 + lbs_deb_assoc("trying %pM\n", bss->bssid); 1176 lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n", 1177 sme->crypto.cipher_group, 1178 sme->key_idx, sme->key_len); ··· 1229 lbs_set_radio(priv, preamble, 1); 1230 1231 /* Do the actual association */ 1232 + ret = lbs_associate(priv, bss, sme); 1233 1234 done: 1235 if (bss)
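
Worth spelling out from _new_connect_scan_req() above: the request, its channel pointer array, and the single SSID all come from one kzalloc(). channels[] is the struct's trailing flexible array, so the SSID record sits immediately past the last channel pointer and creq->ssids is pointed at it:

    /* One allocation, three regions, as used in the hunk above:
     *
     *   [ struct cfg80211_scan_request | channels[n] ptrs | one ssid ]
     */
    creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
                   n_channels * sizeof(void *), GFP_ATOMIC);
    if (creq)
            creq->ssids = (void *)&creq->channels[n_channels];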
+5
drivers/net/wireless/libertas/dev.h
··· 161 /** Scanning */ 162 struct delayed_work scan_work; 163 int scan_channel; 164 }; 165 166 extern struct cmd_confirm_sleep confirm_sleep;
··· 161 /** Scanning */ 162 struct delayed_work scan_work; 163 int scan_channel; 164 + /* Queue of things waiting for scan completion */ 165 + wait_queue_head_t scan_q; 166 + /* Whether the scan was initiated internally and not by cfg80211 */ 167 + bool internal_scan; 168 + unsigned long last_scan; 169 }; 170 171 extern struct cmd_confirm_sleep confirm_sleep;
+1
drivers/net/wireless/libertas/main.c
··· 719 priv->deep_sleep_required = 0; 720 priv->wakeup_dev_required = 0; 721 init_waitqueue_head(&priv->ds_awake_q); 722 priv->authtype_auto = 1; 723 priv->is_host_sleep_configured = 0; 724 priv->is_host_sleep_activated = 0;
··· 719 priv->deep_sleep_required = 0; 720 priv->wakeup_dev_required = 0; 721 init_waitqueue_head(&priv->ds_awake_q); 722 + init_waitqueue_head(&priv->scan_q); 723 priv->authtype_auto = 1; 724 priv->is_host_sleep_configured = 0; 725 priv->is_host_sleep_activated = 0;
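The scan_q field added to the private structure and initialized here follows the standard wait-queue pattern: the connect path sleeps until priv->scan_req is cleared or a timeout expires, and the scan-completion path is expected to wake the queue. The three pieces side by side (the wake_up call is an assumption about the completion handler, which is not part of these hunks):

/* setup, as in the init hunk above */
init_waitqueue_head(&priv->scan_q);

/* waiter, as in lbs_cfg_connect(): sleep up to 15s for the condition */
wait_event_interruptible_timeout(priv->scan_q,
				 priv->scan_req == NULL,
				 15 * HZ);

/* completion side (assumed): publish the state change, then wake */
priv->scan_req = NULL;
wake_up_all(&priv->scan_q);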
+2
drivers/net/wireless/p54/p54pci.c
··· 43 { PCI_DEVICE(0x1260, 0x3886) }, 44 /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ 45 { PCI_DEVICE(0x1260, 0xffff) }, 46 { }, 47 }; 48
··· 43 { PCI_DEVICE(0x1260, 0x3886) }, 44 /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ 45 { PCI_DEVICE(0x1260, 0xffff) }, 46 + /* Standard Microsystems Corp SMC2802W Wireless PCI */ 47 + { PCI_DEVICE(0x10b8, 0x2802) }, 48 { }, 49 }; 50
+12 -13
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 240 struct rt2x00_dev *rt2x00dev; 241 int retval; 242 243 - retval = pci_request_regions(pci_dev, pci_name(pci_dev)); 244 - if (retval) { 245 - ERROR_PROBE("PCI request regions failed.\n"); 246 - return retval; 247 - } 248 - 249 retval = pci_enable_device(pci_dev); 250 if (retval) { 251 ERROR_PROBE("Enable device failed.\n"); 252 - goto exit_release_regions; 253 } 254 255 pci_set_master(pci_dev); ··· 260 if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { 261 ERROR_PROBE("PCI DMA not supported.\n"); 262 retval = -EIO; 263 - goto exit_disable_device; 264 } 265 266 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 267 if (!hw) { 268 ERROR_PROBE("Failed to allocate hardware.\n"); 269 retval = -ENOMEM; 270 - goto exit_disable_device; 271 } 272 273 pci_set_drvdata(pci_dev, hw); ··· 300 exit_free_device: 301 ieee80211_free_hw(hw); 302 303 - exit_disable_device: 304 - if (retval != -EBUSY) 305 - pci_disable_device(pci_dev); 306 - 307 exit_release_regions: 308 pci_release_regions(pci_dev); 309 310 pci_set_drvdata(pci_dev, NULL); 311
··· 240 struct rt2x00_dev *rt2x00dev; 241 int retval; 242 243 retval = pci_enable_device(pci_dev); 244 if (retval) { 245 ERROR_PROBE("Enable device failed.\n"); 246 + return retval; 247 + } 248 + 249 + retval = pci_request_regions(pci_dev, pci_name(pci_dev)); 250 + if (retval) { 251 + ERROR_PROBE("PCI request regions failed.\n"); 252 + goto exit_disable_device; 253 } 254 255 pci_set_master(pci_dev); ··· 260 if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { 261 ERROR_PROBE("PCI DMA not supported.\n"); 262 retval = -EIO; 263 + goto exit_release_regions; 264 } 265 266 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 267 if (!hw) { 268 ERROR_PROBE("Failed to allocate hardware.\n"); 269 retval = -ENOMEM; 270 + goto exit_release_regions; 271 } 272 273 pci_set_drvdata(pci_dev, hw); ··· 300 exit_free_device: 301 ieee80211_free_hw(hw); 302 303 exit_release_regions: 304 pci_release_regions(pci_dev); 305 + 306 + exit_disable_device: 307 + pci_disable_device(pci_dev); 308 309 pci_set_drvdata(pci_dev, NULL); 310
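The reordered probe path now follows the canonical PCI pattern: enable the device before claiming its regions, and unwind strictly in reverse on failure, which also removes the old `retval != -EBUSY` special case. A skeletal version of that ladder, with error labels named for the step they undo (the driver name and DMA step are placeholders):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing acquired yet */

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;	/* undo the enable only */

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		err = -EIO;
		goto err_release;	/* undo regions, then enable */
	}

	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}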
+2
drivers/net/wireless/rtl818x/rtl8180_dev.c
··· 695 696 /* grab a fresh beacon */ 697 skb = ieee80211_beacon_get(dev, vif); 698 699 /* 700 * update beacon timestamp w/ TSF value
··· 695 696 /* grab a fresh beacon */ 697 skb = ieee80211_beacon_get(dev, vif); 698 + if (!skb) 699 + goto resched; 700 701 /* 702 * update beacon timestamp w/ TSF value
+1 -2
drivers/net/wireless/wl12xx/wl1271_spi.c
··· 160 spi_message_add_tail(&t, &m); 161 162 spi_sync(wl_to_spi(wl), &m); 163 - kfree(cmd); 164 - 165 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 166 } 167 168 #define WL1271_BUSY_WORD_TIMEOUT 1000
··· 160 spi_message_add_tail(&t, &m); 161 162 spi_sync(wl_to_spi(wl), &m); 163 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 164 + kfree(cmd); 165 } 166 167 #define WL1271_BUSY_WORD_TIMEOUT 1000
+1 -1
include/linux/ppp_channel.h
··· 36 37 struct ppp_channel { 38 void *private; /* channel private data */ 39 - struct ppp_channel_ops *ops; /* operations for this channel */ 40 int mtu; /* max transmit packet size */ 41 int hdrlen; /* amount of headroom channel needs */ 42 void *ppp; /* opaque to channel */
··· 36 37 struct ppp_channel { 38 void *private; /* channel private data */ 39 + const struct ppp_channel_ops *ops; /* operations for this channel */ 40 int mtu; /* max transmit packet size */ 41 int hdrlen; /* amount of headroom channel needs */ 42 void *ppp; /* opaque to channel */
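Constifying the ops pointer lets every ppp_channel_ops instance be declared const and placed in read-only memory, so a stray write faults instead of silently redirecting .start_xmit; the pppoatm, irnet, and l2tp hunks below make exactly that change. The shape of the pattern for a channel driver (the foo_* names are hypothetical):

static int foo_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int foo_ioctl(struct ppp_channel *chan, unsigned int cmd,
		     unsigned long arg);

/* lives in .rodata; the compiler now rejects runtime assignment */
static const struct ppp_channel_ops foo_ops = {
	.start_xmit = foo_xmit,
	.ioctl      = foo_ioctl,
};

static void foo_init_channel(struct ppp_channel *chan)
{
	chan->ops = &foo_ops;	/* const-qualified field, no cast needed */
}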
+5
include/linux/skbuff.h
··· 1379 return skb_network_header(skb) - skb->data; 1380 } 1381 1382 /* 1383 * CPUs often take a performance hit when accessing unaligned memory 1384 * locations. The actual performance hit varies, it can be small if the
··· 1379 return skb_network_header(skb) - skb->data; 1380 } 1381 1382 + static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) 1383 + { 1384 + return pskb_may_pull(skb, skb_network_offset(skb) + len); 1385 + } 1386 + 1387 /* 1388 * CPUs often take a performance hit when accessing unaligned memory 1389 * locations. The actual performance hit varies, it can be small if the
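pskb_network_may_pull() is the helper the classifier and scheduler fixes later in this merge lean on (act_nat, cls_flow, cls_rsvp, sch_sfq): it verifies that len bytes beyond the network header are actually present in the linear area before the header is dereferenced. Typical use, assuming the caller has already set the network header offset:

static u32 example_get_saddr(struct sk_buff *skb)
{
	const struct iphdr *iph;

	/* make sure the whole IPv4 header is linear before touching it */
	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return 0;	/* truncated packet: nothing to read */

	iph = ip_hdr(skb);	/* re-read after the pull, which may relocate data */
	return ntohl(iph->saddr);
}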
+1 -1
include/net/bluetooth/hci_core.h
··· 132 133 struct inquiry_cache inq_cache; 134 struct hci_conn_hash conn_hash; 135 - struct bdaddr_list blacklist; 136 137 struct hci_dev_stats stat; 138
··· 132 133 struct inquiry_cache inq_cache; 134 struct hci_conn_hash conn_hash; 135 + struct list_head blacklist; 136 137 struct hci_dev_stats stat; 138
+1 -1
net/atm/pppoatm.c
··· 260 return -ENOTTY; 261 } 262 263 - static /*const*/ struct ppp_channel_ops pppoatm_ops = { 264 .start_xmit = pppoatm_send, 265 .ioctl = pppoatm_devppp_ioctl, 266 };
··· 260 return -ENOTTY; 261 } 262 263 + static const struct ppp_channel_ops pppoatm_ops = { 264 .start_xmit = pppoatm_send, 265 .ioctl = pppoatm_devppp_ioctl, 266 };
+1 -1
net/bluetooth/hci_core.c
··· 924 925 hci_conn_hash_init(hdev); 926 927 - INIT_LIST_HEAD(&hdev->blacklist.list); 928 929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 930
··· 924 925 hci_conn_hash_init(hdev); 926 927 + INIT_LIST_HEAD(&hdev->blacklist); 928 929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 930
+3 -5
net/bluetooth/hci_sock.c
··· 168 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 169 { 170 struct list_head *p; 171 - struct bdaddr_list *blacklist = &hdev->blacklist; 172 173 - list_for_each(p, &blacklist->list) { 174 struct bdaddr_list *b; 175 176 b = list_entry(p, struct bdaddr_list, list); ··· 201 202 bacpy(&entry->bdaddr, &bdaddr); 203 204 - list_add(&entry->list, &hdev->blacklist.list); 205 206 return 0; 207 } ··· 209 int hci_blacklist_clear(struct hci_dev *hdev) 210 { 211 struct list_head *p, *n; 212 - struct bdaddr_list *blacklist = &hdev->blacklist; 213 214 - list_for_each_safe(p, n, &blacklist->list) { 215 struct bdaddr_list *b; 216 217 b = list_entry(p, struct bdaddr_list, list);
··· 168 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 169 { 170 struct list_head *p; 171 172 + list_for_each(p, &hdev->blacklist) { 173 struct bdaddr_list *b; 174 175 b = list_entry(p, struct bdaddr_list, list); ··· 202 203 bacpy(&entry->bdaddr, &bdaddr); 204 205 + list_add(&entry->list, &hdev->blacklist); 206 207 return 0; 208 } ··· 210 int hci_blacklist_clear(struct hci_dev *hdev) 211 { 212 struct list_head *p, *n; 213 214 + list_for_each_safe(p, n, &hdev->blacklist) { 215 struct bdaddr_list *b; 216 217 b = list_entry(p, struct bdaddr_list, list);
+1 -2
net/bluetooth/hci_sysfs.c
··· 439 static int blacklist_show(struct seq_file *f, void *p) 440 { 441 struct hci_dev *hdev = f->private; 442 - struct bdaddr_list *blacklist = &hdev->blacklist; 443 struct list_head *l; 444 445 hci_dev_lock_bh(hdev); 446 447 - list_for_each(l, &blacklist->list) { 448 struct bdaddr_list *b; 449 bdaddr_t bdaddr; 450
··· 439 static int blacklist_show(struct seq_file *f, void *p) 440 { 441 struct hci_dev *hdev = f->private; 442 struct list_head *l; 443 444 hci_dev_lock_bh(hdev); 445 446 + list_for_each(l, &hdev->blacklist) { 447 struct bdaddr_list *b; 448 bdaddr_t bdaddr; 449
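With hdev->blacklist reduced to a bare list_head, the open-coded list_for_each()/list_entry() pairs above could equally be written with the entry iterators; the lookup, for instance, collapses to the following (equivalent code, not part of this patch):

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	/* iterate entries directly; no list_entry() boilerplate */
	list_for_each_entry(b, &hdev->blacklist, list) {
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;
	}

	return NULL;
}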
+21 -3
net/bluetooth/l2cap.c
··· 2527 if (pi->imtu != L2CAP_DEFAULT_MTU) 2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 2529 2530 rfc.mode = L2CAP_MODE_BASIC; 2531 rfc.txwin_size = 0; 2532 rfc.max_transmit = 0; ··· 2538 rfc.monitor_timeout = 0; 2539 rfc.max_pdu_size = 0; 2540 2541 break; 2542 2543 case L2CAP_MODE_ERTM: ··· 2551 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2552 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2553 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2554 2555 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2556 break; ··· 2575 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2576 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2577 2578 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2579 break; 2580 ··· 2588 } 2589 break; 2590 } 2591 - 2592 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2593 - (unsigned long) &rfc); 2594 2595 /* FIXME: Need actual value of the flush timeout */ 2596 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) ··· 3347 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 3348 3349 del_timer(&conn->info_timer); 3350 3351 if (type == L2CAP_IT_FEAT_MASK) { 3352 conn->feat_mask = get_unaligned_le32(rsp->data);
··· 2527 if (pi->imtu != L2CAP_DEFAULT_MTU) 2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 2529 2530 + if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 2531 + !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 2532 + break; 2533 + 2534 rfc.mode = L2CAP_MODE_BASIC; 2535 rfc.txwin_size = 0; 2536 rfc.max_transmit = 0; ··· 2534 rfc.monitor_timeout = 0; 2535 rfc.max_pdu_size = 0; 2536 2537 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2538 + (unsigned long) &rfc); 2539 break; 2540 2541 case L2CAP_MODE_ERTM: ··· 2545 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2546 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2547 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2548 + 2549 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2550 + (unsigned long) &rfc); 2551 2552 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2553 break; ··· 2566 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2567 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2568 2569 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2570 + (unsigned long) &rfc); 2571 + 2572 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2573 break; 2574 ··· 2576 } 2577 break; 2578 } 2579 2580 /* FIXME: Need actual value of the flush timeout */ 2581 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) ··· 3338 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 3339 3340 del_timer(&conn->info_timer); 3341 + 3342 + if (result != L2CAP_IR_SUCCESS) { 3343 + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3344 + conn->info_ident = 0; 3345 + 3346 + l2cap_conn_start(conn); 3347 + 3348 + return 0; 3349 + } 3350 3351 if (type == L2CAP_IT_FEAT_MASK) { 3352 conn->feat_mask = get_unaligned_le32(rsp->data);
+1 -1
net/bluetooth/rfcomm/tty.c
··· 1183 return 0; 1184 } 1185 1186 - void __exit rfcomm_cleanup_ttys(void) 1187 { 1188 tty_unregister_driver(rfcomm_tty_driver); 1189 put_tty_driver(rfcomm_tty_driver);
··· 1183 return 0; 1184 } 1185 1186 + void rfcomm_cleanup_ttys(void) 1187 { 1188 tty_unregister_driver(rfcomm_tty_driver); 1189 put_tty_driver(rfcomm_tty_driver);
+3 -4
net/core/dev.c
··· 2517 struct rps_dev_flow voidflow, *rflow = &voidflow; 2518 int cpu; 2519 2520 rcu_read_lock(); 2521 2522 cpu = get_rps_cpu(skb->dev, skb, &rflow); ··· 2527 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2528 2529 rcu_read_unlock(); 2530 } 2531 #else 2532 { ··· 3074 int mac_len; 3075 enum gro_result ret; 3076 3077 - if (!(skb->dev->features & NETIF_F_GRO)) 3078 goto normal; 3079 3080 if (skb_is_gso(skb) || skb_has_frags(skb)) ··· 3160 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3161 { 3162 struct sk_buff *p; 3163 - 3164 - if (netpoll_rx_on(skb)) 3165 - return GRO_NORMAL; 3166 3167 for (p = napi->gro_list; p; p = p->next) { 3168 NAPI_GRO_CB(p)->same_flow =
··· 2517 struct rps_dev_flow voidflow, *rflow = &voidflow; 2518 int cpu; 2519 2520 + preempt_disable(); 2521 rcu_read_lock(); 2522 2523 cpu = get_rps_cpu(skb->dev, skb, &rflow); ··· 2526 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2527 2528 rcu_read_unlock(); 2529 + preempt_enable(); 2530 } 2531 #else 2532 { ··· 3072 int mac_len; 3073 enum gro_result ret; 3074 3075 + if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3076 goto normal; 3077 3078 if (skb_is_gso(skb) || skb_has_frags(skb)) ··· 3158 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3159 { 3160 struct sk_buff *p; 3161 3162 for (p = napi->gro_list; p; p = p->next) { 3163 NAPI_GRO_CB(p)->same_flow =
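The netif_rx() fix pins the task to one CPU while get_rps_cpu() runs, because that path reaches smp_processor_id(), which is only meaningful (and warns under CONFIG_DEBUG_PREEMPT) when preemption is disabled. The same guarantee can be spelled with the get_cpu()/put_cpu() shorthand:

static void example_percpu_work(void)
{
	int cpu;

	cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */
	/* ... use cpu for per-CPU state; no migration can occur here ... */
	put_cpu();		/* preempt_enable() */
}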
+1 -1
net/ipv4/tcp_input.c
··· 3930 if (opsize < 2 || opsize > length) 3931 return NULL; 3932 if (opcode == TCPOPT_MD5SIG) 3933 - return ptr; 3934 } 3935 ptr += opsize - 2; 3936 length -= opsize;
··· 3930 if (opsize < 2 || opsize > length) 3931 return NULL; 3932 if (opcode == TCPOPT_MD5SIG) 3933 + return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 3934 } 3935 ptr += opsize - 2; 3936 length -= opsize;
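TCPOLEN_MD5SIG is 18: kind (1 byte) + length (1 byte) + a 16-byte digest. Before this fix the parser returned any option whose kind was TCPOPT_MD5SIG, so a shorter, malformed option let the signature check read past the option's end. A standalone sketch of the hardened walk, mirroring the parser above in simplified form:

static const u8 *find_md5_option(const u8 *ptr, int length)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == TCPOPT_EOL)
			return NULL;
		if (opcode == TCPOPT_NOP) {	/* single-byte padding */
			length--;
			continue;
		}
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			return NULL;		/* corrupt length field */
		if (opcode == TCPOPT_MD5SIG)
			/* accept only an exactly-sized option */
			return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
		ptr += opsize - 2;
		length -= opsize;
	}
	return NULL;
}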
+1 -1
net/irda/irnet/irnet_ppp.c
··· 20 /* Please put other headers in irnet.h - Thanks */ 21 22 /* Generic PPP callbacks (to call us) */ 23 - static struct ppp_channel_ops irnet_ppp_ops = { 24 .start_xmit = ppp_irnet_send, 25 .ioctl = ppp_irnet_ioctl 26 };
··· 20 /* Please put other headers in irnet.h - Thanks */ 21 22 /* Generic PPP callbacks (to call us) */ 23 + static const struct ppp_channel_ops irnet_ppp_ops = { 24 .start_xmit = ppp_irnet_send, 25 .ioctl = ppp_irnet_ioctl 26 };
+4 -1
net/l2tp/l2tp_ppp.c
··· 135 136 static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); 137 138 - static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; 139 static const struct proto_ops pppol2tp_ops; 140 141 /* Helpers to obtain tunnel/session contexts from sockets.
··· 135 136 static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); 137 138 + static const struct ppp_channel_ops pppol2tp_chan_ops = { 139 + .start_xmit = pppol2tp_xmit, 140 + }; 141 + 142 static const struct proto_ops pppol2tp_ops; 143 144 /* Helpers to obtain tunnel/session contexts from sockets.
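Beyond the added const, this hunk replaces positional initialization with designated initializers: `{ pppol2tp_xmit , NULL }` depends on start_xmit being the first member and breaks silently if ppp_channel_ops is ever extended or reordered, while `.start_xmit = ...` binds by name and zeroes everything left unnamed. In miniature:

struct ops {
	int (*start_xmit)(void);
	int (*ioctl)(void);
};

static int my_xmit(void) { return 0; }

/* fragile: meaning changes if the fields are ever reordered */
static const struct ops positional = { my_xmit, NULL };

/* robust: bound by name; .ioctl is implicitly NULL */
static const struct ops designated = {
	.start_xmit = my_xmit,
};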
+2
net/mac80211/main.c
··· 685 686 return 0; 687 688 fail_ifa: 689 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, 690 &local->network_latency_notifier); 691 rtnl_lock(); 692 fail_pm_qos: 693 ieee80211_led_exit(local); 694 ieee80211_remove_interfaces(local);
··· 685 686 return 0; 687 688 + #ifdef CONFIG_INET 689 fail_ifa: 690 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, 691 &local->network_latency_notifier); 692 rtnl_lock(); 693 + #endif 694 fail_pm_qos: 695 ieee80211_led_exit(local); 696 ieee80211_remove_interfaces(local);
-14
net/mac80211/scan.c
··· 400 else 401 __set_bit(SCAN_SW_SCANNING, &local->scanning); 402 403 - /* 404 - * Kicking off the scan need not be protected, 405 - * only the scan variable stuff, since now 406 - * local->scan_req is assigned and other callers 407 - * will abort their scan attempts. 408 - * 409 - * This avoids too many locking dependencies 410 - * so that the scan completed calls have more 411 - * locking freedom. 412 - */ 413 - 414 ieee80211_recalc_idle(local); 415 - mutex_unlock(&local->scan_mtx); 416 417 if (local->ops->hw_scan) { 418 WARN_ON(!ieee80211_prep_hw_scan(local)); 419 rc = drv_hw_scan(local, sdata, local->hw_scan_req); 420 } else 421 rc = ieee80211_start_sw_scan(local); 422 - 423 - mutex_lock(&local->scan_mtx); 424 425 if (rc) { 426 kfree(local->hw_scan_req);
··· 400 else 401 __set_bit(SCAN_SW_SCANNING, &local->scanning); 402 403 ieee80211_recalc_idle(local); 404 405 if (local->ops->hw_scan) { 406 WARN_ON(!ieee80211_prep_hw_scan(local)); 407 rc = drv_hw_scan(local, sdata, local->hw_scan_req); 408 } else 409 rc = ieee80211_start_sw_scan(local); 410 411 if (rc) { 412 kfree(local->hw_scan_req);
+3
net/rxrpc/ar-ack.c
··· 245 _enter("%d,%d,%d", 246 call->acks_tail, call->acks_unacked, call->acks_head); 247 248 resend = 0; 249 resend_at = 0; 250
··· 245 _enter("%d,%d,%d", 246 call->acks_tail, call->acks_unacked, call->acks_head); 247 248 + if (call->state >= RXRPC_CALL_COMPLETE) 249 + return; 250 + 251 resend = 0; 252 resend_at = 0; 253
+2 -4
net/rxrpc/ar-call.c
··· 786 787 /* 788 * handle resend timer expiry 789 */ 790 static void rxrpc_resend_time_expired(unsigned long _call) 791 { ··· 797 if (call->state >= RXRPC_CALL_COMPLETE) 798 return; 799 800 - read_lock_bh(&call->state_lock); 801 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); 802 - if (call->state < RXRPC_CALL_COMPLETE && 803 - !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) 804 rxrpc_queue_call(call); 805 - read_unlock_bh(&call->state_lock); 806 } 807 808 /*
··· 786 787 /* 788 * handle resend timer expiry 789 + * - may not take call->state_lock as this can deadlock against del_timer_sync() 790 */ 791 static void rxrpc_resend_time_expired(unsigned long _call) 792 { ··· 796 if (call->state >= RXRPC_CALL_COMPLETE) 797 return; 798 799 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); 800 + if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) 801 rxrpc_queue_call(call); 802 } 803 804 /*
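The new comment on rxrpc_resend_time_expired() records the invariant that makes the simplification safe: del_timer_sync() waits for a running handler to finish, so a handler that takes a lock held across del_timer_sync() deadlocks both sides. Schematically (the locking on the cancelling side is an assumption for illustration, not a hunk from this patch):

static void cancel_resend_timer(struct rxrpc_call *call)
{
	/* holds state_lock while waiting for the handler to finish */
	write_lock_bh(&call->state_lock);
	del_timer_sync(&call->resend_timer);	/* would spin forever if the
						 * handler also took state_lock */
	write_unlock_bh(&call->state_lock);
}

/* so the handler stays lock-free: test a bit and defer the real work */
static void resend_time_expired(unsigned long data)
{
	struct rxrpc_call *call = (struct rxrpc_call *)data;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;
	if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}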
+13 -10
net/sched/act_nat.c
··· 114 int egress; 115 int action; 116 int ihl; 117 118 spin_lock(&p->tcf_lock); 119 ··· 133 if (unlikely(action == TC_ACT_SHOT)) 134 goto drop; 135 136 - if (!pskb_may_pull(skb, sizeof(*iph))) 137 goto drop; 138 139 iph = ip_hdr(skb); ··· 146 147 if (!((old_addr ^ addr) & mask)) { 148 if (skb_cloned(skb) && 149 - !skb_clone_writable(skb, sizeof(*iph)) && 150 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 151 goto drop; 152 ··· 174 { 175 struct tcphdr *tcph; 176 177 - if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) || 178 (skb_cloned(skb) && 179 - !skb_clone_writable(skb, ihl + sizeof(*tcph)) && 180 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 181 goto drop; 182 ··· 188 { 189 struct udphdr *udph; 190 191 - if (!pskb_may_pull(skb, ihl + sizeof(*udph)) || 192 (skb_cloned(skb) && 193 - !skb_clone_writable(skb, ihl + sizeof(*udph)) && 194 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 195 goto drop; 196 ··· 207 { 208 struct icmphdr *icmph; 209 210 - if (!pskb_may_pull(skb, ihl + sizeof(*icmph))) 211 goto drop; 212 213 icmph = (void *)(skb_network_header(skb) + ihl); ··· 217 (icmph->type != ICMP_PARAMETERPROB)) 218 break; 219 220 - if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) 221 goto drop; 222 223 icmph = (void *)(skb_network_header(skb) + ihl); ··· 232 break; 233 234 if (skb_cloned(skb) && 235 - !skb_clone_writable(skb, 236 - ihl + sizeof(*icmph) + sizeof(*iph)) && 237 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 238 goto drop; 239
··· 114 int egress; 115 int action; 116 int ihl; 117 + int noff; 118 119 spin_lock(&p->tcf_lock); 120 ··· 132 if (unlikely(action == TC_ACT_SHOT)) 133 goto drop; 134 135 + noff = skb_network_offset(skb); 136 + if (!pskb_may_pull(skb, sizeof(*iph) + noff)) 137 goto drop; 138 139 iph = ip_hdr(skb); ··· 144 145 if (!((old_addr ^ addr) & mask)) { 146 if (skb_cloned(skb) && 147 + !skb_clone_writable(skb, sizeof(*iph) + noff) && 148 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 149 goto drop; 150 ··· 172 { 173 struct tcphdr *tcph; 174 175 + if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) || 176 (skb_cloned(skb) && 177 + !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) && 178 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 179 goto drop; 180 ··· 186 { 187 struct udphdr *udph; 188 189 + if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) || 190 (skb_cloned(skb) && 191 + !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) && 192 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 193 goto drop; 194 ··· 205 { 206 struct icmphdr *icmph; 207 208 + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff)) 209 goto drop; 210 211 icmph = (void *)(skb_network_header(skb) + ihl); ··· 215 (icmph->type != ICMP_PARAMETERPROB)) 216 break; 217 218 + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) + 219 + noff)) 220 goto drop; 221 222 icmph = (void *)(skb_network_header(skb) + ihl); ··· 229 break; 230 231 if (skb_cloned(skb) && 232 + !skb_clone_writable(skb, ihl + sizeof(*icmph) + 233 + sizeof(*iph) + noff) && 234 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 235 goto drop; 236
+56 -40
net/sched/cls_flow.c
··· 65 return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); 66 } 67 68 - static u32 flow_get_src(const struct sk_buff *skb) 69 { 70 switch (skb->protocol) { 71 case htons(ETH_P_IP): 72 - return ntohl(ip_hdr(skb)->saddr); 73 case htons(ETH_P_IPV6): 74 - return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 75 - default: 76 - return addr_fold(skb->sk); 77 } 78 } 79 80 - static u32 flow_get_dst(const struct sk_buff *skb) 81 { 82 switch (skb->protocol) { 83 case htons(ETH_P_IP): 84 - return ntohl(ip_hdr(skb)->daddr); 85 case htons(ETH_P_IPV6): 86 - return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 87 - default: 88 - return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 89 } 90 } 91 92 - static u32 flow_get_proto(const struct sk_buff *skb) 93 { 94 switch (skb->protocol) { 95 case htons(ETH_P_IP): 96 - return ip_hdr(skb)->protocol; 97 case htons(ETH_P_IPV6): 98 - return ipv6_hdr(skb)->nexthdr; 99 default: 100 return 0; 101 } ··· 126 } 127 } 128 129 - static u32 flow_get_proto_src(const struct sk_buff *skb) 130 { 131 - u32 res = 0; 132 - 133 switch (skb->protocol) { 134 case htons(ETH_P_IP): { 135 - struct iphdr *iph = ip_hdr(skb); 136 137 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 138 - has_ports(iph->protocol)) 139 - res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 140 break; 141 } 142 case htons(ETH_P_IPV6): { 143 - struct ipv6hdr *iph = ipv6_hdr(skb); 144 145 if (has_ports(iph->nexthdr)) 146 - res = ntohs(*(__be16 *)&iph[1]); 147 break; 148 } 149 - default: 150 - res = addr_fold(skb->sk); 151 } 152 153 - return res; 154 } 155 156 - static u32 flow_get_proto_dst(const struct sk_buff *skb) 157 { 158 - u32 res = 0; 159 - 160 switch (skb->protocol) { 161 case htons(ETH_P_IP): { 162 - struct iphdr *iph = ip_hdr(skb); 163 164 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 165 - has_ports(iph->protocol)) 166 - res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 167 break; 168 } 169 case htons(ETH_P_IPV6): { 170 - struct ipv6hdr *iph = ipv6_hdr(skb); 171 172 if (has_ports(iph->nexthdr)) 173 - res = ntohs(*(__be16 *)((void *)&iph[1] + 2)); 174 break; 175 } 176 - default: 177 - res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 178 } 179 180 - return res; 181 } 182 183 static u32 flow_get_iif(const struct sk_buff *skb) ··· 227 }) 228 #endif 229 230 - static u32 flow_get_nfct_src(const struct sk_buff *skb) 231 { 232 switch (skb->protocol) { 233 case htons(ETH_P_IP): ··· 239 return flow_get_src(skb); 240 } 241 242 - static u32 flow_get_nfct_dst(const struct sk_buff *skb) 243 { 244 switch (skb->protocol) { 245 case htons(ETH_P_IP): ··· 251 return flow_get_dst(skb); 252 } 253 254 - static u32 flow_get_nfct_proto_src(const struct sk_buff *skb) 255 { 256 return ntohs(CTTUPLE(skb, src.u.all)); 257 fallback: 258 return flow_get_proto_src(skb); 259 } 260 261 - static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb) 262 { 263 return ntohs(CTTUPLE(skb, dst.u.all)); 264 fallback: ··· 297 return tag & VLAN_VID_MASK; 298 } 299 300 - static u32 flow_key_get(const struct sk_buff *skb, int key) 301 { 302 switch (key) { 303 case FLOW_KEY_SRC:
··· 65 return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); 66 } 67 68 + static u32 flow_get_src(struct sk_buff *skb) 69 { 70 switch (skb->protocol) { 71 case htons(ETH_P_IP): 72 + if (pskb_network_may_pull(skb, sizeof(struct iphdr))) 73 + return ntohl(ip_hdr(skb)->saddr); 74 + break; 75 case htons(ETH_P_IPV6): 76 + if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) 77 + return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 78 + break; 79 } 80 + 81 + return addr_fold(skb->sk); 82 } 83 84 + static u32 flow_get_dst(struct sk_buff *skb) 85 { 86 switch (skb->protocol) { 87 case htons(ETH_P_IP): 88 + if (pskb_network_may_pull(skb, sizeof(struct iphdr))) 89 + return ntohl(ip_hdr(skb)->daddr); 90 + break; 91 case htons(ETH_P_IPV6): 92 + if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) 93 + return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 94 + break; 95 } 96 + 97 + return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 98 } 99 100 + static u32 flow_get_proto(struct sk_buff *skb) 101 { 102 switch (skb->protocol) { 103 case htons(ETH_P_IP): 104 + return pskb_network_may_pull(skb, sizeof(struct iphdr)) ? 105 + ip_hdr(skb)->protocol : 0; 106 case htons(ETH_P_IPV6): 107 + return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ? 108 + ipv6_hdr(skb)->nexthdr : 0; 109 default: 110 return 0; 111 } ··· 116 } 117 } 118 119 + static u32 flow_get_proto_src(struct sk_buff *skb) 120 { 121 switch (skb->protocol) { 122 case htons(ETH_P_IP): { 123 + struct iphdr *iph; 124 125 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 126 + break; 127 + iph = ip_hdr(skb); 128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 129 + has_ports(iph->protocol) && 130 + pskb_network_may_pull(skb, iph->ihl * 4 + 2)) 131 + return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 132 break; 133 } 134 case htons(ETH_P_IPV6): { 135 + struct ipv6hdr *iph; 136 137 + if (!pskb_network_may_pull(skb, sizeof(*iph) + 2)) 138 + break; 139 + iph = ipv6_hdr(skb); 140 if (has_ports(iph->nexthdr)) 141 + return ntohs(*(__be16 *)&iph[1]); 142 break; 143 } 144 } 145 146 + return addr_fold(skb->sk); 147 } 148 149 + static u32 flow_get_proto_dst(struct sk_buff *skb) 150 { 151 switch (skb->protocol) { 152 case htons(ETH_P_IP): { 153 + struct iphdr *iph; 154 155 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 156 + break; 157 + iph = ip_hdr(skb); 158 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 159 + has_ports(iph->protocol) && 160 + pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 161 + return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 162 break; 163 } 164 case htons(ETH_P_IPV6): { 165 + struct ipv6hdr *iph; 166 167 + if (!pskb_network_may_pull(skb, sizeof(*iph) + 4)) 168 + break; 169 + iph = ipv6_hdr(skb); 170 if (has_ports(iph->nexthdr)) 171 + return ntohs(*(__be16 *)((void *)&iph[1] + 2)); 172 break; 173 } 174 } 175 176 + return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; 177 } 178 179 static u32 flow_get_iif(const struct sk_buff *skb) ··· 211 }) 212 #endif 213 214 + static u32 flow_get_nfct_src(struct sk_buff *skb) 215 { 216 switch (skb->protocol) { 217 case htons(ETH_P_IP): ··· 223 return flow_get_src(skb); 224 } 225 226 + static u32 flow_get_nfct_dst(struct sk_buff *skb) 227 { 228 switch (skb->protocol) { 229 case htons(ETH_P_IP): ··· 235 return flow_get_dst(skb); 236 } 237 238 + static u32 flow_get_nfct_proto_src(struct sk_buff *skb) 239 { 240 return ntohs(CTTUPLE(skb, src.u.all)); 241 fallback: 242 return flow_get_proto_src(skb); 243 } 244 245 + static u32 flow_get_nfct_proto_dst(struct sk_buff *skb) 246 { 
247 return ntohs(CTTUPLE(skb, dst.u.all)); 248 fallback: ··· 281 return tag & VLAN_VID_MASK; 282 } 283 284 + static u32 flow_key_get(struct sk_buff *skb, int key) 285 { 286 switch (key) { 287 case FLOW_KEY_SRC:
+10 -2
net/sched/cls_rsvp.h
··· 143 u8 tunnelid = 0; 144 u8 *xprt; 145 #if RSVP_DST_LEN == 4 146 - struct ipv6hdr *nhptr = ipv6_hdr(skb); 147 #else 148 - struct iphdr *nhptr = ip_hdr(skb); 149 #endif 150 151 restart:
··· 143 u8 tunnelid = 0; 144 u8 *xprt; 145 #if RSVP_DST_LEN == 4 146 + struct ipv6hdr *nhptr; 147 + 148 + if (!pskb_network_may_pull(skb, sizeof(*nhptr))) 149 + return -1; 150 + nhptr = ipv6_hdr(skb); 151 #else 152 + struct iphdr *nhptr; 153 + 154 + if (!pskb_network_may_pull(skb, sizeof(*nhptr))) 155 + return -1; 156 + nhptr = ip_hdr(skb); 157 #endif 158 159 restart:
+27 -9
net/sched/sch_sfq.c
··· 122 switch (skb->protocol) { 123 case htons(ETH_P_IP): 124 { 125 - const struct iphdr *iph = ip_hdr(skb); 126 h = (__force u32)iph->daddr; 127 h2 = (__force u32)iph->saddr ^ iph->protocol; 128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && ··· 135 iph->protocol == IPPROTO_UDPLITE || 136 iph->protocol == IPPROTO_SCTP || 137 iph->protocol == IPPROTO_DCCP || 138 - iph->protocol == IPPROTO_ESP)) 139 h2 ^= *(((u32*)iph) + iph->ihl); 140 break; 141 } 142 case htons(ETH_P_IPV6): 143 { 144 - struct ipv6hdr *iph = ipv6_hdr(skb); 145 h = (__force u32)iph->daddr.s6_addr32[3]; 146 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; 147 - if (iph->nexthdr == IPPROTO_TCP || 148 - iph->nexthdr == IPPROTO_UDP || 149 - iph->nexthdr == IPPROTO_UDPLITE || 150 - iph->nexthdr == IPPROTO_SCTP || 151 - iph->nexthdr == IPPROTO_DCCP || 152 - iph->nexthdr == IPPROTO_ESP) 153 h2 ^= *(u32*)&iph[1]; 154 break; 155 } 156 default: 157 h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; 158 h2 = (unsigned long)skb->sk; 159 } ··· 513 return 0; 514 } 515 516 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) 517 { 518 struct sfq_sched_data *q = qdisc_priv(sch); ··· 573 static const struct Qdisc_class_ops sfq_class_ops = { 574 .get = sfq_get, 575 .tcf_chain = sfq_find_tcf, 576 .dump = sfq_dump_class, 577 .dump_stats = sfq_dump_class_stats, 578 .walk = sfq_walk,
··· 122 switch (skb->protocol) { 123 case htons(ETH_P_IP): 124 { 125 + const struct iphdr *iph; 126 + 127 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 128 + goto err; 129 + iph = ip_hdr(skb); 130 h = (__force u32)iph->daddr; 131 h2 = (__force u32)iph->saddr ^ iph->protocol; 132 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && ··· 131 iph->protocol == IPPROTO_UDPLITE || 132 iph->protocol == IPPROTO_SCTP || 133 iph->protocol == IPPROTO_DCCP || 134 + iph->protocol == IPPROTO_ESP) && 135 + pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 136 h2 ^= *(((u32*)iph) + iph->ihl); 137 break; 138 } 139 case htons(ETH_P_IPV6): 140 { 141 + struct ipv6hdr *iph; 142 + 143 + if (!pskb_network_may_pull(skb, sizeof(*iph))) 144 + goto err; 145 + iph = ipv6_hdr(skb); 146 h = (__force u32)iph->daddr.s6_addr32[3]; 147 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; 148 + if ((iph->nexthdr == IPPROTO_TCP || 149 + iph->nexthdr == IPPROTO_UDP || 150 + iph->nexthdr == IPPROTO_UDPLITE || 151 + iph->nexthdr == IPPROTO_SCTP || 152 + iph->nexthdr == IPPROTO_DCCP || 153 + iph->nexthdr == IPPROTO_ESP) && 154 + pskb_network_may_pull(skb, sizeof(*iph) + 4)) 155 h2 ^= *(u32*)&iph[1]; 156 break; 157 } 158 default: 159 + err: 160 h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; 161 h2 = (unsigned long)skb->sk; 162 } ··· 502 return 0; 503 } 504 505 + static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, 506 + u32 classid) 507 + { 508 + return 0; 509 + } 510 + 511 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) 512 { 513 struct sfq_sched_data *q = qdisc_priv(sch); ··· 556 static const struct Qdisc_class_ops sfq_class_ops = { 557 .get = sfq_get, 558 .tcf_chain = sfq_find_tcf, 559 + .bind_tcf = sfq_bind, 560 .dump = sfq_dump_class, 561 .dump_stats = sfq_dump_class_stats, 562 .walk = sfq_walk,
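The sch_sfq oops arose because tcf_bind_filter() invokes the class ops' bind_tcf hook whenever a filter with a classid is attached, and sfq exported tcf_chain without one. Any classless qdisc that exposes a filter chain needs at least a stub; the general shape (example_find_tcf stands in for the qdisc's existing tcf_chain hook):

/* nothing to pin in a classless qdisc: report "no class" with 0 */
static unsigned long example_bind(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops example_class_ops = {
	.tcf_chain	= example_find_tcf,	/* existing filter-chain hook */
	.bind_tcf	= example_bind,		/* avoids the NULL-deref oops */
	/* ... */
};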