Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem

+6269 -1034
+128
Documentation/networking/nfc.txt
··· 1 + Linux NFC subsystem 2 + =================== 3 + 4 + The Near Field Communication (NFC) subsystem is required to standardize the 5 + NFC device drivers development and to create an unified userspace interface. 6 + 7 + This document covers the architecture overview, the device driver interface 8 + description and the userspace interface description. 9 + 10 + Architecture overview 11 + --------------------- 12 + 13 + The NFC subsystem is responsible for: 14 + - NFC adapters management; 15 + - Polling for targets; 16 + - Low-level data exchange; 17 + 18 + The subsystem is divided in some parts. The 'core' is responsible for 19 + providing the device driver interface. On the other side, it is also 20 + responsible for providing an interface to control operations and low-level 21 + data exchange. 22 + 23 + The control operations are available to userspace via generic netlink. 24 + 25 + The low-level data exchange interface is provided by the new socket family 26 + PF_NFC. The NFC_SOCKPROTO_RAW performs raw communication with NFC targets. 27 + 28 + 29 + +--------------------------------------+ 30 + | USER SPACE | 31 + +--------------------------------------+ 32 + ^ ^ 33 + | low-level | control 34 + | data exchange | operations 35 + | | 36 + | v 37 + | +-----------+ 38 + | AF_NFC | netlink | 39 + | socket +-----------+ 40 + | raw ^ 41 + | | 42 + v v 43 + +---------+ +-----------+ 44 + | rawsock | <--------> | core | 45 + +---------+ +-----------+ 46 + ^ 47 + | 48 + v 49 + +-----------+ 50 + | driver | 51 + +-----------+ 52 + 53 + Device Driver Interface 54 + ----------------------- 55 + 56 + When registering on the NFC subsystem, the device driver must inform the core 57 + of the set of supported NFC protocols and the set of ops callbacks. 
The ops 58 + callbacks that must be implemented are the following: 59 + 60 + * start_poll - setup the device to poll for targets 61 + * stop_poll - stop on progress polling operation 62 + * activate_target - select and initialize one of the targets found 63 + * deactivate_target - deselect and deinitialize the selected target 64 + * data_exchange - send data and receive the response (transceive operation) 65 + 66 + Userspace interface 67 + -------------------- 68 + 69 + The userspace interface is divided in control operations and low-level data 70 + exchange operation. 71 + 72 + CONTROL OPERATIONS: 73 + 74 + Generic netlink is used to implement the interface to the control operations. 75 + The operations are composed by commands and events, all listed below: 76 + 77 + * NFC_CMD_GET_DEVICE - get specific device info or dump the device list 78 + * NFC_CMD_START_POLL - setup a specific device to polling for targets 79 + * NFC_CMD_STOP_POLL - stop the polling operation in a specific device 80 + * NFC_CMD_GET_TARGET - dump the list of targets found by a specific device 81 + 82 + * NFC_EVENT_DEVICE_ADDED - reports an NFC device addition 83 + * NFC_EVENT_DEVICE_REMOVED - reports an NFC device removal 84 + * NFC_EVENT_TARGETS_FOUND - reports START_POLL results when 1 or more targets 85 + are found 86 + 87 + The user must call START_POLL to poll for NFC targets, passing the desired NFC 88 + protocols through NFC_ATTR_PROTOCOLS attribute. The device remains in polling 89 + state until it finds any target. However, the user can stop the polling 90 + operation by calling STOP_POLL command. In this case, it will be checked if 91 + the requester of STOP_POLL is the same of START_POLL. 92 + 93 + If the polling operation finds one or more targets, the event TARGETS_FOUND is 94 + sent (including the device id). The user must call GET_TARGET to get the list of 95 + all targets found by such device. 
Each reply message has target attributes with 96 + relevant information such as the supported NFC protocols. 97 + 98 + All polling operations requested through one netlink socket are stopped when 99 + it's closed. 100 + 101 + LOW-LEVEL DATA EXCHANGE: 102 + 103 + The userspace must use PF_NFC sockets to perform any data communication with 104 + targets. All NFC sockets use AF_NFC: 105 + 106 + struct sockaddr_nfc { 107 + sa_family_t sa_family; 108 + __u32 dev_idx; 109 + __u32 target_idx; 110 + __u32 nfc_protocol; 111 + }; 112 + 113 + To establish a connection with one target, the user must create an 114 + NFC_SOCKPROTO_RAW socket and call the 'connect' syscall with the sockaddr_nfc 115 + struct correctly filled. All information comes from NFC_EVENT_TARGETS_FOUND 116 + netlink event. As a target can support more than one NFC protocol, the user 117 + must inform which protocol it wants to use. 118 + 119 + Internally, 'connect' will result in an activate_target call to the driver. 120 + When the socket is closed, the target is deactivated. 121 + 122 + The data format exchanged through the sockets is NFC protocol dependent. For 123 + instance, when communicating with MIFARE tags, the data exchanged are MIFARE 124 + commands and their responses. 125 + 126 + The first received package is the response to the first sent package and so 127 + on. In order to allow valid "empty" responses, every data received has a NULL 128 + header of 1 byte.
-2
drivers/Kconfig
··· 94 94 95 95 source "drivers/leds/Kconfig" 96 96 97 - source "drivers/nfc/Kconfig" 98 - 99 97 source "drivers/accessibility/Kconfig" 100 98 101 99 source "drivers/infiniband/Kconfig"
+1
drivers/Makefile
··· 122 122 obj-y += clk/ 123 123 124 124 obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ 125 + obj-$(CONFIG_NFC) += nfc/
+6
drivers/bcma/Kconfig
··· 27 27 bool "Support for BCMA on PCI-host bus" 28 28 depends on BCMA_HOST_PCI_POSSIBLE 29 29 30 + config BCMA_DRIVER_PCI_HOSTMODE 31 + bool "Driver for PCI core working in hostmode" 32 + depends on BCMA && MIPS 33 + help 34 + PCI core hostmode operation (external PCI bus). 35 + 30 36 config BCMA_DEBUG 31 37 bool "BCMA debugging" 32 38 depends on BCMA
+1
drivers/bcma/Makefile
··· 1 1 bcma-y += main.o scan.o core.o sprom.o 2 2 bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o 3 3 bcma-y += driver_pci.o 4 + bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o 4 5 bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o 5 6 obj-$(CONFIG_BCMA) += bcma.o 6 7
+4
drivers/bcma/bcma_private.h
··· 28 28 extern void __exit bcma_host_pci_exit(void); 29 29 #endif /* CONFIG_BCMA_HOST_PCI */ 30 30 31 + #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE 32 + void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); 33 + #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */ 34 + 31 35 #endif
+37 -1
drivers/bcma/driver_pci.c
··· 157 157 * Init. 158 158 **************************************************/ 159 159 160 - void bcma_core_pci_init(struct bcma_drv_pci *pc) 160 + static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc) 161 161 { 162 162 bcma_pcicore_serdes_workaround(pc); 163 + } 164 + 165 + static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc) 166 + { 167 + struct bcma_bus *bus = pc->core->bus; 168 + u16 chipid_top; 169 + 170 + chipid_top = (bus->chipinfo.id & 0xFF00); 171 + if (chipid_top != 0x4700 && 172 + chipid_top != 0x5300) 173 + return false; 174 + 175 + if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI) 176 + return false; 177 + 178 + #if 0 179 + /* TODO: on BCMA we use address from EROM instead of magic formula */ 180 + u32 tmp; 181 + return !mips_busprobe32(tmp, (bus->mmio + 182 + (pc->core->core_index * BCMA_CORE_SIZE))); 183 + #endif 184 + 185 + return true; 186 + } 187 + 188 + void bcma_core_pci_init(struct bcma_drv_pci *pc) 189 + { 190 + if (bcma_core_pci_is_in_hostmode(pc)) { 191 + #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE 192 + bcma_core_pci_hostmode_init(pc); 193 + #else 194 + pr_err("Driver compiled without support for hostmode PCI\n"); 195 + #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */ 196 + } else { 197 + bcma_core_pci_clientmode_init(pc); 198 + } 163 199 } 164 200 165 201 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+14
drivers/bcma/driver_pci_host.c
··· 1 + /* 2 + * Broadcom specific AMBA 3 + * PCI Core in hostmode 4 + * 5 + * Licensed under the GNU/GPL. See COPYING for details. 6 + */ 7 + 8 + #include "bcma_private.h" 9 + #include <linux/bcma/bcma.h> 10 + 11 + void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) 12 + { 13 + pr_err("No support for PCI core in hostmode yet\n"); 14 + }
+2 -1
drivers/net/wireless/ath/carl9170/carl9170.h
··· 67 67 68 68 #define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1) 69 69 70 + static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 }; 71 + 70 72 enum carl9170_rf_init_mode { 71 73 CARL9170_RFI_NONE, 72 74 CARL9170_RFI_WARM, ··· 442 440 enum carl9170_ps_off_override_reasons { 443 441 PS_OFF_VIF = BIT(0), 444 442 PS_OFF_BCN = BIT(1), 445 - PS_OFF_5GHZ = BIT(2), 446 443 }; 447 444 448 445 struct carl9170_ba_stats {
+1 -1
drivers/net/wireless/ath/carl9170/fw.c
··· 237 237 ar->disable_offload = true; 238 238 } 239 239 240 - if (SUPP(CARL9170FW_PSM)) 240 + if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM)) 241 241 ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS; 242 242 243 243 if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
+18 -1
drivers/net/wireless/ath/carl9170/fwcmd.h
··· 4 4 * Firmware command interface definitions 5 5 * 6 6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> 7 - * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> 7 + * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License as published by ··· 54 54 CARL9170_CMD_BCN_CTRL = 0x05, 55 55 CARL9170_CMD_READ_TSF = 0x06, 56 56 CARL9170_CMD_RX_FILTER = 0x07, 57 + CARL9170_CMD_WOL = 0x08, 57 58 58 59 /* CAM */ 59 60 CARL9170_CMD_EKEY = 0x10, ··· 181 180 #define CARL9170_BCN_CTRL_DRAIN 0 182 181 #define CARL9170_BCN_CTRL_CAB_TRIGGER 1 183 182 183 + struct carl9170_wol_cmd { 184 + __le32 flags; 185 + u8 mac[6]; 186 + u8 bssid[6]; 187 + __le32 null_interval; 188 + __le32 free_for_use2; 189 + __le32 mask; 190 + u8 pattern[32]; 191 + } __packed; 192 + 193 + #define CARL9170_WOL_CMD_SIZE 60 194 + 195 + #define CARL9170_WOL_DISCONNECT 1 196 + #define CARL9170_WOL_MAGIC_PKT 2 197 + 184 198 struct carl9170_cmd_head { 185 199 union { 186 200 struct { ··· 219 203 struct carl9170_write_reg wreg; 220 204 struct carl9170_rf_init rf_init; 221 205 struct carl9170_psm psm; 206 + struct carl9170_wol_cmd wol; 222 207 struct carl9170_bcn_ctrl_cmd bcn_ctrl; 223 208 struct carl9170_rx_filter_cmd rx_filter; 224 209 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
+16 -2
drivers/net/wireless/ath/carl9170/fwdesc.h
··· 3 3 * 4 4 * Firmware descriptor format 5 5 * 6 - * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> 6 + * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com> 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify 9 9 * it under the terms of the GNU General Public License as published by ··· 72 72 /* Wake up on WLAN */ 73 73 CARL9170FW_WOL, 74 74 75 + /* Firmware supports PSM in the 5GHZ Band */ 76 + CARL9170FW_FIXED_5GHZ_PSM, 77 + 75 78 /* KEEP LAST */ 76 79 __CARL9170FW_FEATURE_NUM 77 80 }; ··· 85 82 #define DBG_MAGIC "DBG\0" 86 83 #define CHK_MAGIC "CHK\0" 87 84 #define TXSQ_MAGIC "TXSQ" 85 + #define WOL_MAGIC "WOL\0" 88 86 #define LAST_MAGIC "LAST" 89 87 90 88 #define CARL9170FW_SET_DAY(d) (((d) - 1) % 31) ··· 108 104 (sizeof(struct carl9170fw_desc_head)) 109 105 110 106 #define CARL9170FW_OTUS_DESC_MIN_VER 6 111 - #define CARL9170FW_OTUS_DESC_CUR_VER 6 107 + #define CARL9170FW_OTUS_DESC_CUR_VER 7 112 108 struct carl9170fw_otus_desc { 113 109 struct carl9170fw_desc_head head; 114 110 __le32 feature_set; ··· 189 185 } __packed; 190 186 #define CARL9170FW_TXSQ_DESC_SIZE \ 191 187 (sizeof(struct carl9170fw_txsq_desc)) 188 + 189 + #define CARL9170FW_WOL_DESC_MIN_VER 1 190 + #define CARL9170FW_WOL_DESC_CUR_VER 1 191 + struct carl9170fw_wol_desc { 192 + struct carl9170fw_desc_head head; 193 + 194 + __le32 supported_triggers; /* CARL9170_WOL_ */ 195 + } __packed; 196 + #define CARL9170FW_WOL_DESC_SIZE \ 197 + (sizeof(struct carl9170fw_wol_desc)) 192 198 193 199 #define CARL9170FW_LAST_DESC_MIN_VER 1 194 200 #define CARL9170FW_LAST_DESC_CUR_VER 2
+12 -1
drivers/net/wireless/ath/carl9170/hw.h
··· 4 4 * Register map, hardware-specific definitions 5 5 * 6 6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> 7 - * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> 7 + * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License as published by ··· 357 357 358 358 #define AR9170_MAC_REG_DMA_WLAN_STATUS (AR9170_MAC_REG_BASE + 0xd38) 359 359 #define AR9170_MAC_REG_DMA_STATUS (AR9170_MAC_REG_BASE + 0xd3c) 360 + #define AR9170_MAC_REG_DMA_TXQ_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd40) 361 + #define AR9170_MAC_REG_DMA_TXQ0_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd40) 362 + #define AR9170_MAC_REG_DMA_TXQ1_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd44) 363 + #define AR9170_MAC_REG_DMA_TXQ2_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd48) 364 + #define AR9170_MAC_REG_DMA_TXQ3_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd4c) 365 + #define AR9170_MAC_REG_DMA_TXQ4_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd50) 366 + #define AR9170_MAC_REG_DMA_TXQ0Q1_LEN (AR9170_MAC_REG_BASE + 0xd54) 367 + #define AR9170_MAC_REG_DMA_TXQ2Q3_LEN (AR9170_MAC_REG_BASE + 0xd58) 368 + #define AR9170_MAC_REG_DMA_TXQ4_LEN (AR9170_MAC_REG_BASE + 0xd5c) 360 369 370 + #define AR9170_MAC_REG_DMA_TXQX_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd74) 371 + #define AR9170_MAC_REG_DMA_TXQX_FAIL_ADDR (AR9170_MAC_REG_BASE + 0xd78) 361 372 #define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xd7c) 362 373 #define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f 363 374 #define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
+6 -5
drivers/net/wireless/ath/carl9170/main.c
··· 345 345 carl9170_zap_queues(ar); 346 346 347 347 /* reset QoS defaults */ 348 - CARL9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT */ 349 - CARL9170_FILL_QUEUE(ar->edcf[1], 2, 7, 15, 94); /* VIDEO */ 350 - CARL9170_FILL_QUEUE(ar->edcf[2], 2, 3, 7, 47); /* VOICE */ 351 - CARL9170_FILL_QUEUE(ar->edcf[3], 7, 15, 1023, 0); /* BACKGROUND */ 352 - CARL9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */ 348 + CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47); 349 + CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94); 350 + CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0); 351 + CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0); 352 + CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0); 353 353 354 354 ar->current_factor = ar->current_density = -1; 355 355 /* "The first key is unique." */ ··· 1577 1577 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 1578 1578 IEEE80211_HW_SUPPORTS_PS | 1579 1579 IEEE80211_HW_PS_NULLFUNC_STACK | 1580 + IEEE80211_HW_NEED_DTIM_PERIOD | 1580 1581 IEEE80211_HW_SIGNAL_DBM; 1581 1582 1582 1583 if (!modparam_noht) {
-6
drivers/net/wireless/ath/carl9170/phy.c
··· 1783 1783 } 1784 1784 } 1785 1785 1786 - /* FIXME: PSM does not work in 5GHz Band */ 1787 - if (channel->band == IEEE80211_BAND_5GHZ) 1788 - ar->ps.off_override |= PS_OFF_5GHZ; 1789 - else 1790 - ar->ps.off_override &= ~PS_OFF_5GHZ; 1791 - 1792 1786 ar->channel = channel; 1793 1787 ar->ht_settings = new_ht; 1794 1788 return 0;
+3 -3
drivers/net/wireless/ath/carl9170/version.h
··· 1 1 #ifndef __CARL9170_SHARED_VERSION_H 2 2 #define __CARL9170_SHARED_VERSION_H 3 3 #define CARL9170FW_VERSION_YEAR 11 4 - #define CARL9170FW_VERSION_MONTH 1 5 - #define CARL9170FW_VERSION_DAY 22 6 - #define CARL9170FW_VERSION_GIT "1.9.2" 4 + #define CARL9170FW_VERSION_MONTH 6 5 + #define CARL9170FW_VERSION_DAY 30 6 + #define CARL9170FW_VERSION_GIT "1.9.4" 7 7 #endif /* __CARL9170_SHARED_VERSION_H */
+11 -14
drivers/net/wireless/ath/carl9170/wlan.h
··· 4 4 * RX/TX meta descriptor format 5 5 * 6 6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> 7 - * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> 7 + * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License as published by ··· 278 278 struct carl9170_tx_superframe { 279 279 struct carl9170_tx_superdesc s; 280 280 struct ar9170_tx_frame f; 281 - } __packed; 281 + } __packed __aligned(4); 282 282 283 283 #endif /* __CARL9170FW__ */ 284 284 ··· 328 328 struct _carl9170_tx_superdesc s; 329 329 struct _ar9170_tx_hwdesc f; 330 330 u8 frame_data[0]; 331 - } __packed; 331 + } __packed __aligned(4); 332 332 333 333 #define CARL9170_TX_SUPERDESC_LEN 24 334 334 #define AR9170_TX_HWDESC_LEN 8 ··· 404 404 (t->DAidx & 0xc0) >> 6; 405 405 } 406 406 407 - enum ar9170_txq { 408 - AR9170_TXQ_BE, 409 - 410 - AR9170_TXQ_VI, 411 - AR9170_TXQ_VO, 412 - AR9170_TXQ_BK, 413 - 414 - __AR9170_NUM_TXQ, 415 - }; 416 - 417 407 /* 418 408 * This is an workaround for several undocumented bugs. 419 409 * Don't mess with the QoS/AC <-> HW Queue map, if you don't ··· 421 431 * result, this makes the device pretty much useless 422 432 * for any serious 802.11n setup. 423 433 */ 424 - static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 }; 434 + enum ar9170_txq { 435 + AR9170_TXQ_BK = 0, /* TXQ0 */ 436 + AR9170_TXQ_BE, /* TXQ1 */ 437 + AR9170_TXQ_VI, /* TXQ2 */ 438 + AR9170_TXQ_VO, /* TXQ3 */ 439 + 440 + __AR9170_NUM_TXQ, 441 + }; 425 442 426 443 #define AR9170_TXQ_DEPTH 32 427 444
+1
drivers/net/wireless/b43/dma.c
··· 1600 1600 dma_rx(ring, &slot); 1601 1601 update_max_used_slots(ring, ++used_slots); 1602 1602 } 1603 + wmb(); 1603 1604 ops->set_current_rxslot(ring, slot); 1604 1605 ring->current_slot = slot; 1605 1606 }
+1 -1
drivers/net/wireless/ipw2x00/ipw2100.c
··· 287 287 "unused", /* HOST_INTERRUPT_COALESCING */ 288 288 "undefined", 289 289 "CARD_DISABLE_PHY_OFF", 290 - "MSDU_TX_RATES" "undefined", 290 + "MSDU_TX_RATES", 291 291 "undefined", 292 292 "SET_STATION_STAT_BITS", 293 293 "CLEAR_STATIONS_STAT_BITS",
+1
drivers/net/wireless/iwlwifi/Makefile
··· 14 14 iwlagn-objs += iwl-1000.o 15 15 iwlagn-objs += iwl-2000.o 16 16 iwlagn-objs += iwl-pci.o 17 + iwlagn-objs += iwl-trans.o 17 18 18 19 iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 19 20 iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-2
drivers/net/wireless/iwlwifi/iwl-1000.c
··· 138 138 139 139 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 140 140 BIT(IEEE80211_BAND_5GHZ); 141 - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 142 141 143 142 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 144 143 if (priv->cfg->rx_with_siso_diversity) ··· 196 197 197 198 static const struct iwl_ops iwl1000_ops = { 198 199 .lib = &iwl1000_lib, 199 - .hcmd = &iwlagn_hcmd, 200 200 .utils = &iwlagn_hcmd_utils, 201 201 }; 202 202
+11 -10
drivers/net/wireless/iwlwifi/iwl-2000.c
··· 50 50 #define IWL2030_UCODE_API_MAX 5 51 51 #define IWL2000_UCODE_API_MAX 5 52 52 #define IWL105_UCODE_API_MAX 5 53 + #define IWL135_UCODE_API_MAX 5 53 54 54 55 /* Lowest firmware API version supported */ 55 56 #define IWL2030_UCODE_API_MIN 5 56 57 #define IWL2000_UCODE_API_MIN 5 57 58 #define IWL105_UCODE_API_MIN 5 59 + #define IWL135_UCODE_API_MIN 5 58 60 59 61 #define IWL2030_FW_PRE "iwlwifi-2030-" 60 62 #define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" ··· 66 64 67 65 #define IWL105_FW_PRE "iwlwifi-105-" 68 66 #define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode" 67 + 68 + #define IWL135_FW_PRE "iwlwifi-135-" 69 + #define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE #api ".ucode" 69 70 70 71 static void iwl2000_set_ct_threshold(struct iwl_priv *priv) 71 72 { ··· 136 131 137 132 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 138 133 BIT(IEEE80211_BAND_5GHZ); 139 - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 140 134 141 135 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 142 136 if (priv->cfg->rx_with_siso_diversity) ··· 197 193 198 194 static const struct iwl_ops iwl2000_ops = { 199 195 .lib = &iwl2000_lib, 200 - .hcmd = &iwlagn_hcmd, 201 196 .utils = &iwlagn_hcmd_utils, 202 197 }; 203 198 204 199 static const struct iwl_ops iwl2030_ops = { 205 200 .lib = &iwl2000_lib, 206 - .hcmd = &iwlagn_bt_hcmd, 207 201 .utils = &iwlagn_hcmd_utils, 208 202 }; 209 203 210 204 static const struct iwl_ops iwl105_ops = { 211 205 .lib = &iwl2000_lib, 212 - .hcmd = &iwlagn_hcmd, 213 206 .utils = &iwlagn_hcmd_utils, 214 207 }; 215 208 216 209 static const struct iwl_ops iwl135_ops = { 217 210 .lib = &iwl2000_lib, 218 - .hcmd = &iwlagn_bt_hcmd, 219 211 .utils = &iwlagn_hcmd_utils, 220 212 }; 221 213 ··· 344 344 }; 345 345 346 346 #define IWL_DEVICE_135 \ 347 - .fw_name_pre = IWL105_FW_PRE, \ 348 - .ucode_api_max = IWL105_UCODE_API_MAX, \ 349 - .ucode_api_min = IWL105_UCODE_API_MIN, \ 347 + 
.fw_name_pre = IWL135_FW_PRE, \ 348 + .ucode_api_max = IWL135_UCODE_API_MAX, \ 349 + .ucode_api_min = IWL135_UCODE_API_MIN, \ 350 350 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 351 351 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 352 352 .ops = &iwl135_ops, \ ··· 359 359 .rx_with_siso_diversity = true \ 360 360 361 361 struct iwl_cfg iwl135_bg_cfg = { 362 - .name = "105 Series 1x1 BG/BT", 362 + .name = "135 Series 1x1 BG/BT", 363 363 IWL_DEVICE_135, 364 364 }; 365 365 366 366 struct iwl_cfg iwl135_bgn_cfg = { 367 - .name = "105 Series 1x1 BGN/BT", 367 + .name = "135 Series 1x1 BGN/BT", 368 368 IWL_DEVICE_135, 369 369 .ht_params = &iwl2000_ht_params, 370 370 }; ··· 372 372 MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); 373 373 MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); 374 374 MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); 375 + MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
-4
drivers/net/wireless/iwlwifi/iwl-5000.c
··· 169 169 170 170 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 171 171 BIT(IEEE80211_BAND_5GHZ); 172 - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 173 172 174 173 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 175 174 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); ··· 213 214 214 215 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 215 216 BIT(IEEE80211_BAND_5GHZ); 216 - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 217 217 218 218 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 219 219 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); ··· 377 379 378 380 static const struct iwl_ops iwl5000_ops = { 379 381 .lib = &iwl5000_lib, 380 - .hcmd = &iwlagn_hcmd, 381 382 .utils = &iwlagn_hcmd_utils, 382 383 }; 383 384 384 385 static const struct iwl_ops iwl5150_ops = { 385 386 .lib = &iwl5150_lib, 386 - .hcmd = &iwlagn_hcmd, 387 387 .utils = &iwlagn_hcmd_utils, 388 388 }; 389 389
-5
drivers/net/wireless/iwlwifi/iwl-6000.c
··· 157 157 158 158 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 159 159 BIT(IEEE80211_BAND_5GHZ); 160 - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 161 160 162 161 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 163 162 if (priv->cfg->rx_with_siso_diversity) ··· 327 328 328 329 static const struct iwl_ops iwl6000_ops = { 329 330 .lib = &iwl6000_lib, 330 - .hcmd = &iwlagn_hcmd, 331 331 .utils = &iwlagn_hcmd_utils, 332 332 }; 333 333 334 334 static const struct iwl_ops iwl6050_ops = { 335 335 .lib = &iwl6000_lib, 336 - .hcmd = &iwlagn_hcmd, 337 336 .utils = &iwlagn_hcmd_utils, 338 337 .nic = &iwl6050_nic_ops, 339 338 }; 340 339 341 340 static const struct iwl_ops iwl6150_ops = { 342 341 .lib = &iwl6000_lib, 343 - .hcmd = &iwlagn_hcmd, 344 342 .utils = &iwlagn_hcmd_utils, 345 343 .nic = &iwl6150_nic_ops, 346 344 }; 347 345 348 346 static const struct iwl_ops iwl6030_ops = { 349 347 .lib = &iwl6030_lib, 350 - .hcmd = &iwlagn_bt_hcmd, 351 348 .utils = &iwlagn_hcmd_utils, 352 349 }; 353 350
+1 -15
drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
··· 205 205 return max_rssi - agc - IWLAGN_RSSI_OFFSET; 206 206 } 207 207 208 - static int iwlagn_set_pan_params(struct iwl_priv *priv) 208 + int iwlagn_set_pan_params(struct iwl_priv *priv) 209 209 { 210 210 struct iwl_wipan_params_cmd cmd; 211 211 struct iwl_rxon_context *ctx_bss, *ctx_pan; ··· 296 296 297 297 return ret; 298 298 } 299 - 300 - struct iwl_hcmd_ops iwlagn_hcmd = { 301 - .set_rxon_chain = iwlagn_set_rxon_chain, 302 - .set_tx_ant = iwlagn_send_tx_ant_config, 303 - .send_bt_config = iwl_send_bt_config, 304 - .set_pan_params = iwlagn_set_pan_params, 305 - }; 306 - 307 - struct iwl_hcmd_ops iwlagn_bt_hcmd = { 308 - .set_rxon_chain = iwlagn_set_rxon_chain, 309 - .set_tx_ant = iwlagn_send_tx_ant_config, 310 - .send_bt_config = iwlagn_send_advance_bt_config, 311 - .set_pan_params = iwlagn_set_pan_params, 312 - }; 313 299 314 300 struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = { 315 301 .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
+7 -81
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 628 628 /* the rest are 0 by default */ 629 629 }; 630 630 631 - void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 632 - { 633 - unsigned long flags; 634 - int i; 635 - spin_lock_irqsave(&rxq->lock, flags); 636 - INIT_LIST_HEAD(&rxq->rx_free); 637 - INIT_LIST_HEAD(&rxq->rx_used); 638 - /* Fill the rx_used queue with _all_ of the Rx buffers */ 639 - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 640 - /* In the reset function, these buffers may have been allocated 641 - * to an SKB, so we need to unmap and free potential storage */ 642 - if (rxq->pool[i].page != NULL) { 643 - dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma, 644 - PAGE_SIZE << priv->hw_params.rx_page_order, 645 - DMA_FROM_DEVICE); 646 - __iwl_free_pages(priv, rxq->pool[i].page); 647 - rxq->pool[i].page = NULL; 648 - } 649 - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 650 - } 651 - 652 - for (i = 0; i < RX_QUEUE_SIZE; i++) 653 - rxq->queue[i] = NULL; 654 - 655 - /* Set us so that we have processed and used all buffers, but have 656 - * not restocked the Rx queue with fresh buffers */ 657 - rxq->read = rxq->write = 0; 658 - rxq->write_actual = 0; 659 - rxq->free_count = 0; 660 - spin_unlock_irqrestore(&rxq->lock, flags); 661 - } 662 - 663 631 int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 664 632 { 665 633 u32 rb_size; ··· 699 731 { 700 732 unsigned long flags; 701 733 struct iwl_rx_queue *rxq = &priv->rxq; 702 - int ret; 703 734 704 735 /* nic_init */ 705 736 spin_lock_irqsave(&priv->lock, flags); ··· 714 747 priv->cfg->ops->lib->apm_ops.config(priv); 715 748 716 749 /* Allocate the RX queue, or reset if it is already allocated */ 717 - if (!rxq->bd) { 718 - ret = iwl_rx_queue_alloc(priv); 719 - if (ret) { 720 - IWL_ERR(priv, "Unable to initialize Rx queue\n"); 721 - return -ENOMEM; 722 - } 723 - } else 724 - iwlagn_rx_queue_reset(priv, rxq); 750 + priv->trans.ops->rx_init(priv); 725 751 726 752 iwlagn_rx_replenish(priv); 727 753 
··· 728 768 spin_unlock_irqrestore(&priv->lock, flags); 729 769 730 770 /* Allocate or reset and init all Tx and Command queues */ 731 - if (!priv->txq) { 732 - ret = iwlagn_txq_ctx_alloc(priv); 733 - if (ret) 734 - return ret; 735 - } else 736 - iwlagn_txq_ctx_reset(priv); 771 + if (priv->trans.ops->tx_init(priv)) 772 + return -ENOMEM; 737 773 738 774 if (priv->cfg->base_params->shadow_reg_enable) { 739 775 /* enable shadow regs in HW */ ··· 903 947 iwlagn_rx_allocate(priv, GFP_ATOMIC); 904 948 905 949 iwlagn_rx_queue_restock(priv); 906 - } 907 - 908 - /* Assumes that the skb field of the buffers in 'pool' is kept accurate. 909 - * If an SKB has been detached, the POOL needs to have its SKB set to NULL 910 - * This free routine walks the list of POOL entries and if SKB is set to 911 - * non NULL it is unmapped and freed 912 - */ 913 - void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 914 - { 915 - int i; 916 - for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 917 - if (rxq->pool[i].page != NULL) { 918 - dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma, 919 - PAGE_SIZE << priv->hw_params.rx_page_order, 920 - DMA_FROM_DEVICE); 921 - __iwl_free_pages(priv, rxq->pool[i].page); 922 - rxq->pool[i].page = NULL; 923 - } 924 - } 925 - 926 - dma_free_coherent(priv->bus.dev, 4 * RX_QUEUE_SIZE, 927 - rxq->bd, rxq->bd_dma); 928 - dma_free_coherent(priv->bus.dev, 929 - sizeof(struct iwl_rb_status), 930 - rxq->rb_stts, rxq->rb_stts_dma); 931 - rxq->bd = NULL; 932 - rxq->rb_stts = NULL; 933 950 } 934 951 935 952 int iwlagn_rxq_stop(struct iwl_priv *priv) ··· 1366 1437 /* set scan bit here for PAN params */ 1367 1438 set_bit(STATUS_SCAN_HW, &priv->status); 1368 1439 1369 - if (priv->cfg->ops->hcmd->set_pan_params) { 1370 - ret = priv->cfg->ops->hcmd->set_pan_params(priv); 1371 - if (ret) 1372 - return ret; 1373 - } 1440 + ret = iwlagn_set_pan_params(priv); 1441 + if (ret) 1442 + return ret; 1374 1443 1375 1444 ret = iwl_send_cmd_sync(priv, &cmd); 
1376 1445 if (ret) { 1377 1446 clear_bit(STATUS_SCAN_HW, &priv->status); 1378 - if (priv->cfg->ops->hcmd->set_pan_params) 1379 - priv->cfg->ops->hcmd->set_pan_params(priv); 1447 + iwlagn_set_pan_params(priv); 1380 1448 } 1381 1449 1382 1450 return ret;
+7 -12
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
··· 436 436 if (ret) 437 437 return ret; 438 438 439 - if (priv->cfg->ops->hcmd->set_pan_params) { 440 - ret = priv->cfg->ops->hcmd->set_pan_params(priv); 441 - if (ret) 442 - return ret; 443 - } 439 + ret = iwlagn_set_pan_params(priv); 440 + if (ret) 441 + return ret; 444 442 445 443 if (new_assoc) 446 444 return iwlagn_rxon_connect(priv, ctx); ··· 481 483 * set up the SM PS mode to OFF if an HT channel is 482 484 * configured. 483 485 */ 484 - if (priv->cfg->ops->hcmd->set_rxon_chain) 485 - for_each_context(priv, ctx) 486 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 486 + for_each_context(priv, ctx) 487 + iwlagn_set_rxon_chain(priv, ctx); 487 488 } 488 489 489 490 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ··· 738 741 iwl_set_rxon_ht(priv, &priv->current_ht_config); 739 742 } 740 743 741 - if (priv->cfg->ops->hcmd->set_rxon_chain) 742 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 744 + iwlagn_set_rxon_chain(priv, ctx); 743 745 744 746 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) 745 747 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; ··· 817 821 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) 818 822 iwlagn_commit_rxon(priv, ctx); 819 823 820 - if (priv->cfg->ops->hcmd->set_pan_params) 821 - priv->cfg->ops->hcmd->set_pan_params(priv); 824 + iwlagn_set_pan_params(priv); 822 825 }
-90
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
··· 878 878 } 879 879 880 880 /** 881 - * iwlagn_txq_ctx_alloc - allocate TX queue context 882 - * Allocate all Tx DMA structures and initialize them 883 - * 884 - * @param priv 885 - * @return error code 886 - */ 887 - int iwlagn_txq_ctx_alloc(struct iwl_priv *priv) 888 - { 889 - int ret; 890 - int txq_id, slots_num; 891 - unsigned long flags; 892 - 893 - /* Free all tx/cmd queues and keep-warm buffer */ 894 - iwlagn_hw_txq_ctx_free(priv); 895 - 896 - ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, 897 - priv->hw_params.scd_bc_tbls_size); 898 - if (ret) { 899 - IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); 900 - goto error_bc_tbls; 901 - } 902 - /* Alloc keep-warm buffer */ 903 - ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); 904 - if (ret) { 905 - IWL_ERR(priv, "Keep Warm allocation failed\n"); 906 - goto error_kw; 907 - } 908 - 909 - /* allocate tx queue structure */ 910 - ret = iwl_alloc_txq_mem(priv); 911 - if (ret) 912 - goto error; 913 - 914 - spin_lock_irqsave(&priv->lock, flags); 915 - 916 - /* Turn off all Tx DMA fifos */ 917 - iwlagn_txq_set_sched(priv, 0); 918 - 919 - /* Tell NIC where to find the "keep warm" buffer */ 920 - iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 921 - 922 - spin_unlock_irqrestore(&priv->lock, flags); 923 - 924 - /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 925 - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 926 - slots_num = (txq_id == priv->cmd_queue) ? 
927 - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 928 - ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 929 - txq_id); 930 - if (ret) { 931 - IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 932 - goto error; 933 - } 934 - } 935 - 936 - return ret; 937 - 938 - error: 939 - iwlagn_hw_txq_ctx_free(priv); 940 - iwlagn_free_dma_ptr(priv, &priv->kw); 941 - error_kw: 942 - iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); 943 - error_bc_tbls: 944 - return ret; 945 - } 946 - 947 - void iwlagn_txq_ctx_reset(struct iwl_priv *priv) 948 - { 949 - int txq_id, slots_num; 950 - unsigned long flags; 951 - 952 - spin_lock_irqsave(&priv->lock, flags); 953 - 954 - /* Turn off all Tx DMA fifos */ 955 - iwlagn_txq_set_sched(priv, 0); 956 - 957 - /* Tell NIC where to find the "keep warm" buffer */ 958 - iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 959 - 960 - spin_unlock_irqrestore(&priv->lock, flags); 961 - 962 - /* Alloc and init all Tx queues, including the command queue (#4) */ 963 - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 964 - slots_num = txq_id == priv->cmd_queue ? 965 - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 966 - iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); 967 - } 968 - } 969 - 970 - /** 971 881 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels 972 882 */ 973 883 void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
+5 -3
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
··· 386 386 spin_lock_irqsave(&priv->lock, flags); 387 387 388 388 priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR); 389 - a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET; 390 - for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET; 389 + a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND; 390 + /* reset conext data memory */ 391 + for (; a < priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_UPPER_BOUND; 391 392 a += 4) 392 393 iwl_write_targ_mem(priv, a, 0); 393 - for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET; 394 + /* reset tx status memory */ 395 + for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_MEM_UPPER_BOUND; 394 396 a += 4) 395 397 iwl_write_targ_mem(priv, a, 0); 396 398 for (; a < priv->scd_base_addr +
+25 -29
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 56 56 #include "iwl-agn-calib.h" 57 57 #include "iwl-agn.h" 58 58 #include "iwl-pci.h" 59 - 59 + #include "iwl-trans.h" 60 60 61 61 /****************************************************************************** 62 62 * ··· 90 90 { 91 91 struct iwl_rxon_context *ctx; 92 92 93 - if (priv->cfg->ops->hcmd->set_rxon_chain) { 94 - for_each_context(priv, ctx) { 95 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 96 - if (ctx->active.rx_chain != ctx->staging.rx_chain) 97 - iwlagn_commit_rxon(priv, ctx); 98 - } 93 + for_each_context(priv, ctx) { 94 + iwlagn_set_rxon_chain(priv, ctx); 95 + if (ctx->active.rx_chain != ctx->staging.rx_chain) 96 + iwlagn_commit_rxon(priv, ctx); 99 97 } 100 98 } 101 99 ··· 258 260 /* dont send host command if rf-kill is on */ 259 261 if (!iwl_is_ready_rf(priv)) 260 262 return; 261 - priv->cfg->ops->hcmd->send_bt_config(priv); 263 + iwlagn_send_advance_bt_config(priv); 262 264 } 263 265 264 266 static void iwl_bg_bt_full_concurrency(struct work_struct *work) ··· 285 287 * to avoid 3-wire collisions 286 288 */ 287 289 for_each_context(priv, ctx) { 288 - if (priv->cfg->ops->hcmd->set_rxon_chain) 289 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 290 + iwlagn_set_rxon_chain(priv, ctx); 290 291 iwlagn_commit_rxon(priv, ctx); 291 292 } 292 293 293 - priv->cfg->ops->hcmd->send_bt_config(priv); 294 + iwlagn_send_advance_bt_config(priv); 294 295 out: 295 296 mutex_unlock(&priv->mutex); 296 297 } ··· 2014 2017 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; 2015 2018 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; 2016 2019 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; 2017 - priv->cfg->ops->hcmd->send_bt_config(priv); 2020 + iwlagn_send_advance_bt_config(priv); 2018 2021 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; 2019 2022 iwlagn_send_prio_tbl(priv); 2020 2023 ··· 2027 2030 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 2028 2031 if (ret) 2029 2032 return ret; 2033 + } else { 2034 + /* 2035 + * default is 2-wire BT coexexistence support 2036 
+ */ 2037 + iwl_send_bt_config(priv); 2030 2038 } 2039 + 2031 2040 if (priv->hw_params.calib_rt_cfg) 2032 2041 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg); 2033 2042 ··· 2042 2039 priv->active_rate = IWL_RATES_MASK; 2043 2040 2044 2041 /* Configure Tx antenna selection based on H/W config */ 2045 - if (priv->cfg->ops->hcmd->set_tx_ant) 2046 - priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant); 2042 + iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant); 2047 2043 2048 2044 if (iwl_is_associated_ctx(ctx)) { 2049 2045 struct iwl_rxon_cmd *active_rxon = ··· 2056 2054 for_each_context(priv, tmp) 2057 2055 iwl_connection_init_rx_config(priv, tmp); 2058 2056 2059 - if (priv->cfg->ops->hcmd->set_rxon_chain) 2060 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2061 - } 2062 - 2063 - if (!priv->cfg->bt_params || (priv->cfg->bt_params && 2064 - !priv->cfg->bt_params->advanced_bt_coexist)) { 2065 - /* 2066 - * default is 2-wire BT coexexistence support 2067 - */ 2068 - priv->cfg->ops->hcmd->send_bt_config(priv); 2057 + iwlagn_set_rxon_chain(priv, ctx); 2069 2058 } 2070 2059 2071 2060 iwl_reset_run_time_calib(priv); ··· 3281 3288 priv->rx_statistics_jiffies = jiffies; 3282 3289 3283 3290 /* Choose which receivers/antennas to use */ 3284 - if (priv->cfg->ops->hcmd->set_rxon_chain) 3285 - priv->cfg->ops->hcmd->set_rxon_chain(priv, 3286 - &priv->contexts[IWL_RXON_CTX_BSS]); 3291 + iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); 3287 3292 3288 3293 iwl_init_scan_params(priv); 3289 3294 ··· 3508 3517 priv->bus.ops->set_drv_data(&priv->bus, priv); 3509 3518 priv->bus.dev = priv->bus.ops->get_dev(&priv->bus); 3510 3519 3520 + iwl_trans_register(&priv->trans); 3521 + 3511 3522 /* At this point both hw and priv are allocated. 
*/ 3512 3523 3513 3524 SET_IEEE80211_DEV(hw, priv->bus.dev); ··· 3709 3716 3710 3717 iwl_dealloc_ucode(priv); 3711 3718 3712 - if (priv->rxq.bd) 3713 - iwlagn_rx_queue_free(priv, &priv->rxq); 3719 + priv->trans.ops->rx_free(priv); 3714 3720 iwlagn_hw_txq_ctx_free(priv); 3715 3721 3716 3722 iwl_eeprom_free(priv); ··· 3811 3819 3812 3820 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); 3813 3821 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); 3822 + 3823 + module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); 3824 + MODULE_PARM_DESC(wd_disable, 3825 + "Disable stuck queue watchdog timer (default: 0 [enabled])"); 3814 3826 3815 3827 /* 3816 3828 * set bt_coex_active to true, uCode will do kill/defer
+1 -4
drivers/net/wireless/iwlwifi/iwl-agn.h
··· 182 182 u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); 183 183 const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, 184 184 size_t offset); 185 - void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 186 185 int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 187 186 int iwlagn_hw_nic_init(struct iwl_priv *priv); 188 187 int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv); ··· 193 194 void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority); 194 195 void iwlagn_rx_replenish(struct iwl_priv *priv); 195 196 void iwlagn_rx_replenish_now(struct iwl_priv *priv); 196 - void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 197 197 int iwlagn_rxq_stop(struct iwl_priv *priv); 198 198 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); 199 199 void iwl_setup_rx_handlers(struct iwl_priv *priv); ··· 218 220 struct iwl_rx_mem_buffer *rxb); 219 221 int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); 220 222 void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv); 221 - int iwlagn_txq_ctx_alloc(struct iwl_priv *priv); 222 - void iwlagn_txq_ctx_reset(struct iwl_priv *priv); 223 223 void iwlagn_txq_ctx_stop(struct iwl_priv *priv); 224 224 225 225 static inline u32 iwl_tx_status_to_mac80211(u32 status) ··· 256 260 /* hcmd */ 257 261 int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); 258 262 int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 263 + int iwlagn_set_pan_params(struct iwl_priv *priv); 259 264 260 265 /* bt coex */ 261 266 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
+3 -19
drivers/net/wireless/iwlwifi/iwl-core.c
··· 585 585 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; 586 586 } 587 587 588 - if (priv->cfg->ops->hcmd->set_rxon_chain) 589 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 588 + iwlagn_set_rxon_chain(priv, ctx); 590 589 591 590 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " 592 591 "extension channel offset 0x%x\n", ··· 1215 1216 { 1216 1217 iwl_connection_init_rx_config(priv, ctx); 1217 1218 1218 - if (priv->cfg->ops->hcmd->set_rxon_chain) 1219 - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 1219 + iwlagn_set_rxon_chain(priv, ctx); 1220 1220 1221 1221 return iwlagn_commit_rxon(priv, ctx); 1222 1222 } ··· 1368 1370 1369 1371 IWL_DEBUG_MAC80211(priv, "leave\n"); 1370 1372 1371 - } 1372 - 1373 - int iwl_alloc_txq_mem(struct iwl_priv *priv) 1374 - { 1375 - if (!priv->txq) 1376 - priv->txq = kzalloc( 1377 - sizeof(struct iwl_tx_queue) * 1378 - priv->cfg->base_params->num_of_queues, 1379 - GFP_KERNEL); 1380 - if (!priv->txq) { 1381 - IWL_ERR(priv, "Not enough memory for txq\n"); 1382 - return -ENOMEM; 1383 - } 1384 - return 0; 1385 1373 } 1386 1374 1387 1375 void iwl_free_txq_mem(struct iwl_priv *priv) ··· 1837 1853 { 1838 1854 unsigned int timeout = priv->cfg->base_params->wd_timeout; 1839 1855 1840 - if (timeout) 1856 + if (timeout && !iwlagn_mod_params.wd_disable) 1841 1857 mod_timer(&priv->watchdog, 1842 1858 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); 1843 1859 else
+3 -15
drivers/net/wireless/iwlwifi/iwl-core.h
··· 80 80 81 81 #define IWL_CMD(x) case x: return #x 82 82 83 - struct iwl_hcmd_ops { 84 - void (*set_rxon_chain)(struct iwl_priv *priv, 85 - struct iwl_rxon_context *ctx); 86 - int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant); 87 - void (*send_bt_config)(struct iwl_priv *priv); 88 - int (*set_pan_params)(struct iwl_priv *priv); 89 - }; 90 - 91 83 struct iwl_hcmd_utils_ops { 92 84 u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data); 93 85 void (*gain_computation)(struct iwl_priv *priv, ··· 138 146 139 147 struct iwl_ops { 140 148 const struct iwl_lib_ops *lib; 141 - const struct iwl_hcmd_ops *hcmd; 142 149 const struct iwl_hcmd_utils_ops *utils; 143 150 const struct iwl_nic_ops *nic; 144 151 }; ··· 151 160 int restart_fw; /* def: 1 = restart firmware */ 152 161 bool plcp_check; /* def: true = enable plcp health check */ 153 162 bool ack_check; /* def: false = disable ack health check */ 163 + bool wd_disable; /* def: false = enable stuck queue check */ 154 164 bool bt_coex_active; /* def: true = enable bt coex */ 155 165 int led_mode; /* def: 0 = system default */ 156 166 bool no_sleep_autoadjust; /* def: true = disable autoadjust */ ··· 328 336 int iwl_mac_change_interface(struct ieee80211_hw *hw, 329 337 struct ieee80211_vif *vif, 330 338 enum nl80211_iftype newtype, bool newp2p); 331 - int iwl_alloc_txq_mem(struct iwl_priv *priv); 332 339 void iwl_free_txq_mem(struct iwl_priv *priv); 333 340 334 341 #ifdef CONFIG_IWLWIFI_DEBUGFS ··· 373 382 ******************************************************/ 374 383 void iwl_cmd_queue_free(struct iwl_priv *priv); 375 384 void iwl_cmd_queue_unmap(struct iwl_priv *priv); 376 - int iwl_rx_queue_alloc(struct iwl_priv *priv); 377 385 void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 378 386 struct iwl_rx_queue *q); 379 387 int iwl_rx_queue_space(const struct iwl_rx_queue *q); ··· 386 396 * TX 387 397 ******************************************************/ 388 398 void 
iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 389 - int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 390 - int slots_num, u32 txq_id); 391 - void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 392 - int slots_num, u32 txq_id); 393 399 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 400 + int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, 401 + int count, int slots_num, u32 id); 394 402 void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id); 395 403 void iwl_setup_watchdog(struct iwl_priv *priv); 396 404 /*****************************************************
+20 -2
drivers/net/wireless/iwlwifi/iwl-dev.h
··· 666 666 u16 max_rxq_size; 667 667 u16 max_rxq_log; 668 668 u32 rx_page_order; 669 - u32 rx_wrt_ptr_reg; 670 669 u8 max_stations; 671 670 u8 ht40_channel; 672 671 u8 max_beacon_itrvl; /* in 1024 ms */ ··· 1227 1228 unsigned int irq; 1228 1229 }; 1229 1230 1231 + struct iwl_trans; 1232 + 1233 + /** 1234 + * struct iwl_trans_ops - transport specific operations 1235 + 1236 + * @rx_init: inits the rx memory, allocate it if needed 1237 + * @rx_free: frees the rx memory 1238 + * @tx_init:inits the tx memory, allocate if needed 1239 + */ 1240 + struct iwl_trans_ops { 1241 + int (*rx_init)(struct iwl_priv *priv); 1242 + void (*rx_free)(struct iwl_priv *priv); 1243 + int (*tx_init)(struct iwl_priv *priv); 1244 + }; 1245 + 1246 + struct iwl_trans { 1247 + const struct iwl_trans_ops *ops; 1248 + }; 1249 + 1230 1250 struct iwl_priv { 1231 1251 1232 1252 /* ieee device used by generic ieee processing code */ ··· 1314 1296 struct mutex mutex; 1315 1297 1316 1298 struct iwl_bus bus; /* bus specific data */ 1299 + struct iwl_trans trans; 1317 1300 1318 1301 /* microcode/device supports multiple contexts */ 1319 1302 u8 valid_contexts; 1320 1303 1321 1304 /* command queue number */ 1322 1305 u8 cmd_queue; 1323 - u8 last_sync_cmd_id; 1324 1306 1325 1307 /* max number of station keys */ 1326 1308 u8 sta_key_max_num;
+3 -10
drivers/net/wireless/iwlwifi/iwl-hcmd.c
··· 171 171 int cmd_idx; 172 172 int ret; 173 173 174 + lockdep_assert_held(&priv->mutex); 175 + 174 176 if (WARN_ON(cmd->flags & CMD_ASYNC)) 175 177 return -EINVAL; 176 178 ··· 183 181 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", 184 182 get_cmd_string(cmd->id)); 185 183 186 - if (test_and_set_bit(STATUS_HCMD_ACTIVE, &priv->status)) { 187 - IWL_ERR(priv, "STATUS_HCMD_ACTIVE already set while sending %s" 188 - ". Previous SYNC cmdn is %s\n", 189 - get_cmd_string(cmd->id), 190 - get_cmd_string(priv->last_sync_cmd_id)); 191 - WARN_ON(1); 192 - } else { 193 - priv->last_sync_cmd_id = cmd->id; 194 - } 195 - 184 + set_bit(STATUS_HCMD_ACTIVE, &priv->status); 196 185 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", 197 186 get_cmd_string(cmd->id)); 198 187
+2 -1
drivers/net/wireless/iwlwifi/iwl-pci.c
··· 67 67 #include "iwl-agn.h" 68 68 #include "iwl-core.h" 69 69 #include "iwl-io.h" 70 + #include "iwl-trans.h" 70 71 71 72 /* PCI registers */ 72 73 #define PCI_CFG_RETRY_TIMEOUT 0x041 ··· 94 93 u16 pci_lnk_ctl; 95 94 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); 96 95 97 - pos = pci_find_capability(pci_dev, PCI_CAP_ID_EXP); 96 + pos = pci_pcie_cap(pci_dev); 98 97 pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); 99 98 return pci_lnk_ctl; 100 99 }
+14 -5
drivers/net/wireless/iwlwifi/iwl-prph.h
··· 168 168 * the scheduler (especially for queue #4/#9, the command queue, otherwise 169 169 * the driver can't issue commands!): 170 170 */ 171 + #define SCD_MEM_LOWER_BOUND (0x0000) 171 172 172 173 /** 173 174 * Max Tx window size is the max number of contiguous TFDs that the scheduler ··· 198 197 #define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 199 198 #define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 200 199 201 - #define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600) 202 - #define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1) 203 - #define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0) 200 + /* Context Data */ 201 + #define IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) 202 + #define IWLAGN_SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) 203 + 204 + /* Tx status */ 205 + #define IWLAGN_SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) 206 + #define IWLAGN_SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) 207 + 208 + /* Translation Data */ 209 + #define IWLAGN_SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) 210 + #define IWLAGN_SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808) 204 211 205 212 #define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\ 206 - (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) 213 + (IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) 207 214 208 215 #define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 209 - ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) 216 + ((IWLAGN_SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) 210 217 211 218 #define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \ 212 219 (((1<<(priv)->hw_params.max_txq_num) - 1) &\
+3 -44
drivers/net/wireless/iwlwifi/iwl-rx.c
··· 134 134 void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) 135 135 { 136 136 unsigned long flags; 137 - u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; 138 137 u32 reg; 139 138 140 139 spin_lock_irqsave(&q->lock, flags); ··· 145 146 /* shadow register enabled */ 146 147 /* Device expects a multiple of 8 */ 147 148 q->write_actual = (q->write & ~0x7); 148 - iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual); 149 + iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual); 149 150 } else { 150 151 /* If power-saving is in use, make sure device is awake */ 151 152 if (test_bit(STATUS_POWER_PMI, &priv->status)) { ··· 161 162 } 162 163 163 164 q->write_actual = (q->write & ~0x7); 164 - iwl_write_direct32(priv, rx_wrt_ptr_reg, 165 + iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, 165 166 q->write_actual); 166 167 167 168 /* Else device is assumed to be awake */ 168 169 } else { 169 170 /* Device expects a multiple of 8 */ 170 171 q->write_actual = (q->write & ~0x7); 171 - iwl_write_direct32(priv, rx_wrt_ptr_reg, 172 + iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, 172 173 q->write_actual); 173 174 } 174 175 } ··· 176 177 177 178 exit_unlock: 178 179 spin_unlock_irqrestore(&q->lock, flags); 179 - } 180 - 181 - int iwl_rx_queue_alloc(struct iwl_priv *priv) 182 - { 183 - struct iwl_rx_queue *rxq = &priv->rxq; 184 - struct device *dev = priv->bus.dev; 185 - int i; 186 - 187 - spin_lock_init(&rxq->lock); 188 - INIT_LIST_HEAD(&rxq->rx_free); 189 - INIT_LIST_HEAD(&rxq->rx_used); 190 - 191 - /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 192 - rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 193 - GFP_KERNEL); 194 - if (!rxq->bd) 195 - goto err_bd; 196 - 197 - rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), 198 - &rxq->rb_stts_dma, GFP_KERNEL); 199 - if (!rxq->rb_stts) 200 - goto err_rb; 201 - 202 - /* Fill the rx_used queue with _all_ of the Rx buffers */ 203 - for (i = 0; i < 
RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 204 - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 205 - 206 - /* Set us so that we have processed and used all buffers, but have 207 - * not restocked the Rx queue with fresh buffers */ 208 - rxq->read = rxq->write = 0; 209 - rxq->write_actual = 0; 210 - rxq->free_count = 0; 211 - rxq->need_update = 0; 212 - return 0; 213 - 214 - err_rb: 215 - dma_free_coherent(dev, 4 * RX_QUEUE_SIZE, rxq->bd, 216 - rxq->bd_dma); 217 - err_bd: 218 - return -ENOMEM; 219 180 } 220 181 221 182 /******************************************************************************
+132 -104
drivers/net/wireless/iwlwifi/iwl-testmode.h
··· 66 66 #include <linux/types.h> 67 67 68 68 69 - /* Commands from user space to kernel space(IWL_TM_CMD_ID_APP2DEV_XX) and 69 + /* 70 + * Commands from user space to kernel space(IWL_TM_CMD_ID_APP2DEV_XX) and 70 71 * from and kernel space to user space(IWL_TM_CMD_ID_DEV2APP_XX). 71 - * The command ID is carried with IWL_TM_ATTR_COMMAND. There are three types of 72 - * of command from user space and two types of command from kernel space. 73 - * See below. 72 + * The command ID is carried with IWL_TM_ATTR_COMMAND. 73 + * 74 + * @IWL_TM_CMD_APP2DEV_UCODE: 75 + * commands from user application to the uCode, 76 + * the actual uCode host command ID is carried with 77 + * IWL_TM_ATTR_UCODE_CMD_ID 78 + * 79 + * @IWL_TM_CMD_APP2DEV_REG_READ32: 80 + * @IWL_TM_CMD_APP2DEV_REG_WRITE32: 81 + * @IWL_TM_CMD_APP2DEV_REG_WRITE8: 82 + * commands from user applicaiton to access register 83 + * 84 + * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name 85 + * @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image 86 + * @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration 87 + * @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image 88 + * @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data 89 + * @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set fix MCS 90 + * commands fom user space for pure driver level operations 91 + * 92 + * @IWL_TM_CMD_APP2DEV_BEGIN_TRACE: 93 + * @IWL_TM_CMD_APP2DEV_END_TRACE: 94 + * @IWL_TM_CMD_APP2DEV_READ_TRACE: 95 + * commands fom user space for uCode trace operations 96 + * 97 + * @IWL_TM_CMD_DEV2APP_SYNC_RSP: 98 + * commands from kernel space to carry the synchronous response 99 + * to user application 100 + * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT: 101 + * commands from kernel space to multicast the spontaneous messages 102 + * to user application 103 + * @IWL_TM_CMD_DEV2APP_EEPROM_RSP: 104 + * commands from kernel space to carry the eeprom response 105 + * to user application 74 106 */ 75 107 enum iwl_tm_cmd_t { 76 - /* commands from user 
application to the uCode, 77 - * the actual uCode host command ID is carried with 78 - * IWL_TM_ATTR_UCODE_CMD_ID */ 79 - IWL_TM_CMD_APP2DEV_UCODE = 1, 80 - 81 - /* commands from user applicaiton to access register */ 82 - IWL_TM_CMD_APP2DEV_REG_READ32, 83 - IWL_TM_CMD_APP2DEV_REG_WRITE32, 84 - IWL_TM_CMD_APP2DEV_REG_WRITE8, 85 - 86 - /* commands fom user space for pure driver level operations */ 87 - IWL_TM_CMD_APP2DEV_GET_DEVICENAME, 88 - IWL_TM_CMD_APP2DEV_LOAD_INIT_FW, 89 - IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB, 90 - IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW, 91 - IWL_TM_CMD_APP2DEV_GET_EEPROM, 92 - IWL_TM_CMD_APP2DEV_FIXRATE_REQ, 93 - /* if there is other new command for the driver layer operation, 94 - * append them here */ 95 - 96 - /* commands fom user space for uCode trace operations */ 97 - IWL_TM_CMD_APP2DEV_BEGIN_TRACE, 98 - IWL_TM_CMD_APP2DEV_END_TRACE, 99 - IWL_TM_CMD_APP2DEV_READ_TRACE, 100 - 101 - /* commands from kernel space to carry the synchronous response 102 - * to user application */ 103 - IWL_TM_CMD_DEV2APP_SYNC_RSP, 104 - 105 - /* commands from kernel space to multicast the spontaneous messages 106 - * to user application */ 107 - IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, 108 - 109 - /* commands from kernel space to carry the eeprom response 110 - * to user application */ 111 - IWL_TM_CMD_DEV2APP_EEPROM_RSP, 112 - 113 - IWL_TM_CMD_MAX, 108 + IWL_TM_CMD_APP2DEV_UCODE = 1, 109 + IWL_TM_CMD_APP2DEV_REG_READ32 = 2, 110 + IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3, 111 + IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4, 112 + IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5, 113 + IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6, 114 + IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7, 115 + IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8, 116 + IWL_TM_CMD_APP2DEV_GET_EEPROM = 9, 117 + IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10, 118 + IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11, 119 + IWL_TM_CMD_APP2DEV_END_TRACE = 12, 120 + IWL_TM_CMD_APP2DEV_READ_TRACE = 13, 121 + IWL_TM_CMD_DEV2APP_SYNC_RSP = 14, 122 + IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15, 123 
+ IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16, 124 + IWL_TM_CMD_MAX = 17, 114 125 }; 115 126 127 + /* 128 + * Atrribute filed in testmode command 129 + * See enum iwl_tm_cmd_t. 130 + * 131 + * @IWL_TM_ATTR_NOT_APPLICABLE: 132 + * The attribute is not applicable or invalid 133 + * @IWL_TM_ATTR_COMMAND: 134 + * From user space to kernel space: 135 + * the command either destines to ucode, driver, or register; 136 + * From kernel space to user space: 137 + * the command either carries synchronous response, 138 + * or the spontaneous message multicast from the device; 139 + * 140 + * @IWL_TM_ATTR_UCODE_CMD_ID: 141 + * @IWL_TM_ATTR_UCODE_CMD_DATA: 142 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE, 143 + * The mandatory fields are : 144 + * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID; 145 + * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands; 146 + * The optional fields are: 147 + * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload 148 + * to the ucode 149 + * 150 + * @IWL_TM_ATTR_REG_OFFSET: 151 + * @IWL_TM_ATTR_REG_VALUE8: 152 + * @IWL_TM_ATTR_REG_VALUE32: 153 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX, 154 + * The mandatory fields are: 155 + * IWL_TM_ATTR_REG_OFFSET for the offset of the target register; 156 + * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value 157 + * 158 + * @IWL_TM_ATTR_SYNC_RSP: 159 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP, 160 + * The mandatory fields are: 161 + * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user 162 + * application command 163 + * 164 + * @IWL_TM_ATTR_UCODE_RX_PKT: 165 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, 166 + * The mandatory fields are: 167 + * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user 168 + * application 169 + * 170 + * @IWL_TM_ATTR_EEPROM: 171 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM, 172 + * The mandatory fields are: 173 + * IWL_TM_ATTR_EEPROM for the data content 
responging to the user 174 + * application 175 + * 176 + * @IWL_TM_ATTR_TRACE_ADDR: 177 + * @IWL_TM_ATTR_TRACE_SIZE: 178 + * @IWL_TM_ATTR_TRACE_DUMP: 179 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE, 180 + * The mandatory fields are: 181 + * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address 182 + * IWL_TM_ATTR_MEM_TRACE_SIZE for the trace buffer size 183 + * IWL_TM_ATTR_MEM_TRACE_DUMP for the trace dump 184 + * 185 + * @IWL_TM_ATTR_FIXRATE: 186 + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ, 187 + * The mandatory fields are: 188 + * IWL_TM_ATTR_FIXRATE for the fixed rate 189 + * 190 + */ 116 191 enum iwl_tm_attr_t { 117 - IWL_TM_ATTR_NOT_APPLICABLE = 0, 118 - 119 - /* From user space to kernel space: 120 - * the command either destines to ucode, driver, or register; 121 - * See enum iwl_tm_cmd_t. 122 - * 123 - * From kernel space to user space: 124 - * the command either carries synchronous response, 125 - * or the spontaneous message multicast from the device; 126 - * See enum iwl_tm_cmd_t. 
*/ 127 - IWL_TM_ATTR_COMMAND, 128 - 129 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE, 130 - * The mandatory fields are : 131 - * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID; 132 - * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands; 133 - * The optional fields are: 134 - * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload 135 - * to the ucode */ 136 - IWL_TM_ATTR_UCODE_CMD_ID, 137 - IWL_TM_ATTR_UCODE_CMD_DATA, 138 - 139 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX, 140 - * The mandatory fields are: 141 - * IWL_TM_ATTR_REG_OFFSET for the offset of the target register; 142 - * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value */ 143 - IWL_TM_ATTR_REG_OFFSET, 144 - IWL_TM_ATTR_REG_VALUE8, 145 - IWL_TM_ATTR_REG_VALUE32, 146 - 147 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP, 148 - * The mandatory fields are: 149 - * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user 150 - * application command */ 151 - IWL_TM_ATTR_SYNC_RSP, 152 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, 153 - * The mandatory fields are: 154 - * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user 155 - * application */ 156 - IWL_TM_ATTR_UCODE_RX_PKT, 157 - 158 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM, 159 - * The mandatory fields are: 160 - * IWL_TM_ATTR_EEPROM for the data content responging to the user 161 - * application */ 162 - IWL_TM_ATTR_EEPROM, 163 - 164 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE, 165 - * The mandatory fields are: 166 - * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address 167 - */ 168 - IWL_TM_ATTR_TRACE_ADDR, 169 - IWL_TM_ATTR_TRACE_SIZE, 170 - IWL_TM_ATTR_TRACE_DUMP, 171 - 172 - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ, 173 - * The mandatory fields are: 174 - * IWL_TM_ATTR_FIXRATE for the fixed rate 175 - */ 176 - IWL_TM_ATTR_FIXRATE, 177 - 178 - IWL_TM_ATTR_MAX, 192 + IWL_TM_ATTR_NOT_APPLICABLE 
= 0, 193 + IWL_TM_ATTR_COMMAND = 1, 194 + IWL_TM_ATTR_UCODE_CMD_ID = 2, 195 + IWL_TM_ATTR_UCODE_CMD_DATA = 3, 196 + IWL_TM_ATTR_REG_OFFSET = 4, 197 + IWL_TM_ATTR_REG_VALUE8 = 5, 198 + IWL_TM_ATTR_REG_VALUE32 = 6, 199 + IWL_TM_ATTR_SYNC_RSP = 7, 200 + IWL_TM_ATTR_UCODE_RX_PKT = 8, 201 + IWL_TM_ATTR_EEPROM = 9, 202 + IWL_TM_ATTR_TRACE_ADDR = 10, 203 + IWL_TM_ATTR_TRACE_SIZE = 11, 204 + IWL_TM_ATTR_TRACE_DUMP = 12, 205 + IWL_TM_ATTR_FIXRATE = 13, 206 + IWL_TM_ATTR_MAX = 14, 179 207 }; 180 208 181 209 /* uCode trace buffer */
+423
drivers/net/wireless/iwlwifi/iwl-trans.c
··· 1 + /****************************************************************************** 2 + * 3 + * This file is provided under a dual BSD/GPLv2 license. When using or 4 + * redistributing this file, you may do so under either license. 5 + * 6 + * GPL LICENSE SUMMARY 7 + * 8 + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of version 2 of the GNU General Public License as 12 + * published by the Free Software Foundation. 13 + * 14 + * This program is distributed in the hope that it will be useful, but 15 + * WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 22 + * USA 23 + * 24 + * The full GNU General Public License is included in this distribution 25 + * in the file called LICENSE.GPL. 26 + * 27 + * Contact Information: 28 + * Intel Linux Wireless <ilw@linux.intel.com> 29 + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 30 + * 31 + * BSD LICENSE 32 + * 33 + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. 34 + * All rights reserved. 35 + * 36 + * Redistribution and use in source and binary forms, with or without 37 + * modification, are permitted provided that the following conditions 38 + * are met: 39 + * 40 + * * Redistributions of source code must retain the above copyright 41 + * notice, this list of conditions and the following disclaimer. 
42 + * * Redistributions in binary form must reproduce the above copyright 43 + * notice, this list of conditions and the following disclaimer in 44 + * the documentation and/or other materials provided with the 45 + * distribution. 46 + * * Neither the name Intel Corporation nor the names of its 47 + * contributors may be used to endorse or promote products derived 48 + * from this software without specific prior written permission. 49 + * 50 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 51 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 52 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 53 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 54 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 55 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 56 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 60 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
61 + * 62 + *****************************************************************************/ 63 + #include "iwl-dev.h" 64 + #include "iwl-trans.h" 65 + #include "iwl-core.h" 66 + #include "iwl-helpers.h" 67 + /*TODO remove uneeded includes when the transport layer tx_free will be here */ 68 + #include "iwl-agn.h" 69 + 70 + static int iwl_trans_rx_alloc(struct iwl_priv *priv) 71 + { 72 + struct iwl_rx_queue *rxq = &priv->rxq; 73 + struct device *dev = priv->bus.dev; 74 + 75 + memset(&priv->rxq, 0, sizeof(priv->rxq)); 76 + 77 + spin_lock_init(&rxq->lock); 78 + INIT_LIST_HEAD(&rxq->rx_free); 79 + INIT_LIST_HEAD(&rxq->rx_used); 80 + 81 + if (WARN_ON(rxq->bd || rxq->rb_stts)) 82 + return -EINVAL; 83 + 84 + /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ 85 + rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, 86 + &rxq->bd_dma, GFP_KERNEL); 87 + if (!rxq->bd) 88 + goto err_bd; 89 + memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE); 90 + 91 + /*Allocate the driver's pointer to receive buffer status */ 92 + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts), 93 + &rxq->rb_stts_dma, GFP_KERNEL); 94 + if (!rxq->rb_stts) 95 + goto err_rb_stts; 96 + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 97 + 98 + return 0; 99 + 100 + err_rb_stts: 101 + dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, 102 + rxq->bd, rxq->bd_dma); 103 + memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); 104 + rxq->bd = NULL; 105 + err_bd: 106 + return -ENOMEM; 107 + } 108 + 109 + static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv) 110 + { 111 + struct iwl_rx_queue *rxq = &priv->rxq; 112 + int i; 113 + 114 + /* Fill the rx_used queue with _all_ of the Rx buffers */ 115 + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 116 + /* In the reset function, these buffers may have been allocated 117 + * to an SKB, so we need to unmap and free potential storage */ 118 + if (rxq->pool[i].page != NULL) { 119 + dma_unmap_page(priv->bus.dev, 
rxq->pool[i].page_dma, 120 + PAGE_SIZE << priv->hw_params.rx_page_order, 121 + DMA_FROM_DEVICE); 122 + __iwl_free_pages(priv, rxq->pool[i].page); 123 + rxq->pool[i].page = NULL; 124 + } 125 + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 126 + } 127 + } 128 + 129 + static int iwl_trans_rx_init(struct iwl_priv *priv) 130 + { 131 + struct iwl_rx_queue *rxq = &priv->rxq; 132 + int i, err; 133 + unsigned long flags; 134 + 135 + if (!rxq->bd) { 136 + err = iwl_trans_rx_alloc(priv); 137 + if (err) 138 + return err; 139 + } 140 + 141 + spin_lock_irqsave(&rxq->lock, flags); 142 + INIT_LIST_HEAD(&rxq->rx_free); 143 + INIT_LIST_HEAD(&rxq->rx_used); 144 + 145 + iwl_trans_rxq_free_rx_bufs(priv); 146 + 147 + for (i = 0; i < RX_QUEUE_SIZE; i++) 148 + rxq->queue[i] = NULL; 149 + 150 + /* Set us so that we have processed and used all buffers, but have 151 + * not restocked the Rx queue with fresh buffers */ 152 + rxq->read = rxq->write = 0; 153 + rxq->write_actual = 0; 154 + rxq->free_count = 0; 155 + spin_unlock_irqrestore(&rxq->lock, flags); 156 + 157 + return 0; 158 + } 159 + 160 + static void iwl_trans_rx_free(struct iwl_priv *priv) 161 + { 162 + struct iwl_rx_queue *rxq = &priv->rxq; 163 + unsigned long flags; 164 + 165 + /*if rxq->bd is NULL, it means that nothing has been allocated, 166 + * exit now */ 167 + if (!rxq->bd) { 168 + IWL_DEBUG_INFO(priv, "Free NULL rx context\n"); 169 + return; 170 + } 171 + 172 + spin_lock_irqsave(&rxq->lock, flags); 173 + iwl_trans_rxq_free_rx_bufs(priv); 174 + spin_unlock_irqrestore(&rxq->lock, flags); 175 + 176 + dma_free_coherent(priv->bus.dev, sizeof(__le32) * RX_QUEUE_SIZE, 177 + rxq->bd, rxq->bd_dma); 178 + memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); 179 + rxq->bd = NULL; 180 + 181 + if (rxq->rb_stts) 182 + dma_free_coherent(priv->bus.dev, 183 + sizeof(struct iwl_rb_status), 184 + rxq->rb_stts, rxq->rb_stts_dma); 185 + else 186 + IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n"); 187 + memset(&rxq->rb_stts_dma, 0, 
sizeof(rxq->rb_stts_dma)); 188 + rxq->rb_stts = NULL; 189 + } 190 + 191 + /* TODO:remove this code duplication */ 192 + static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, 193 + struct iwl_dma_ptr *ptr, size_t size) 194 + { 195 + if (WARN_ON(ptr->addr)) 196 + return -EINVAL; 197 + 198 + ptr->addr = dma_alloc_coherent(priv->bus.dev, size, 199 + &ptr->dma, GFP_KERNEL); 200 + if (!ptr->addr) 201 + return -ENOMEM; 202 + ptr->size = size; 203 + return 0; 204 + } 205 + 206 + static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq, 207 + int slots_num, u32 txq_id) 208 + { 209 + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 210 + int i; 211 + 212 + if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds)) 213 + return -EINVAL; 214 + 215 + txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num, 216 + GFP_KERNEL); 217 + txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num, 218 + GFP_KERNEL); 219 + 220 + if (!txq->meta || !txq->cmd) 221 + goto error; 222 + 223 + for (i = 0; i < slots_num; i++) { 224 + txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), 225 + GFP_KERNEL); 226 + if (!txq->cmd[i]) 227 + goto error; 228 + } 229 + 230 + /* Alloc driver data array and TFD circular buffer */ 231 + /* Driver private data, only for Tx (not command) queues, 232 + * not shared with device. 
*/ 233 + if (txq_id != priv->cmd_queue) { 234 + txq->txb = kzalloc(sizeof(txq->txb[0]) * 235 + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 236 + if (!txq->txb) { 237 + IWL_ERR(priv, "kmalloc for auxiliary BD " 238 + "structures failed\n"); 239 + goto error; 240 + } 241 + } else { 242 + txq->txb = NULL; 243 + } 244 + 245 + /* Circular buffer of transmit frame descriptors (TFDs), 246 + * shared with device */ 247 + txq->tfds = dma_alloc_coherent(priv->bus.dev, tfd_sz, &txq->q.dma_addr, 248 + GFP_KERNEL); 249 + if (!txq->tfds) { 250 + IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz); 251 + goto error; 252 + } 253 + txq->q.id = txq_id; 254 + 255 + return 0; 256 + error: 257 + kfree(txq->txb); 258 + txq->txb = NULL; 259 + /* since txq->cmd has been zeroed, 260 + * all non allocated cmd[i] will be NULL */ 261 + if (txq->cmd) 262 + for (i = 0; i < slots_num; i++) 263 + kfree(txq->cmd[i]); 264 + kfree(txq->meta); 265 + kfree(txq->cmd); 266 + txq->meta = NULL; 267 + txq->cmd = NULL; 268 + 269 + return -ENOMEM; 270 + 271 + } 272 + 273 + static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 274 + int slots_num, u32 txq_id) 275 + { 276 + int ret; 277 + 278 + txq->need_update = 0; 279 + memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num); 280 + 281 + /* 282 + * For the default queues 0-3, set up the swq_id 283 + * already -- all others need to get one later 284 + * (if they need one at all). 285 + */ 286 + if (txq_id < 4) 287 + iwl_set_swq_id(txq, txq_id, txq_id); 288 + 289 + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 290 + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
*/ 291 + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 292 + 293 + /* Initialize queue's high/low-water marks, and head/tail indexes */ 294 + ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, 295 + txq_id); 296 + if (ret) 297 + return ret; 298 + 299 + /* 300 + * Tell nic where to find circular buffer of Tx Frame Descriptors for 301 + * given Tx queue, and enable the DMA channel used for that queue. 302 + * Circular buffer (TFD queue in DRAM) physical base address */ 303 + iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), 304 + txq->q.dma_addr >> 8); 305 + 306 + return 0; 307 + } 308 + 309 + /** 310 + * iwl_trans_tx_alloc - allocate TX context 311 + * Allocate all Tx DMA structures and initialize them 312 + * 313 + * @param priv 314 + * @return error code 315 + */ 316 + static int iwl_trans_tx_alloc(struct iwl_priv *priv) 317 + { 318 + int ret; 319 + int txq_id, slots_num; 320 + 321 + /*It is not allowed to alloc twice, so warn when this happens. 322 + * We cannot rely on the previous allocation, so free and fail */ 323 + if (WARN_ON(priv->txq)) { 324 + ret = -EINVAL; 325 + goto error; 326 + } 327 + 328 + ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, 329 + priv->hw_params.scd_bc_tbls_size); 330 + if (ret) { 331 + IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); 332 + goto error; 333 + } 334 + 335 + /* Alloc keep-warm buffer */ 336 + ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); 337 + if (ret) { 338 + IWL_ERR(priv, "Keep Warm allocation failed\n"); 339 + goto error; 340 + } 341 + 342 + priv->txq = kzalloc(sizeof(struct iwl_tx_queue) * 343 + priv->cfg->base_params->num_of_queues, GFP_KERNEL); 344 + if (!priv->txq) { 345 + IWL_ERR(priv, "Not enough memory for txq\n"); 346 + ret = ENOMEM; 347 + goto error; 348 + } 349 + 350 + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 351 + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 352 + slots_num = (txq_id == 
priv->cmd_queue) ? 353 + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 354 + ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num, 355 + txq_id); 356 + if (ret) { 357 + IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id); 358 + goto error; 359 + } 360 + } 361 + 362 + return 0; 363 + 364 + error: 365 + iwlagn_hw_txq_ctx_free(priv); 366 + 367 + return ret; 368 + } 369 + static int iwl_trans_tx_init(struct iwl_priv *priv) 370 + { 371 + int ret; 372 + int txq_id, slots_num; 373 + unsigned long flags; 374 + bool alloc = false; 375 + 376 + if (!priv->txq) { 377 + ret = iwl_trans_tx_alloc(priv); 378 + if (ret) 379 + goto error; 380 + alloc = true; 381 + } 382 + 383 + spin_lock_irqsave(&priv->lock, flags); 384 + 385 + /* Turn off all Tx DMA fifos */ 386 + iwl_write_prph(priv, IWLAGN_SCD_TXFACT, 0); 387 + 388 + /* Tell NIC where to find the "keep warm" buffer */ 389 + iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 390 + 391 + spin_unlock_irqrestore(&priv->lock, flags); 392 + 393 + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 394 + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 395 + slots_num = (txq_id == priv->cmd_queue) ? 396 + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 397 + ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num, 398 + txq_id); 399 + if (ret) { 400 + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 401 + goto error; 402 + } 403 + } 404 + 405 + return 0; 406 + error: 407 + /*Upon error, free only if we allocated something */ 408 + if (alloc) 409 + iwlagn_hw_txq_ctx_free(priv); 410 + return ret; 411 + } 412 + 413 + static const struct iwl_trans_ops trans_ops = { 414 + .rx_init = iwl_trans_rx_init, 415 + .rx_free = iwl_trans_rx_free, 416 + 417 + .tx_init = iwl_trans_tx_init, 418 + }; 419 + 420 + void iwl_trans_register(struct iwl_trans *trans) 421 + { 422 + trans->ops = &trans_ops; 423 + }
+64
drivers/net/wireless/iwlwifi/iwl-trans.h
··· 1 + /****************************************************************************** 2 + * 3 + * This file is provided under a dual BSD/GPLv2 license. When using or 4 + * redistributing this file, you may do so under either license. 5 + * 6 + * GPL LICENSE SUMMARY 7 + * 8 + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of version 2 of the GNU General Public License as 12 + * published by the Free Software Foundation. 13 + * 14 + * This program is distributed in the hope that it will be useful, but 15 + * WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 22 + * USA 23 + * 24 + * The full GNU General Public License is included in this distribution 25 + * in the file called LICENSE.GPL. 26 + * 27 + * Contact Information: 28 + * Intel Linux Wireless <ilw@linux.intel.com> 29 + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 30 + * 31 + * BSD LICENSE 32 + * 33 + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. 34 + * All rights reserved. 35 + * 36 + * Redistribution and use in source and binary forms, with or without 37 + * modification, are permitted provided that the following conditions 38 + * are met: 39 + * 40 + * * Redistributions of source code must retain the above copyright 41 + * notice, this list of conditions and the following disclaimer. 
42 + * * Redistributions in binary form must reproduce the above copyright 43 + * notice, this list of conditions and the following disclaimer in 44 + * the documentation and/or other materials provided with the 45 + * distribution. 46 + * * Neither the name Intel Corporation nor the names of its 47 + * contributors may be used to endorse or promote products derived 48 + * from this software without specific prior written permission. 49 + * 50 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 51 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 52 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 53 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 54 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 55 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 56 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 60 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 + * 62 + *****************************************************************************/ 63 + 64 + void iwl_trans_register(struct iwl_trans *trans);
+1 -138
drivers/net/wireless/iwlwifi/iwl-tx.c
··· 220 220 return 0; 221 221 } 222 222 223 - /* 224 - * Tell nic where to find circular buffer of Tx Frame Descriptors for 225 - * given Tx queue, and enable the DMA channel used for that queue. 226 - * 227 - * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA 228 - * channels supported in hardware. 229 - */ 230 - static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) 231 - { 232 - int txq_id = txq->q.id; 233 - 234 - /* Circular buffer (TFD queue in DRAM) physical base address */ 235 - iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), 236 - txq->q.dma_addr >> 8); 237 - 238 - return 0; 239 - } 240 - 241 223 /** 242 224 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 243 225 */ ··· 374 392 return s; 375 393 } 376 394 377 - 378 395 /** 379 396 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 380 397 */ 381 - static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, 398 + int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, 382 399 int count, int slots_num, u32 id) 383 400 { 384 401 q->n_bd = count; ··· 405 424 q->write_ptr = q->read_ptr = 0; 406 425 407 426 return 0; 408 - } 409 - 410 - /** 411 - * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 412 - */ 413 - static int iwl_tx_queue_alloc(struct iwl_priv *priv, 414 - struct iwl_tx_queue *txq, u32 id) 415 - { 416 - struct device *dev = priv->bus.dev; 417 - size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 418 - 419 - /* Driver private data, only for Tx (not command) queues, 420 - * not shared with device. 
*/ 421 - if (id != priv->cmd_queue) { 422 - txq->txb = kzalloc(sizeof(txq->txb[0]) * 423 - TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 424 - if (!txq->txb) { 425 - IWL_ERR(priv, "kmalloc for auxiliary BD " 426 - "structures failed\n"); 427 - goto error; 428 - } 429 - } else { 430 - txq->txb = NULL; 431 - } 432 - 433 - /* Circular buffer of transmit frame descriptors (TFDs), 434 - * shared with device */ 435 - txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, 436 - GFP_KERNEL); 437 - if (!txq->tfds) { 438 - IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz); 439 - goto error; 440 - } 441 - txq->q.id = id; 442 - 443 - return 0; 444 - 445 - error: 446 - kfree(txq->txb); 447 - txq->txb = NULL; 448 - 449 - return -ENOMEM; 450 - } 451 - 452 - /** 453 - * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue 454 - */ 455 - int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 456 - int slots_num, u32 txq_id) 457 - { 458 - int i, len; 459 - int ret; 460 - 461 - txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num, 462 - GFP_KERNEL); 463 - txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num, 464 - GFP_KERNEL); 465 - 466 - if (!txq->meta || !txq->cmd) 467 - goto out_free_arrays; 468 - 469 - len = sizeof(struct iwl_device_cmd); 470 - for (i = 0; i < slots_num; i++) { 471 - txq->cmd[i] = kmalloc(len, GFP_KERNEL); 472 - if (!txq->cmd[i]) 473 - goto err; 474 - } 475 - 476 - /* Alloc driver data array and TFD circular buffer */ 477 - ret = iwl_tx_queue_alloc(priv, txq, txq_id); 478 - if (ret) 479 - goto err; 480 - 481 - txq->need_update = 0; 482 - 483 - /* 484 - * For the default queues 0-3, set up the swq_id 485 - * already -- all others need to get one later 486 - * (if they need one at all). 487 - */ 488 - if (txq_id < 4) 489 - iwl_set_swq_id(txq, txq_id, txq_id); 490 - 491 - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 492 - * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
*/ 493 - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 494 - 495 - /* Initialize queue's high/low-water marks, and head/tail indexes */ 496 - ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 497 - if (ret) 498 - return ret; 499 - 500 - /* Tell device where to find queue */ 501 - iwlagn_tx_queue_init(priv, txq); 502 - 503 - return 0; 504 - err: 505 - for (i = 0; i < slots_num; i++) 506 - kfree(txq->cmd[i]); 507 - out_free_arrays: 508 - kfree(txq->meta); 509 - kfree(txq->cmd); 510 - 511 - return -ENOMEM; 512 - } 513 - 514 - void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 515 - int slots_num, u32 txq_id) 516 - { 517 - memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num); 518 - 519 - txq->need_update = 0; 520 - 521 - /* Initialize queue's high/low-water marks, and head/tail indexes */ 522 - iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 523 - 524 - /* Tell device where to find queue */ 525 - iwlagn_tx_queue_init(priv, txq); 526 427 } 527 428 528 429 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
+2 -2
drivers/net/wireless/mwifiex/sdio.h
··· 54 54 55 55 #define SDIO_MP_AGGR_DEF_PKT_LIMIT 8 56 56 57 - #define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (4096) /* 4K */ 57 + #define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */ 58 58 59 59 /* Multi port RX aggregation buffer size */ 60 - #define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (4096) /* 4K */ 60 + #define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (16384) /* 16K */ 61 61 62 62 /* Misc. Config Register : Auto Re-enable interrupts */ 63 63 #define AUTO_RE_ENABLE_INT BIT(4)
+1
drivers/net/wireless/rt2x00/rt2400pci.c
··· 1723 1723 .set_antenna = rt2x00mac_set_antenna, 1724 1724 .get_antenna = rt2x00mac_get_antenna, 1725 1725 .get_ringparam = rt2x00mac_get_ringparam, 1726 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 1726 1727 }; 1727 1728 1728 1729 static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
+1
drivers/net/wireless/rt2x00/rt2500pci.c
··· 2016 2016 .set_antenna = rt2x00mac_set_antenna, 2017 2017 .get_antenna = rt2x00mac_get_antenna, 2018 2018 .get_ringparam = rt2x00mac_get_ringparam, 2019 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 2019 2020 }; 2020 2021 2021 2022 static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
+1
drivers/net/wireless/rt2x00/rt2500usb.c
··· 1827 1827 .set_antenna = rt2x00mac_set_antenna, 1828 1828 .get_antenna = rt2x00mac_get_antenna, 1829 1829 .get_ringparam = rt2x00mac_get_ringparam, 1830 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 1830 1831 }; 1831 1832 1832 1833 static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
+2
drivers/net/wireless/rt2x00/rt2800pci.c
··· 1031 1031 .flush = rt2x00mac_flush, 1032 1032 .get_survey = rt2800_get_survey, 1033 1033 .get_ringparam = rt2x00mac_get_ringparam, 1034 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 1034 1035 }; 1035 1036 1036 1037 static const struct rt2800_ops rt2800pci_rt2800_ops = { ··· 1161 1160 #endif 1162 1161 #ifdef CONFIG_RT2800PCI_RT53XX 1163 1162 { PCI_DEVICE(0x1814, 0x5390) }, 1163 + { PCI_DEVICE(0x1814, 0x539f) }, 1164 1164 #endif 1165 1165 { 0, } 1166 1166 };
+3 -2
drivers/net/wireless/rt2x00/rt2800usb.c
··· 757 757 .flush = rt2x00mac_flush, 758 758 .get_survey = rt2800_get_survey, 759 759 .get_ringparam = rt2x00mac_get_ringparam, 760 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 760 761 }; 761 762 762 763 static const struct rt2800_ops rt2800usb_rt2800_ops = { ··· 1021 1020 { USB_DEVICE(0x0df6, 0x0048) }, 1022 1021 { USB_DEVICE(0x0df6, 0x0051) }, 1023 1022 { USB_DEVICE(0x0df6, 0x005f) }, 1023 + { USB_DEVICE(0x0df6, 0x0060) }, 1024 1024 /* SMC */ 1025 1025 { USB_DEVICE(0x083a, 0x6618) }, 1026 1026 { USB_DEVICE(0x083a, 0x7511) }, ··· 1078 1076 { USB_DEVICE(0x148f, 0x3572) }, 1079 1077 /* Sitecom */ 1080 1078 { USB_DEVICE(0x0df6, 0x0041) }, 1079 + { USB_DEVICE(0x0df6, 0x0062) }, 1081 1080 /* Toshiba */ 1082 1081 { USB_DEVICE(0x0930, 0x0a07) }, 1083 1082 /* Zinwell */ ··· 1177 1174 { USB_DEVICE(0x0df6, 0x004a) }, 1178 1175 { USB_DEVICE(0x0df6, 0x004d) }, 1179 1176 { USB_DEVICE(0x0df6, 0x0053) }, 1180 - { USB_DEVICE(0x0df6, 0x0060) }, 1181 - { USB_DEVICE(0x0df6, 0x0062) }, 1182 1177 /* SMC */ 1183 1178 { USB_DEVICE(0x083a, 0xa512) }, 1184 1179 { USB_DEVICE(0x083a, 0xc522) },
+1
drivers/net/wireless/rt2x00/rt2x00.h
··· 1277 1277 int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); 1278 1278 void rt2x00mac_get_ringparam(struct ieee80211_hw *hw, 1279 1279 u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max); 1280 + bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw); 1280 1281 1281 1282 /* 1282 1283 * Driver allocation handlers.
+3 -3
drivers/net/wireless/rt2x00/rt2x00crypto.c
··· 45 45 } 46 46 } 47 47 48 - void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, 48 + void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 49 + struct sk_buff *skb, 49 50 struct txentry_desc *txdesc) 50 51 { 51 - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 52 - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 52 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 53 53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 54 54 55 55 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+2 -1
drivers/net/wireless/rt2x00/rt2x00lib.h
··· 336 336 */ 337 337 #ifdef CONFIG_RT2X00_LIB_CRYPTO 338 338 enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key); 339 - void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, 339 + void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 340 + struct sk_buff *skb, 340 341 struct txentry_desc *txdesc); 341 342 unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev, 342 343 struct sk_buff *skb);
+14
drivers/net/wireless/rt2x00/rt2x00mac.c
··· 818 818 *rx_max = rt2x00dev->rx->limit; 819 819 } 820 820 EXPORT_SYMBOL_GPL(rt2x00mac_get_ringparam); 821 + 822 + bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw) 823 + { 824 + struct rt2x00_dev *rt2x00dev = hw->priv; 825 + struct data_queue *queue; 826 + 827 + tx_queue_for_each(rt2x00dev, queue) { 828 + if (!rt2x00queue_empty(queue)) 829 + return true; 830 + } 831 + 832 + return false; 833 + } 834 + EXPORT_SYMBOL_GPL(rt2x00mac_tx_frames_pending);
+62 -46
drivers/net/wireless/rt2x00/rt2x00queue.c
··· 200 200 skb_pull(skb, l2pad); 201 201 } 202 202 203 - static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry, 203 + static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, 204 + struct sk_buff *skb, 204 205 struct txentry_desc *txdesc) 205 206 { 206 - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 207 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 207 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 208 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 208 209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 209 210 210 211 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) ··· 213 212 214 213 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 215 214 216 - if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags)) 215 + if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) 217 216 return; 218 217 219 218 /* ··· 238 237 239 238 } 240 239 241 - static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, 240 + static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, 241 + struct sk_buff *skb, 242 242 struct txentry_desc *txdesc, 243 243 const struct rt2x00_rate *hwrate) 244 244 { 245 - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 246 - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 245 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 247 246 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 248 247 unsigned int data_length; 249 248 unsigned int duration; ··· 260 259 txdesc->u.plcp.ifs = IFS_SIFS; 261 260 262 261 /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ 263 - data_length = entry->skb->len + 4; 264 - data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); 262 + data_length = skb->len + 4; 263 + data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); 265 264 266 265 /* 267 266 * PLCP setup ··· 302 301 } 303 
302 } 304 303 305 - static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry, 304 + static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, 305 + struct sk_buff *skb, 306 306 struct txentry_desc *txdesc, 307 307 const struct rt2x00_rate *hwrate) 308 308 { 309 - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 309 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 310 310 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 311 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 311 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 312 312 313 313 if (tx_info->control.sta) 314 314 txdesc->u.ht.mpdu_density = ··· 382 380 txdesc->u.ht.txop = TXOP_HTTXOP; 383 381 } 384 382 385 - static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, 383 + static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 384 + struct sk_buff *skb, 386 385 struct txentry_desc *txdesc) 387 386 { 388 - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 389 - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 390 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 387 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 388 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 391 389 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 392 390 struct ieee80211_rate *rate; 393 391 const struct rt2x00_rate *hwrate = NULL; ··· 397 395 /* 398 396 * Header and frame information. 399 397 */ 400 - txdesc->length = entry->skb->len; 401 - txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 398 + txdesc->length = skb->len; 399 + txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb); 402 400 403 401 /* 404 402 * Check whether this frame is to be acked. 
··· 473 471 /* 474 472 * Apply TX descriptor handling by components 475 473 */ 476 - rt2x00crypto_create_tx_descriptor(entry, txdesc); 477 - rt2x00queue_create_tx_descriptor_seq(entry, txdesc); 474 + rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc); 475 + rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc); 478 476 479 477 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) 480 - rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate); 478 + rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, 479 + hwrate); 481 480 else 482 - rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 481 + rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, 482 + hwrate); 483 483 } 484 484 485 485 static int rt2x00queue_write_tx_data(struct queue_entry *entry, ··· 559 555 bool local) 560 556 { 561 557 struct ieee80211_tx_info *tx_info; 562 - struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 558 + struct queue_entry *entry; 563 559 struct txentry_desc txdesc; 564 560 struct skb_frame_desc *skbdesc; 565 561 u8 rate_idx, rate_flags; 566 - 567 - if (unlikely(rt2x00queue_full(queue))) { 568 - ERROR(queue->rt2x00dev, 569 - "Dropping frame due to full tx queue %d.\n", queue->qid); 570 - return -ENOBUFS; 571 - } 572 - 573 - if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, 574 - &entry->flags))) { 575 - ERROR(queue->rt2x00dev, 576 - "Arrived at non-free entry in the non-full queue %d.\n" 577 - "Please file bug report to %s.\n", 578 - queue->qid, DRV_PROJECT); 579 - return -EINVAL; 580 - } 562 + int ret = 0; 581 563 582 564 /* 583 565 * Copy all TX descriptor information into txdesc, 584 566 * after that we are free to use the skb->cb array 585 567 * for our information. 
586 568 */ 587 - entry->skb = skb; 588 - rt2x00queue_create_tx_descriptor(entry, &txdesc); 569 + rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc); 589 570 590 571 /* 591 572 * All information is retrieved from the skb->cb array, ··· 582 593 rate_flags = tx_info->control.rates[0].flags; 583 594 skbdesc = get_skb_frame_desc(skb); 584 595 memset(skbdesc, 0, sizeof(*skbdesc)); 585 - skbdesc->entry = entry; 586 596 skbdesc->tx_rate_idx = rate_idx; 587 597 skbdesc->tx_rate_flags = rate_flags; 588 598 ··· 610 622 * for PCI devices. 611 623 */ 612 624 if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags)) 613 - rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length); 625 + rt2x00queue_insert_l2pad(skb, txdesc.header_length); 614 626 else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags)) 615 - rt2x00queue_align_frame(entry->skb); 627 + rt2x00queue_align_frame(skb); 628 + 629 + spin_lock(&queue->tx_lock); 630 + 631 + if (unlikely(rt2x00queue_full(queue))) { 632 + ERROR(queue->rt2x00dev, 633 + "Dropping frame due to full tx queue %d.\n", queue->qid); 634 + ret = -ENOBUFS; 635 + goto out; 636 + } 637 + 638 + entry = rt2x00queue_get_entry(queue, Q_INDEX); 639 + 640 + if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, 641 + &entry->flags))) { 642 + ERROR(queue->rt2x00dev, 643 + "Arrived at non-free entry in the non-full queue %d.\n" 644 + "Please file bug report to %s.\n", 645 + queue->qid, DRV_PROJECT); 646 + ret = -EINVAL; 647 + goto out; 648 + } 649 + 650 + skbdesc->entry = entry; 651 + entry->skb = skb; 616 652 617 653 /* 618 654 * It could be possible that the queue was corrupted and this ··· 646 634 if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) { 647 635 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 648 636 entry->skb = NULL; 649 - return -EIO; 637 + ret = -EIO; 638 + goto out; 650 639 } 651 640 652 641 set_bit(ENTRY_DATA_PENDING, &entry->flags); ··· 656 643 rt2x00queue_write_tx_descriptor(entry, &txdesc); 657 644 
rt2x00queue_kick_tx_queue(queue, &txdesc); 658 645 659 - return 0; 646 + out: 647 + spin_unlock(&queue->tx_lock); 648 + return ret; 660 649 } 661 650 662 651 int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, ··· 712 697 * after that we are free to use the skb->cb array 713 698 * for our information. 714 699 */ 715 - rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc); 700 + rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc); 716 701 717 702 /* 718 703 * Fill in skb descriptor ··· 1199 1184 struct data_queue *queue, enum data_queue_qid qid) 1200 1185 { 1201 1186 mutex_init(&queue->status_lock); 1187 + spin_lock_init(&queue->tx_lock); 1202 1188 spin_lock_init(&queue->index_lock); 1203 1189 1204 1190 queue->rt2x00dev = rt2x00dev;
+2
drivers/net/wireless/rt2x00/rt2x00queue.h
··· 432 432 * @flags: Entry flags, see &enum queue_entry_flags. 433 433 * @status_lock: The mutex for protecting the start/stop/flush 434 434 * handling on this queue. 435 + * @tx_lock: Spinlock to serialize tx operations on this queue. 435 436 * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or 436 437 * @index_crypt needs to be changed this lock should be grabbed to prevent 437 438 * index corruption due to concurrency. ··· 459 458 unsigned long flags; 460 459 461 460 struct mutex status_lock; 461 + spinlock_t tx_lock; 462 462 spinlock_t index_lock; 463 463 464 464 unsigned int count;
+1
drivers/net/wireless/rt2x00/rt61pci.c
··· 2982 2982 .set_antenna = rt2x00mac_set_antenna, 2983 2983 .get_antenna = rt2x00mac_get_antenna, 2984 2984 .get_ringparam = rt2x00mac_get_ringparam, 2985 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 2985 2986 }; 2986 2987 2987 2988 static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
+1
drivers/net/wireless/rt2x00/rt73usb.c
··· 2314 2314 .set_antenna = rt2x00mac_set_antenna, 2315 2315 .get_antenna = rt2x00mac_get_antenna, 2316 2316 .get_ringparam = rt2x00mac_get_ringparam, 2317 + .tx_frames_pending = rt2x00mac_tx_frames_pending, 2317 2318 }; 2318 2319 2319 2320 static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
-4
drivers/net/wireless/rtlwifi/pci.c
··· 788 788 { 789 789 struct ieee80211_hw *hw = dev_id; 790 790 struct rtl_priv *rtlpriv = rtl_priv(hw); 791 - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 792 791 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 793 792 unsigned long flags; 794 793 u32 inta = 0; 795 794 u32 intb = 0; 796 - 797 - if (rtlpci->irq_enabled == 0) 798 - return IRQ_HANDLED; 799 795 800 796 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 801 797
-1
drivers/net/wireless/rtlwifi/pci.h
··· 158 158 bool first_init; 159 159 bool being_init_adapter; 160 160 bool init_ready; 161 - bool irq_enabled; 162 161 163 162 /*Tx */ 164 163 struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
-2
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
··· 1183 1183 1184 1184 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); 1185 1185 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); 1186 - rtlpci->irq_enabled = true; 1187 1186 } 1188 1187 1189 1188 void rtl92ce_disable_interrupt(struct ieee80211_hw *hw) ··· 1192 1193 1193 1194 rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); 1194 1195 rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); 1195 - rtlpci->irq_enabled = false; 1196 1196 synchronize_irq(rtlpci->pdev->irq); 1197 1197 } 1198 1198
-9
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
··· 380 380 0xFFFFFFFF); 381 381 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 382 382 0xFFFFFFFF); 383 - rtlpci->irq_enabled = true; 384 383 } else { 385 384 rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] & 386 385 0xFFFFFFFF); 387 386 rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] & 388 387 0xFFFFFFFF); 389 - rtlusb->irq_enabled = true; 390 388 } 391 389 } 392 390 ··· 396 398 void rtl92c_disable_interrupt(struct ieee80211_hw *hw) 397 399 { 398 400 struct rtl_priv *rtlpriv = rtl_priv(hw); 399 - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 400 - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 401 - struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); 402 401 403 402 rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); 404 403 rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); 405 - if (IS_HARDWARE_TYPE_8192CE(rtlhal)) 406 - rtlpci->irq_enabled = false; 407 - else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) 408 - rtlusb->irq_enabled = false; 409 404 } 410 405 411 406 void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
+11 -8
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
··· 449 449 case HW_VAR_CORRECT_TSF: { 450 450 u8 btype_ibss = ((u8 *) (val))[0]; 451 451 452 - if (btype_ibss == true) 452 + if (btype_ibss) 453 453 _rtl92de_stop_tx_beacon(hw); 454 454 _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3)); 455 455 rtl_write_dword(rtlpriv, REG_TSFTR, ··· 457 457 rtl_write_dword(rtlpriv, REG_TSFTR + 4, 458 458 (u32) ((mac->tsf >> 32) & 0xffffffff)); 459 459 _rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0); 460 - if (btype_ibss == true) 460 + if (btype_ibss) 461 461 _rtl92de_resume_tx_beacon(hw); 462 462 463 463 break; ··· 932 932 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 933 933 ("Failed to download FW. Init HW " 934 934 "without FW..\n")); 935 - err = 1; 936 935 rtlhal->fw_ready = false; 936 + return 1; 937 937 } else { 938 938 rtlhal->fw_ready = true; 939 939 } ··· 1044 1044 if (((tmp_rega & BIT(11)) == BIT(11))) 1045 1045 break; 1046 1046 } 1047 + /* check that loop was successful. If not, exit now */ 1048 + if (i == 10000) { 1049 + rtlpci->init_ready = false; 1050 + return 1; 1051 + } 1047 1052 } 1048 1053 } 1049 1054 rtlpci->init_ready = true; ··· 1147 1142 1148 1143 if (rtlpriv->psc.rfpwr_state != ERFON) 1149 1144 return; 1150 - if (check_bssid == true) { 1145 + if (check_bssid) { 1151 1146 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1152 1147 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr)); 1153 1148 _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(4)); ··· 1226 1221 1227 1222 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); 1228 1223 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); 1229 - rtlpci->irq_enabled = true; 1230 1224 } 1231 1225 1232 1226 void rtl92de_disable_interrupt(struct ieee80211_hw *hw) ··· 1235 1231 1236 1232 rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); 1237 1233 rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); 1238 - rtlpci->irq_enabled = false; 1239 1234 synchronize_irq(rtlpci->pdev->irq); 1240 1235 } 1241 1236 ··· 1790 1787 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 
("Autoload OK\n")); 1791 1788 rtlefuse->autoload_failflag = false; 1792 1789 } 1793 - if (rtlefuse->autoload_failflag == true) { 1790 + if (rtlefuse->autoload_failflag) { 1794 1791 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1795 1792 ("RTL819X Not boot from eeprom, check it !!")); 1796 1793 return; ··· 2152 2149 REG_MAC_PINMUX_CFG) & ~(BIT(3))); 2153 2150 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); 2154 2151 e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF; 2155 - if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { 2152 + if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) { 2156 2153 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 2157 2154 ("GPIOChangeRF - HW Radio ON, RF ON\n")); 2158 2155 e_rfpowerstate_toset = ERFON;
+1 -1
drivers/net/wireless/rtlwifi/rtl8192de/led.c
··· 93 93 break; 94 94 case LED_PIN_LED0: 95 95 ledcfg &= 0xf0; 96 - if (pcipriv->ledctl.led_opendrain == true) 96 + if (pcipriv->ledctl.led_opendrain) 97 97 rtl_write_byte(rtlpriv, REG_LEDCFG2, 98 98 (ledcfg | BIT(1) | BIT(5) | BIT(6))); 99 99 else
+12 -18
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
··· 932 932 enum rf_content content, 933 933 enum radio_path rfpath) 934 934 { 935 - int i, j; 935 + int i; 936 936 u32 *radioa_array_table; 937 937 u32 *radiob_array_table; 938 938 u16 radioa_arraylen, radiob_arraylen; ··· 974 974 mdelay(50); 975 975 } else if (radioa_array_table[i] == 0xfd) { 976 976 /* delay_ms(5); */ 977 - for (j = 0; j < 100; j++) 978 - udelay(MAX_STALL_TIME); 977 + mdelay(5); 979 978 } else if (radioa_array_table[i] == 0xfc) { 980 979 /* delay_ms(1); */ 981 - for (j = 0; j < 20; j++) 982 - udelay(MAX_STALL_TIME); 983 - 980 + mdelay(1); 984 981 } else if (radioa_array_table[i] == 0xfb) { 985 982 udelay(50); 986 983 } else if (radioa_array_table[i] == 0xfa) { ··· 1001 1004 mdelay(50); 1002 1005 } else if (radiob_array_table[i] == 0xfd) { 1003 1006 /* delay_ms(5); */ 1004 - for (j = 0; j < 100; j++) 1005 - udelay(MAX_STALL_TIME); 1007 + mdelay(5); 1006 1008 } else if (radiob_array_table[i] == 0xfc) { 1007 1009 /* delay_ms(1); */ 1008 - for (j = 0; j < 20; j++) 1009 - udelay(MAX_STALL_TIME); 1010 + mdelay(1); 1010 1011 } else if (radiob_array_table[i] == 0xfb) { 1011 1012 udelay(50); 1012 1013 } else if (radiob_array_table[i] == 0xfa) { ··· 1271 1276 { 1272 1277 struct rtl_priv *rtlpriv = rtl_priv(hw); 1273 1278 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1274 - u8 i, value8; 1279 + u8 value8; 1275 1280 1276 1281 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==>\n")); 1277 1282 rtlhal->bandset = band; ··· 1316 1321 rtl_write_byte(rtlpriv, (rtlhal->interfaceindex == 1317 1322 0 ? 
REG_MAC0 : REG_MAC1), value8); 1318 1323 } 1319 - for (i = 0; i < 20; i++) 1320 - udelay(MAX_STALL_TIME); 1324 + mdelay(1); 1321 1325 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<==Switch Band OK.\n")); 1322 1326 } 1323 1327 ··· 1678 1684 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1679 1685 ("Delay %d ms for One shot, path A LOK & IQK.\n", 1680 1686 IQK_DELAY_TIME)); 1681 - udelay(IQK_DELAY_TIME * 1000); 1687 + mdelay(IQK_DELAY_TIME); 1682 1688 /* Check failed */ 1683 1689 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1684 1690 RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac)); ··· 1749 1755 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1750 1756 ("Delay %d ms for One shot, path A LOK & IQK.\n", 1751 1757 IQK_DELAY_TIME)); 1752 - udelay(IQK_DELAY_TIME * 1000 * 10); 1758 + mdelay(IQK_DELAY_TIME * 10); 1753 1759 /* Check failed */ 1754 1760 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1755 1761 RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac)); ··· 1802 1808 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1803 1809 ("Delay %d ms for One shot, path B LOK & IQK.\n", 1804 1810 IQK_DELAY_TIME)); 1805 - udelay(IQK_DELAY_TIME * 1000); 1811 + mdelay(IQK_DELAY_TIME); 1806 1812 /* Check failed */ 1807 1813 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1808 1814 RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac)); ··· 1869 1875 /* delay x ms */ 1870 1876 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1871 1877 ("Delay %d ms for One shot, path B LOK & IQK.\n", 10)); 1872 - udelay(IQK_DELAY_TIME * 1000 * 10); 1878 + mdelay(IQK_DELAY_TIME * 10); 1873 1879 1874 1880 /* Check failed */ 1875 1881 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); ··· 2200 2206 * PHY_REG.txt , and radio_a, radio_b.txt */ 2201 2207 2202 2208 RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK for 5G NORMAL:Start!!!\n")); 2203 - udelay(IQK_DELAY_TIME * 1000 * 20); 2209 + mdelay(IQK_DELAY_TIME * 20); 2204 2210 if (t == 0) { 2205 2211 bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD); 2206 2212 RTPRINT(rtlpriv, FINIT, INIT_IQK, 
("==>0x%08x\n", bbvalue));
+8 -8
drivers/net/wireless/rtlwifi/rtl8192de/rf.c
··· 87 87 88 88 if (rtlefuse->eeprom_regulatory != 0) 89 89 turbo_scanoff = true; 90 - if (mac->act_scanning == true) { 90 + if (mac->act_scanning) { 91 91 tx_agc[RF90_PATH_A] = 0x3f3f3f3f; 92 92 tx_agc[RF90_PATH_B] = 0x3f3f3f3f; 93 93 if (turbo_scanoff) { ··· 416 416 struct rtl_priv *rtlpriv = rtl_priv(hw); 417 417 struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); 418 418 u8 u1btmp; 419 - u8 direct = bmac0 == true ? BIT(3) | BIT(2) : BIT(3); 420 - u8 mac_reg = bmac0 == true ? REG_MAC1 : REG_MAC0; 421 - u8 mac_on_bit = bmac0 == true ? MAC1_ON : MAC0_ON; 419 + u8 direct = bmac0 ? BIT(3) | BIT(2) : BIT(3); 420 + u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0; 421 + u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON; 422 422 bool bresult = true; /* true: need to enable BB/RF power */ 423 423 424 424 rtlhal->during_mac0init_radiob = false; ··· 447 447 struct rtl_priv *rtlpriv = rtl_priv(hw); 448 448 struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); 449 449 u8 u1btmp; 450 - u8 direct = bmac0 == true ? BIT(3) | BIT(2) : BIT(3); 451 - u8 mac_reg = bmac0 == true ? REG_MAC1 : REG_MAC0; 452 - u8 mac_on_bit = bmac0 == true ? MAC1_ON : MAC0_ON; 450 + u8 direct = bmac0 ? BIT(3) | BIT(2) : BIT(3); 451 + u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0; 452 + u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON; 453 453 454 454 rtlhal->during_mac0init_radiob = false; 455 455 rtlhal->during_mac1init_radioa = false; ··· 573 573 udelay(1); 574 574 switch (rfpath) { 575 575 case RF90_PATH_A: 576 - if (true_bpath == true) 576 + if (true_bpath) 577 577 rtstatus = rtl92d_phy_config_rf_with_headerfile( 578 578 hw, radiob_txt, 579 579 (enum radio_path)rfpath);
+3 -3
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
··· 614 614 (u8) 615 615 GET_RX_DESC_RXMCS(pdesc)); 616 616 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 617 - if (phystatus == true) { 617 + if (phystatus) { 618 618 p_drvinfo = (struct rx_fwinfo_92d *)(skb->data + 619 619 stats->rx_bufshift); 620 620 _rtl92de_translate_rx_signal_stuff(hw, ··· 876 876 877 877 void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) 878 878 { 879 - if (istx == true) { 879 + if (istx) { 880 880 switch (desc_name) { 881 881 case HW_DESC_OWN: 882 882 wmb(); ··· 917 917 { 918 918 u32 ret = 0; 919 919 920 - if (istx == true) { 920 + if (istx) { 921 921 switch (desc_name) { 922 922 case HW_DESC_OWN: 923 923 ret = GET_TX_DESC_OWN(p_desc);
-3
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
··· 1214 1214 rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]); 1215 1215 /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */ 1216 1216 rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F); 1217 - 1218 - rtlpci->irq_enabled = true; 1219 1217 } 1220 1218 1221 1219 void rtl92se_disable_interrupt(struct ieee80211_hw *hw) ··· 1224 1226 rtl_write_dword(rtlpriv, INTA_MASK, 0); 1225 1227 rtl_write_dword(rtlpriv, INTA_MASK + 4, 0); 1226 1228 1227 - rtlpci->irq_enabled = false; 1228 1229 synchronize_irq(rtlpci->pdev->irq); 1229 1230 } 1230 1231
+1 -1
drivers/net/wireless/wl12xx/Kconfig
··· 11 11 depends on WL12XX_MENU && GENERIC_HARDIRQS 12 12 depends on INET 13 13 select FW_LOADER 14 - select CRC7 15 14 ---help--- 16 15 This module adds support for wireless adapters based on TI wl1271 and 17 16 TI wl1273 chipsets. This module does *not* include support for wl1251. ··· 32 33 config WL12XX_SPI 33 34 tristate "TI wl12xx SPI support" 34 35 depends on WL12XX && SPI_MASTER 36 + select CRC7 35 37 ---help--- 36 38 This module adds support for the SPI interface of adapters using 37 39 TI wl12xx chipsets. Select this if your platform is using
+48 -1
drivers/net/wireless/wl12xx/acx.c
··· 25 25 26 26 #include <linux/module.h> 27 27 #include <linux/platform_device.h> 28 - #include <linux/crc7.h> 29 28 #include <linux/spi/spi.h> 30 29 #include <linux/slab.h> 31 30 ··· 1067 1068 mem_conf->tx_free_req = mem->min_req_tx_blocks; 1068 1069 mem_conf->rx_free_req = mem->min_req_rx_blocks; 1069 1070 mem_conf->tx_min = mem->tx_min; 1071 + mem_conf->fwlog_blocks = wl->conf.fwlog.mem_blocks; 1070 1072 1071 1073 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 1072 1074 sizeof(*mem_conf)); ··· 1574 1574 1575 1575 out: 1576 1576 kfree(tsf_info); 1577 + return ret; 1578 + } 1579 + 1580 + int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable) 1581 + { 1582 + struct wl1271_acx_ps_rx_streaming *rx_streaming; 1583 + u32 conf_queues, enable_queues; 1584 + int i, ret = 0; 1585 + 1586 + wl1271_debug(DEBUG_ACX, "acx ps rx streaming"); 1587 + 1588 + rx_streaming = kzalloc(sizeof(*rx_streaming), GFP_KERNEL); 1589 + if (!rx_streaming) { 1590 + ret = -ENOMEM; 1591 + goto out; 1592 + } 1593 + 1594 + conf_queues = wl->conf.rx_streaming.queues; 1595 + if (enable) 1596 + enable_queues = conf_queues; 1597 + else 1598 + enable_queues = 0; 1599 + 1600 + for (i = 0; i < 8; i++) { 1601 + /* 1602 + * Skip non-changed queues, to avoid redundant acxs. 1603 + * this check assumes conf.rx_streaming.queues can't 1604 + * be changed while rx_streaming is enabled. 1605 + */ 1606 + if (!(conf_queues & BIT(i))) 1607 + continue; 1608 + 1609 + rx_streaming->tid = i; 1610 + rx_streaming->enable = enable_queues & BIT(i); 1611 + rx_streaming->period = wl->conf.rx_streaming.interval; 1612 + rx_streaming->timeout = wl->conf.rx_streaming.interval; 1613 + 1614 + ret = wl1271_cmd_configure(wl, ACX_PS_RX_STREAMING, 1615 + rx_streaming, 1616 + sizeof(*rx_streaming)); 1617 + if (ret < 0) { 1618 + wl1271_warning("acx ps rx streaming failed: %d", ret); 1619 + goto out; 1620 + } 1621 + } 1622 + out: 1623 + kfree(rx_streaming); 1577 1624 return ret; 1578 1625 } 1579 1626
+16
drivers/net/wireless/wl12xx/acx.h
··· 828 828 u8 tx_free_req; 829 829 u8 rx_free_req; 830 830 u8 tx_min; 831 + u8 fwlog_blocks; 832 + u8 padding[3]; 831 833 } __packed; 832 834 833 835 struct wl1271_acx_mem_map { ··· 1155 1153 u8 padding[3]; 1156 1154 } __packed; 1157 1155 1156 + struct wl1271_acx_ps_rx_streaming { 1157 + struct acx_header header; 1158 + 1159 + u8 tid; 1160 + u8 enable; 1161 + 1162 + /* interval between triggers (10-100 msec) */ 1163 + u8 period; 1164 + 1165 + /* timeout before first trigger (0-200 msec) */ 1166 + u8 timeout; 1167 + } __packed; 1168 + 1158 1169 struct wl1271_acx_max_tx_retry { 1159 1170 struct acx_header header; 1160 1171 ··· 1399 1384 int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, 1400 1385 bool enable); 1401 1386 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1387 + int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable); 1402 1388 int wl1271_acx_max_tx_retry(struct wl1271 *wl); 1403 1389 int wl1271_acx_config_ps(struct wl1271 *wl); 1404 1390 int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+33
drivers/net/wireless/wl12xx/boot.c
··· 102 102 wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); 103 103 } 104 104 105 + static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl) 106 + { 107 + unsigned int quirks = 0; 108 + unsigned int *fw_ver = wl->chip.fw_ver; 109 + 110 + /* Only for wl127x */ 111 + if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) && 112 + /* Check STA version */ 113 + (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && 114 + (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) || 115 + /* Check AP version */ 116 + ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) && 117 + (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN)))) 118 + quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS; 119 + 120 + /* Only new station firmwares support routing fw logs to the host */ 121 + if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && 122 + (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN)) 123 + quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED; 124 + 125 + /* This feature is not yet supported for AP mode */ 126 + if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) 127 + quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED; 128 + 129 + return quirks; 130 + } 131 + 105 132 static void wl1271_parse_fw_ver(struct wl1271 *wl) 106 133 { 107 134 int ret; ··· 143 116 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); 144 117 return; 145 118 } 119 + 120 + /* Check if any quirks are needed with older fw versions */ 121 + wl->quirks |= wl12xx_get_fw_ver_quirks(wl); 146 122 } 147 123 148 124 static void wl1271_boot_fw_version(struct wl1271 *wl) ··· 778 748 } else { 779 749 clk |= (wl->ref_clock << 1) << 4; 780 750 } 751 + 752 + if (wl->quirks & WL12XX_QUIRK_LPD_MODE) 753 + clk |= SCRATCH_ENABLE_LPD; 781 754 782 755 wl1271_write32(wl, DRPW_SCRATCH_START, clk); 783 756
+91 -3
drivers/net/wireless/wl12xx/cmd.c
··· 23 23 24 24 #include <linux/module.h> 25 25 #include <linux/platform_device.h> 26 - #include <linux/crc7.h> 27 26 #include <linux/spi/spi.h> 28 27 #include <linux/etherdevice.h> 29 28 #include <linux/ieee80211.h> ··· 105 106 106 107 fail: 107 108 WARN_ON(1); 108 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 109 + wl12xx_queue_recovery_work(wl); 109 110 return ret; 110 111 } 111 112 ··· 133 134 134 135 /* Override the REF CLK from the NVS with the one from platform data */ 135 136 gen_parms->general_params.ref_clock = wl->ref_clock; 137 + 138 + /* LPD mode enable (bits 6-7) in WL1271 AP mode only */ 139 + if (wl->quirks & WL12XX_QUIRK_LPD_MODE) 140 + gen_parms->general_params.general_settings |= 141 + GENERAL_SETTINGS_DRPW_LPD; 136 142 137 143 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 138 144 if (ret < 0) { ··· 356 352 357 353 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask); 358 354 if (ret != 0) { 359 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 355 + wl12xx_queue_recovery_work(wl); 360 356 return ret; 361 357 } 362 358 ··· 1220 1216 * due to a firmware bug. 
1221 1217 */ 1222 1218 wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID); 1219 + 1220 + out_free: 1221 + kfree(cmd); 1222 + 1223 + out: 1224 + return ret; 1225 + } 1226 + 1227 + int wl12xx_cmd_config_fwlog(struct wl1271 *wl) 1228 + { 1229 + struct wl12xx_cmd_config_fwlog *cmd; 1230 + int ret = 0; 1231 + 1232 + wl1271_debug(DEBUG_CMD, "cmd config firmware logger"); 1233 + 1234 + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1235 + if (!cmd) { 1236 + ret = -ENOMEM; 1237 + goto out; 1238 + } 1239 + 1240 + cmd->logger_mode = wl->conf.fwlog.mode; 1241 + cmd->log_severity = wl->conf.fwlog.severity; 1242 + cmd->timestamp = wl->conf.fwlog.timestamp; 1243 + cmd->output = wl->conf.fwlog.output; 1244 + cmd->threshold = wl->conf.fwlog.threshold; 1245 + 1246 + ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0); 1247 + if (ret < 0) { 1248 + wl1271_error("failed to send config firmware logger command"); 1249 + goto out_free; 1250 + } 1251 + 1252 + out_free: 1253 + kfree(cmd); 1254 + 1255 + out: 1256 + return ret; 1257 + } 1258 + 1259 + int wl12xx_cmd_start_fwlog(struct wl1271 *wl) 1260 + { 1261 + struct wl12xx_cmd_start_fwlog *cmd; 1262 + int ret = 0; 1263 + 1264 + wl1271_debug(DEBUG_CMD, "cmd start firmware logger"); 1265 + 1266 + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1267 + if (!cmd) { 1268 + ret = -ENOMEM; 1269 + goto out; 1270 + } 1271 + 1272 + ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0); 1273 + if (ret < 0) { 1274 + wl1271_error("failed to send start firmware logger command"); 1275 + goto out_free; 1276 + } 1277 + 1278 + out_free: 1279 + kfree(cmd); 1280 + 1281 + out: 1282 + return ret; 1283 + } 1284 + 1285 + int wl12xx_cmd_stop_fwlog(struct wl1271 *wl) 1286 + { 1287 + struct wl12xx_cmd_stop_fwlog *cmd; 1288 + int ret = 0; 1289 + 1290 + wl1271_debug(DEBUG_CMD, "cmd stop firmware logger"); 1291 + 1292 + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1293 + if (!cmd) { 1294 + ret = -ENOMEM; 1295 + goto out; 1296 + } 1297 + 
1298 + ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0); 1299 + if (ret < 0) { 1300 + wl1271_error("failed to send stop firmware logger command"); 1301 + goto out_free; 1302 + } 1223 1303 1224 1304 out_free: 1225 1305 kfree(cmd);
+62
drivers/net/wireless/wl12xx/cmd.h
··· 70 70 int wl1271_cmd_stop_bss(struct wl1271 *wl); 71 71 int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid); 72 72 int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid); 73 + int wl12xx_cmd_config_fwlog(struct wl1271 *wl); 74 + int wl12xx_cmd_start_fwlog(struct wl1271 *wl); 75 + int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); 73 76 74 77 enum wl1271_commands { 75 78 CMD_INTERROGATE = 1, /*use this to read information elements*/ ··· 110 107 CMD_START_PERIODIC_SCAN = 50, 111 108 CMD_STOP_PERIODIC_SCAN = 51, 112 109 CMD_SET_STA_STATE = 52, 110 + CMD_CONFIG_FWLOGGER = 53, 111 + CMD_START_FWLOGGER = 54, 112 + CMD_STOP_FWLOGGER = 55, 113 113 114 114 /* AP mode commands */ 115 115 CMD_BSS_START = 60, ··· 579 573 u8 reason_opcode; 580 574 u8 send_deauth_flag; 581 575 u8 padding1; 576 + } __packed; 577 + 578 + /* 579 + * Continuous mode - packets are transferred to the host periodically 580 + * via the data path. 581 + * On demand - Log messages are stored in a cyclic buffer in the 582 + * firmware, and only transferred to the host when explicitly requested 583 + */ 584 + enum wl12xx_fwlogger_log_mode { 585 + WL12XX_FWLOG_CONTINUOUS, 586 + WL12XX_FWLOG_ON_DEMAND 587 + }; 588 + 589 + /* Include/exclude timestamps from the log messages */ 590 + enum wl12xx_fwlogger_timestamp { 591 + WL12XX_FWLOG_TIMESTAMP_DISABLED, 592 + WL12XX_FWLOG_TIMESTAMP_ENABLED 593 + }; 594 + 595 + /* 596 + * Logs can be routed to the debug pinouts (where available), to the host bus 597 + * (SDIO/SPI), or dropped 598 + */ 599 + enum wl12xx_fwlogger_output { 600 + WL12XX_FWLOG_OUTPUT_NONE, 601 + WL12XX_FWLOG_OUTPUT_DBG_PINS, 602 + WL12XX_FWLOG_OUTPUT_HOST, 603 + }; 604 + 605 + struct wl12xx_cmd_config_fwlog { 606 + struct wl1271_cmd_header header; 607 + 608 + /* See enum wl12xx_fwlogger_log_mode */ 609 + u8 logger_mode; 610 + 611 + /* Minimum log level threshold */ 612 + u8 log_severity; 613 + 614 + /* Include/exclude timestamps from the log messages */ 615 + u8 timestamp; 
616 + 617 + /* See enum wl1271_fwlogger_output */ 618 + u8 output; 619 + 620 + /* Regulates the frequency of log messages */ 621 + u8 threshold; 622 + 623 + u8 padding[3]; 624 + } __packed; 625 + 626 + struct wl12xx_cmd_start_fwlog { 627 + struct wl1271_cmd_header header; 628 + } __packed; 629 + 630 + struct wl12xx_cmd_stop_fwlog { 631 + struct wl1271_cmd_header header; 582 632 } __packed; 583 633 584 634 #endif /* __WL1271_CMD_H__ */
+55
drivers/net/wireless/wl12xx/conf.h
··· 1248 1248 u8 swallow_clk_diff; 1249 1249 }; 1250 1250 1251 + struct conf_rx_streaming_settings { 1252 + /* 1253 + * RX Streaming duration (in msec) from last tx/rx 1254 + * 1255 + * Range: u32 1256 + */ 1257 + u32 duration; 1258 + 1259 + /* 1260 + * Bitmap of tids to be polled during RX streaming. 1261 + * (Note: it doesn't look like it really matters) 1262 + * 1263 + * Range: 0x1-0xff 1264 + */ 1265 + u8 queues; 1266 + 1267 + /* 1268 + * RX Streaming interval. 1269 + * (Note:this value is also used as the rx streaming timeout) 1270 + * Range: 0 (disabled), 10 - 100 1271 + */ 1272 + u8 interval; 1273 + 1274 + /* 1275 + * enable rx streaming also when there is no coex activity 1276 + */ 1277 + u8 always; 1278 + }; 1279 + 1280 + struct conf_fwlog { 1281 + /* Continuous or on-demand */ 1282 + u8 mode; 1283 + 1284 + /* 1285 + * Number of memory blocks dedicated for the FW logger 1286 + * 1287 + * Range: 1-3, or 0 to disable the FW logger 1288 + */ 1289 + u8 mem_blocks; 1290 + 1291 + /* Minimum log level threshold */ 1292 + u8 severity; 1293 + 1294 + /* Include/exclude timestamps from the log messages */ 1295 + u8 timestamp; 1296 + 1297 + /* See enum wl1271_fwlogger_output */ 1298 + u8 output; 1299 + 1300 + /* Regulates the frequency of log messages */ 1301 + u8 threshold; 1302 + }; 1303 + 1251 1304 struct conf_drv_settings { 1252 1305 struct conf_sg_settings sg; 1253 1306 struct conf_rx_settings rx; ··· 1316 1263 struct conf_memory_settings mem_wl127x; 1317 1264 struct conf_memory_settings mem_wl128x; 1318 1265 struct conf_fm_coex fm_coex; 1266 + struct conf_rx_streaming_settings rx_streaming; 1267 + struct conf_fwlog fwlog; 1319 1268 u8 hci_io_ds; 1320 1269 }; 1321 1270
+136 -2
drivers/net/wireless/wl12xx/debugfs.c
··· 71 71 if (!entry || IS_ERR(entry)) \ 72 72 goto err; \ 73 73 74 + #define DEBUGFS_ADD_PREFIX(prefix, name, parent) \ 75 + do { \ 76 + entry = debugfs_create_file(#name, 0400, parent, \ 77 + wl, &prefix## _## name## _ops); \ 78 + if (!entry || IS_ERR(entry)) \ 79 + goto err; \ 80 + } while (0); 81 + 74 82 #define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \ 75 83 static ssize_t sub## _ ##name## _read(struct file *file, \ 76 84 char __user *userbuf, \ ··· 306 298 struct wl1271 *wl = file->private_data; 307 299 308 300 mutex_lock(&wl->mutex); 309 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 301 + wl12xx_queue_recovery_work(wl); 310 302 mutex_unlock(&wl->mutex); 311 303 312 304 return count; ··· 535 527 .llseek = default_llseek, 536 528 }; 537 529 530 + static ssize_t rx_streaming_interval_write(struct file *file, 531 + const char __user *user_buf, 532 + size_t count, loff_t *ppos) 533 + { 534 + struct wl1271 *wl = file->private_data; 535 + char buf[10]; 536 + size_t len; 537 + unsigned long value; 538 + int ret; 539 + 540 + len = min(count, sizeof(buf) - 1); 541 + if (copy_from_user(buf, user_buf, len)) 542 + return -EFAULT; 543 + buf[len] = '\0'; 544 + 545 + ret = kstrtoul(buf, 0, &value); 546 + if (ret < 0) { 547 + wl1271_warning("illegal value in rx_streaming_interval!"); 548 + return -EINVAL; 549 + } 550 + 551 + /* valid values: 0, 10-100 */ 552 + if (value && (value < 10 || value > 100)) { 553 + wl1271_warning("value is not in range!"); 554 + return -ERANGE; 555 + } 556 + 557 + mutex_lock(&wl->mutex); 558 + 559 + wl->conf.rx_streaming.interval = value; 560 + 561 + ret = wl1271_ps_elp_wakeup(wl); 562 + if (ret < 0) 563 + goto out; 564 + 565 + wl1271_recalc_rx_streaming(wl); 566 + 567 + wl1271_ps_elp_sleep(wl); 568 + out: 569 + mutex_unlock(&wl->mutex); 570 + return count; 571 + } 572 + 573 + static ssize_t rx_streaming_interval_read(struct file *file, 574 + char __user *userbuf, 575 + size_t count, loff_t *ppos) 576 + { 577 + struct wl1271 *wl = 
file->private_data; 578 + return wl1271_format_buffer(userbuf, count, ppos, 579 + "%d\n", wl->conf.rx_streaming.interval); 580 + } 581 + 582 + static const struct file_operations rx_streaming_interval_ops = { 583 + .read = rx_streaming_interval_read, 584 + .write = rx_streaming_interval_write, 585 + .open = wl1271_open_file_generic, 586 + .llseek = default_llseek, 587 + }; 588 + 589 + static ssize_t rx_streaming_always_write(struct file *file, 590 + const char __user *user_buf, 591 + size_t count, loff_t *ppos) 592 + { 593 + struct wl1271 *wl = file->private_data; 594 + char buf[10]; 595 + size_t len; 596 + unsigned long value; 597 + int ret; 598 + 599 + len = min(count, sizeof(buf) - 1); 600 + if (copy_from_user(buf, user_buf, len)) 601 + return -EFAULT; 602 + buf[len] = '\0'; 603 + 604 + ret = kstrtoul(buf, 0, &value); 605 + if (ret < 0) { 606 + wl1271_warning("illegal value in rx_streaming_write!"); 607 + return -EINVAL; 608 + } 609 + 610 + /* valid values: 0, 10-100 */ 611 + if (!(value == 0 || value == 1)) { 612 + wl1271_warning("value is not in valid!"); 613 + return -EINVAL; 614 + } 615 + 616 + mutex_lock(&wl->mutex); 617 + 618 + wl->conf.rx_streaming.always = value; 619 + 620 + ret = wl1271_ps_elp_wakeup(wl); 621 + if (ret < 0) 622 + goto out; 623 + 624 + wl1271_recalc_rx_streaming(wl); 625 + 626 + wl1271_ps_elp_sleep(wl); 627 + out: 628 + mutex_unlock(&wl->mutex); 629 + return count; 630 + } 631 + 632 + static ssize_t rx_streaming_always_read(struct file *file, 633 + char __user *userbuf, 634 + size_t count, loff_t *ppos) 635 + { 636 + struct wl1271 *wl = file->private_data; 637 + return wl1271_format_buffer(userbuf, count, ppos, 638 + "%d\n", wl->conf.rx_streaming.always); 639 + } 640 + 641 + static const struct file_operations rx_streaming_always_ops = { 642 + .read = rx_streaming_always_read, 643 + .write = rx_streaming_always_write, 644 + .open = wl1271_open_file_generic, 645 + .llseek = default_llseek, 646 + }; 647 + 538 648 static int 
wl1271_debugfs_add_files(struct wl1271 *wl, 539 649 struct dentry *rootdir) 540 650 { 541 651 int ret = 0; 542 - struct dentry *entry, *stats; 652 + struct dentry *entry, *stats, *streaming; 543 653 544 654 stats = debugfs_create_dir("fw-statistics", rootdir); 545 655 if (!stats || IS_ERR(stats)) { ··· 765 639 DEBUGFS_ADD(driver_state, rootdir); 766 640 DEBUGFS_ADD(dtim_interval, rootdir); 767 641 DEBUGFS_ADD(beacon_interval, rootdir); 642 + 643 + streaming = debugfs_create_dir("rx_streaming", rootdir); 644 + if (!streaming || IS_ERR(streaming)) 645 + goto err; 646 + 647 + DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming); 648 + DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming); 649 + 768 650 769 651 return 0; 770 652
+25 -11
drivers/net/wireless/wl12xx/event.c
··· 133 133 if (ret < 0) 134 134 break; 135 135 136 - /* enable beacon early termination */ 137 - ret = wl1271_acx_bet_enable(wl, true); 138 - if (ret < 0) 139 - break; 136 + /* 137 + * BET has only a minor effect in 5GHz and masks 138 + * channel switch IEs, so we only enable BET on 2.4GHz 139 + */ 140 + if (wl->band == IEEE80211_BAND_2GHZ) 141 + /* enable beacon early termination */ 142 + ret = wl1271_acx_bet_enable(wl, true); 140 143 141 144 if (wl->ps_compl) { 142 145 complete(wl->ps_compl); ··· 186 183 ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, wl->bssid); 187 184 } 188 185 186 + static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl, 187 + u8 enable) 188 + { 189 + if (enable) { 190 + /* disable dynamic PS when requested by the firmware */ 191 + ieee80211_disable_dyn_ps(wl->vif); 192 + set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); 193 + } else { 194 + ieee80211_enable_dyn_ps(wl->vif); 195 + clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); 196 + wl1271_recalc_rx_streaming(wl); 197 + } 198 + 199 + } 200 + 189 201 static void wl1271_event_mbox_dump(struct event_mailbox *mbox) 190 202 { 191 203 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); ··· 244 226 } 245 227 } 246 228 247 - /* disable dynamic PS when requested by the firmware */ 248 229 if (vector & SOFT_GEMINI_SENSE_EVENT_ID && 249 - wl->bss_type == BSS_TYPE_STA_BSS) { 250 - if (mbox->soft_gemini_sense_info) 251 - ieee80211_disable_dyn_ps(wl->vif); 252 - else 253 - ieee80211_enable_dyn_ps(wl->vif); 254 - } 230 + wl->bss_type == BSS_TYPE_STA_BSS) 231 + wl12xx_event_soft_gemini_sense(wl, 232 + mbox->soft_gemini_sense_info); 255 233 256 234 /* 257 235 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
+3
drivers/net/wireless/wl12xx/ini.h
··· 24 24 #ifndef __INI_H__ 25 25 #define __INI_H__ 26 26 27 + #define GENERAL_SETTINGS_DRPW_LPD 0xc0 28 + #define SCRATCH_ENABLE_LPD BIT(25) 29 + 27 30 #define WL1271_INI_MAX_SMART_REFLEX_PARAM 16 28 31 29 32 struct wl1271_ini_general_params {
+19
drivers/net/wireless/wl12xx/init.c
··· 321 321 return 0; 322 322 } 323 323 324 + static int wl12xx_init_fwlog(struct wl1271 *wl) 325 + { 326 + int ret; 327 + 328 + if (wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) 329 + return 0; 330 + 331 + ret = wl12xx_cmd_config_fwlog(wl); 332 + if (ret < 0) 333 + return ret; 334 + 335 + return 0; 336 + } 337 + 324 338 static int wl1271_sta_hw_init(struct wl1271 *wl) 325 339 { 326 340 int ret; ··· 393 379 return ret; 394 380 395 381 ret = wl1271_acx_sta_mem_cfg(wl); 382 + if (ret < 0) 383 + return ret; 384 + 385 + /* Configure the FW logger */ 386 + ret = wl12xx_init_fwlog(wl); 396 387 if (ret < 0) 397 388 return ret; 398 389
+4 -3
drivers/net/wireless/wl12xx/io.c
··· 23 23 24 24 #include <linux/module.h> 25 25 #include <linux/platform_device.h> 26 - #include <linux/crc7.h> 27 26 #include <linux/spi/spi.h> 28 27 29 28 #include "wl12xx.h" ··· 127 128 128 129 void wl1271_io_reset(struct wl1271 *wl) 129 130 { 130 - wl->if_ops->reset(wl); 131 + if (wl->if_ops->reset) 132 + wl->if_ops->reset(wl); 131 133 } 132 134 133 135 void wl1271_io_init(struct wl1271 *wl) 134 136 { 135 - wl->if_ops->init(wl); 137 + if (wl->if_ops->init) 138 + wl->if_ops->init(wl); 136 139 } 137 140 138 141 void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+14
drivers/net/wireless/wl12xx/io.h
··· 129 129 wl1271_raw_write(wl, physical, buf, len, fixed); 130 130 } 131 131 132 + static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, 133 + void *buf, size_t len, bool fixed) 134 + { 135 + int physical; 136 + int addr; 137 + 138 + /* Addresses are stored internally as addresses to 32 bytes blocks */ 139 + addr = hwaddr << 5; 140 + 141 + physical = wl1271_translate_addr(wl, addr); 142 + 143 + wl1271_raw_read(wl, physical, buf, len, fixed); 144 + } 145 + 132 146 static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 133 147 { 134 148 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+486 -108
drivers/net/wireless/wl12xx/main.c
··· 31 31 #include <linux/platform_device.h> 32 32 #include <linux/slab.h> 33 33 #include <linux/wl12xx.h> 34 + #include <linux/sched.h> 34 35 35 36 #include "wl12xx.h" 36 37 #include "wl12xx_80211.h" ··· 363 362 .fm_disturbed_band_margin = 0xff, /* default */ 364 363 .swallow_clk_diff = 0xff, /* default */ 365 364 }, 365 + .rx_streaming = { 366 + .duration = 150, 367 + .queues = 0x1, 368 + .interval = 20, 369 + .always = 0, 370 + }, 371 + .fwlog = { 372 + .mode = WL12XX_FWLOG_ON_DEMAND, 373 + .mem_blocks = 2, 374 + .severity = 0, 375 + .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED, 376 + .output = WL12XX_FWLOG_OUTPUT_HOST, 377 + .threshold = 0, 378 + }, 366 379 .hci_io_ds = HCI_IO_DS_6MA, 367 380 }; 381 + 382 + static char *fwlog_param; 368 383 369 384 static void __wl1271_op_remove_interface(struct wl1271 *wl, 370 385 bool reset_tx_queues); ··· 405 388 static DEFINE_MUTEX(wl_list_mutex); 406 389 static LIST_HEAD(wl_list); 407 390 391 + static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate) 392 + { 393 + int ret; 394 + if (operstate != IF_OPER_UP) 395 + return 0; 396 + 397 + if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) 398 + return 0; 399 + 400 + ret = wl1271_cmd_set_sta_state(wl); 401 + if (ret < 0) 402 + return ret; 403 + 404 + wl1271_info("Association completed."); 405 + return 0; 406 + } 408 407 static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, 409 408 void *arg) 410 409 { ··· 470 437 if (ret < 0) 471 438 goto out; 472 439 473 - if ((dev->operstate == IF_OPER_UP) && 474 - !test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) { 475 - wl1271_cmd_set_sta_state(wl); 476 - wl1271_info("Association completed."); 477 - } 440 + wl1271_check_operstate(wl, dev->operstate); 478 441 479 442 wl1271_ps_elp_sleep(wl); 480 443 ··· 502 473 return 0; 503 474 } 504 475 476 + static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable) 477 + { 478 + int ret = 0; 479 + 480 + /* we should hold wl->mutex */ 
481 + ret = wl1271_acx_ps_rx_streaming(wl, enable); 482 + if (ret < 0) 483 + goto out; 484 + 485 + if (enable) 486 + set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); 487 + else 488 + clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); 489 + out: 490 + return ret; 491 + } 492 + 493 + /* 494 + * this function is being called when the rx_streaming interval 495 + * has beed changed or rx_streaming should be disabled 496 + */ 497 + int wl1271_recalc_rx_streaming(struct wl1271 *wl) 498 + { 499 + int ret = 0; 500 + int period = wl->conf.rx_streaming.interval; 501 + 502 + /* don't reconfigure if rx_streaming is disabled */ 503 + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) 504 + goto out; 505 + 506 + /* reconfigure/disable according to new streaming_period */ 507 + if (period && 508 + test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) && 509 + (wl->conf.rx_streaming.always || 510 + test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 511 + ret = wl1271_set_rx_streaming(wl, true); 512 + else { 513 + ret = wl1271_set_rx_streaming(wl, false); 514 + /* don't cancel_work_sync since we might deadlock */ 515 + del_timer_sync(&wl->rx_streaming_timer); 516 + } 517 + out: 518 + return ret; 519 + } 520 + 521 + static void wl1271_rx_streaming_enable_work(struct work_struct *work) 522 + { 523 + int ret; 524 + struct wl1271 *wl = 525 + container_of(work, struct wl1271, rx_streaming_enable_work); 526 + 527 + mutex_lock(&wl->mutex); 528 + 529 + if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) || 530 + !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || 531 + (!wl->conf.rx_streaming.always && 532 + !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 533 + goto out; 534 + 535 + if (!wl->conf.rx_streaming.interval) 536 + goto out; 537 + 538 + ret = wl1271_ps_elp_wakeup(wl); 539 + if (ret < 0) 540 + goto out; 541 + 542 + ret = wl1271_set_rx_streaming(wl, true); 543 + if (ret < 0) 544 + goto out_sleep; 545 + 546 + /* stop it after some time of inactivity */ 547 + 
mod_timer(&wl->rx_streaming_timer, 548 + jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); 549 + 550 + out_sleep: 551 + wl1271_ps_elp_sleep(wl); 552 + out: 553 + mutex_unlock(&wl->mutex); 554 + } 555 + 556 + static void wl1271_rx_streaming_disable_work(struct work_struct *work) 557 + { 558 + int ret; 559 + struct wl1271 *wl = 560 + container_of(work, struct wl1271, rx_streaming_disable_work); 561 + 562 + mutex_lock(&wl->mutex); 563 + 564 + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) 565 + goto out; 566 + 567 + ret = wl1271_ps_elp_wakeup(wl); 568 + if (ret < 0) 569 + goto out; 570 + 571 + ret = wl1271_set_rx_streaming(wl, false); 572 + if (ret) 573 + goto out_sleep; 574 + 575 + out_sleep: 576 + wl1271_ps_elp_sleep(wl); 577 + out: 578 + mutex_unlock(&wl->mutex); 579 + } 580 + 581 + static void wl1271_rx_streaming_timer(unsigned long data) 582 + { 583 + struct wl1271 *wl = (struct wl1271 *)data; 584 + ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work); 585 + } 586 + 505 587 static void wl1271_conf_init(struct wl1271 *wl) 506 588 { 507 589 ··· 628 488 629 489 /* apply driver default configuration */ 630 490 memcpy(&wl->conf, &default_conf, sizeof(default_conf)); 631 - } 632 491 492 + /* Adjust settings according to optional module parameters */ 493 + if (fwlog_param) { 494 + if (!strcmp(fwlog_param, "continuous")) { 495 + wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; 496 + } else if (!strcmp(fwlog_param, "ondemand")) { 497 + wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND; 498 + } else if (!strcmp(fwlog_param, "dbgpins")) { 499 + wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; 500 + wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; 501 + } else if (!strcmp(fwlog_param, "disable")) { 502 + wl->conf.fwlog.mem_blocks = 0; 503 + wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE; 504 + } else { 505 + wl1271_error("Unknown fwlog parameter %s", fwlog_param); 506 + } 507 + } 508 + } 633 509 634 510 static int wl1271_plt_init(struct wl1271 
*wl) 635 511 { ··· 897 741 898 742 /* Return sent skbs to the network stack */ 899 743 while ((skb = skb_dequeue(&wl->deferred_tx_queue))) 900 - ieee80211_tx_status(wl->hw, skb); 744 + ieee80211_tx_status_ni(wl->hw, skb); 901 745 } 902 746 903 747 static void wl1271_netstack_work(struct work_struct *work) ··· 964 808 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 965 809 wl1271_error("watchdog interrupt received! " 966 810 "starting recovery."); 967 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 811 + wl12xx_queue_recovery_work(wl); 968 812 969 813 /* restarting the chip. ignore any other interrupt. */ 970 814 goto out; ··· 1126 970 return ret; 1127 971 } 1128 972 973 + void wl12xx_queue_recovery_work(struct wl1271 *wl) 974 + { 975 + if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 976 + ieee80211_queue_work(wl->hw, &wl->recovery_work); 977 + } 978 + 979 + size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) 980 + { 981 + size_t len = 0; 982 + 983 + /* The FW log is a length-value list, find where the log end */ 984 + while (len < maxlen) { 985 + if (memblock[len] == 0) 986 + break; 987 + if (len + memblock[len] + 1 > maxlen) 988 + break; 989 + len += memblock[len] + 1; 990 + } 991 + 992 + /* Make sure we have enough room */ 993 + len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size)); 994 + 995 + /* Fill the FW log file, consumed by the sysfs fwlog entry */ 996 + memcpy(wl->fwlog + wl->fwlog_size, memblock, len); 997 + wl->fwlog_size += len; 998 + 999 + return len; 1000 + } 1001 + 1002 + static void wl12xx_read_fwlog_panic(struct wl1271 *wl) 1003 + { 1004 + u32 addr; 1005 + u32 first_addr; 1006 + u8 *block; 1007 + 1008 + if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) || 1009 + (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) || 1010 + (wl->conf.fwlog.mem_blocks == 0)) 1011 + return; 1012 + 1013 + wl1271_info("Reading FW panic log"); 1014 + 1015 + block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL); 1016 + if (!block) 1017 + 
return; 1018 + 1019 + /* 1020 + * Make sure the chip is awake and the logger isn't active. 1021 + * This might fail if the firmware hanged. 1022 + */ 1023 + if (!wl1271_ps_elp_wakeup(wl)) 1024 + wl12xx_cmd_stop_fwlog(wl); 1025 + 1026 + /* Read the first memory block address */ 1027 + wl1271_fw_status(wl, wl->fw_status); 1028 + first_addr = __le32_to_cpu(wl->fw_status->sta.log_start_addr); 1029 + if (!first_addr) 1030 + goto out; 1031 + 1032 + /* Traverse the memory blocks linked list */ 1033 + addr = first_addr; 1034 + do { 1035 + memset(block, 0, WL12XX_HW_BLOCK_SIZE); 1036 + wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE, 1037 + false); 1038 + 1039 + /* 1040 + * Memory blocks are linked to one another. The first 4 bytes 1041 + * of each memory block hold the hardware address of the next 1042 + * one. The last memory block points to the first one. 1043 + */ 1044 + addr = __le32_to_cpup((__le32 *)block); 1045 + if (!wl12xx_copy_fwlog(wl, block + sizeof(addr), 1046 + WL12XX_HW_BLOCK_SIZE - sizeof(addr))) 1047 + break; 1048 + } while (addr && (addr != first_addr)); 1049 + 1050 + wake_up_interruptible(&wl->fwlog_waitq); 1051 + 1052 + out: 1053 + kfree(block); 1054 + } 1055 + 1129 1056 static void wl1271_recovery_work(struct work_struct *work) 1130 1057 { 1131 1058 struct wl1271 *wl = ··· 1218 979 1219 980 if (wl->state != WL1271_STATE_ON) 1220 981 goto out; 982 + 983 + /* Avoid a recursive recovery */ 984 + set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 985 + 986 + wl12xx_read_fwlog_panic(wl); 1221 987 1222 988 wl1271_info("Hardware recovery in progress. 
FW ver: %s pc: 0x%x", 1223 989 wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4)); ··· 1240 996 1241 997 /* reboot the chipset */ 1242 998 __wl1271_op_remove_interface(wl, false); 999 + 1000 + clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 1001 + 1243 1002 ieee80211_restart_hw(wl->hw); 1244 1003 1245 1004 /* ··· 1321 1074 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 1322 1075 wl->chip.id); 1323 1076 1324 - /* end-of-transaction flag should be set in wl127x AP mode */ 1077 + /* 1078 + * 'end-of-transaction flag' and 'LPD mode flag' 1079 + * should be set in wl127x AP mode only 1080 + */ 1325 1081 if (wl->bss_type == BSS_TYPE_AP_BSS) 1326 - wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION; 1082 + wl->quirks |= (WL12XX_QUIRK_END_OF_TRANSACTION | 1083 + WL12XX_QUIRK_LPD_MODE); 1327 1084 1328 1085 ret = wl1271_setup(wl); 1329 1086 if (ret < 0) ··· 1340 1089 ret = wl1271_setup(wl); 1341 1090 if (ret < 0) 1342 1091 goto out; 1092 + 1343 1093 if (wl1271_set_block_size(wl)) 1344 1094 wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT; 1345 1095 break; ··· 1367 1115 1368 1116 out: 1369 1117 return ret; 1370 - } 1371 - 1372 - static unsigned int wl1271_get_fw_ver_quirks(struct wl1271 *wl) 1373 - { 1374 - unsigned int quirks = 0; 1375 - unsigned int *fw_ver = wl->chip.fw_ver; 1376 - 1377 - /* Only for wl127x */ 1378 - if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) && 1379 - /* Check STA version */ 1380 - (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && 1381 - (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) || 1382 - /* Check AP version */ 1383 - ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) && 1384 - (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN)))) 1385 - quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS; 1386 - 1387 - return quirks; 1388 1118 } 1389 1119 1390 1120 int wl1271_plt_start(struct wl1271 *wl) ··· 1405 1171 wl1271_notice("firmware booted in PLT mode (%s)", 1406 1172 wl->chip.fw_ver_str); 1407 1173 1408 - /* Check if any quirks are needed with 
older fw versions */ 1409 - wl->quirks |= wl1271_get_fw_ver_quirks(wl); 1410 1174 goto out; 1411 1175 1412 1176 irq_disable: ··· 1584 1352 }; 1585 1353 1586 1354 #ifdef CONFIG_PM 1587 - static int wl1271_configure_suspend(struct wl1271 *wl) 1355 + static int wl1271_configure_suspend_sta(struct wl1271 *wl) 1588 1356 { 1589 1357 int ret; 1590 - 1591 - if (wl->bss_type != BSS_TYPE_STA_BSS) 1592 - return 0; 1593 1358 1594 1359 mutex_lock(&wl->mutex); 1595 1360 ··· 1632 1403 1633 1404 } 1634 1405 1635 - static void wl1271_configure_resume(struct wl1271 *wl) 1406 + static int wl1271_configure_suspend_ap(struct wl1271 *wl) 1636 1407 { 1637 1408 int ret; 1638 1409 1639 - if (wl->bss_type != BSS_TYPE_STA_BSS) 1410 + mutex_lock(&wl->mutex); 1411 + 1412 + ret = wl1271_ps_elp_wakeup(wl); 1413 + if (ret < 0) 1414 + goto out_unlock; 1415 + 1416 + ret = wl1271_acx_set_ap_beacon_filter(wl, true); 1417 + 1418 + wl1271_ps_elp_sleep(wl); 1419 + out_unlock: 1420 + mutex_unlock(&wl->mutex); 1421 + return ret; 1422 + 1423 + } 1424 + 1425 + static int wl1271_configure_suspend(struct wl1271 *wl) 1426 + { 1427 + if (wl->bss_type == BSS_TYPE_STA_BSS) 1428 + return wl1271_configure_suspend_sta(wl); 1429 + if (wl->bss_type == BSS_TYPE_AP_BSS) 1430 + return wl1271_configure_suspend_ap(wl); 1431 + return 0; 1432 + } 1433 + 1434 + static void wl1271_configure_resume(struct wl1271 *wl) 1435 + { 1436 + int ret; 1437 + bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS; 1438 + bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS; 1439 + 1440 + if (!is_sta && !is_ap) 1640 1441 return; 1641 1442 1642 1443 mutex_lock(&wl->mutex); ··· 1674 1415 if (ret < 0) 1675 1416 goto out; 1676 1417 1677 - /* exit psm if it wasn't configured */ 1678 - if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) 1679 - wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 1680 - wl->basic_rate, true); 1418 + if (is_sta) { 1419 + /* exit psm if it wasn't configured */ 1420 + if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) 1421 + 
wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 1422 + wl->basic_rate, true); 1423 + } else if (is_ap) { 1424 + wl1271_acx_set_ap_beacon_filter(wl, false); 1425 + } 1681 1426 1682 1427 wl1271_ps_elp_sleep(wl); 1683 1428 out: ··· 1692 1429 struct cfg80211_wowlan *wow) 1693 1430 { 1694 1431 struct wl1271 *wl = hw->priv; 1432 + int ret; 1433 + 1695 1434 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); 1696 - wl->wow_enabled = !!wow; 1697 - if (wl->wow_enabled) { 1698 - int ret; 1699 - ret = wl1271_configure_suspend(wl); 1700 - if (ret < 0) { 1701 - wl1271_warning("couldn't prepare device to suspend"); 1702 - return ret; 1703 - } 1704 - /* flush any remaining work */ 1705 - wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1706 - flush_delayed_work(&wl->scan_complete_work); 1435 + WARN_ON(!wow || !wow->any); 1707 1436 1708 - /* 1709 - * disable and re-enable interrupts in order to flush 1710 - * the threaded_irq 1711 - */ 1712 - wl1271_disable_interrupts(wl); 1713 - 1714 - /* 1715 - * set suspended flag to avoid triggering a new threaded_irq 1716 - * work. no need for spinlock as interrupts are disabled. 1717 - */ 1718 - set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1719 - 1720 - wl1271_enable_interrupts(wl); 1721 - flush_work(&wl->tx_work); 1722 - flush_delayed_work(&wl->pspoll_work); 1723 - flush_delayed_work(&wl->elp_work); 1437 + wl->wow_enabled = true; 1438 + ret = wl1271_configure_suspend(wl); 1439 + if (ret < 0) { 1440 + wl1271_warning("couldn't prepare device to suspend"); 1441 + return ret; 1724 1442 } 1443 + /* flush any remaining work */ 1444 + wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1445 + flush_delayed_work(&wl->scan_complete_work); 1446 + 1447 + /* 1448 + * disable and re-enable interrupts in order to flush 1449 + * the threaded_irq 1450 + */ 1451 + wl1271_disable_interrupts(wl); 1452 + 1453 + /* 1454 + * set suspended flag to avoid triggering a new threaded_irq 1455 + * work. 
no need for spinlock as interrupts are disabled. 1456 + */ 1457 + set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1458 + 1459 + wl1271_enable_interrupts(wl); 1460 + flush_work(&wl->tx_work); 1461 + flush_delayed_work(&wl->pspoll_work); 1462 + flush_delayed_work(&wl->elp_work); 1463 + 1725 1464 return 0; 1726 1465 } 1727 1466 1728 1467 static int wl1271_op_resume(struct ieee80211_hw *hw) 1729 1468 { 1730 1469 struct wl1271 *wl = hw->priv; 1470 + unsigned long flags; 1471 + bool run_irq_work = false; 1472 + 1731 1473 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", 1732 1474 wl->wow_enabled); 1475 + WARN_ON(!wl->wow_enabled); 1733 1476 1734 1477 /* 1735 1478 * re-enable irq_work enqueuing, and call irq_work directly if 1736 1479 * there is a pending work. 1737 1480 */ 1738 - if (wl->wow_enabled) { 1739 - struct wl1271 *wl = hw->priv; 1740 - unsigned long flags; 1741 - bool run_irq_work = false; 1481 + spin_lock_irqsave(&wl->wl_lock, flags); 1482 + clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1483 + if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags)) 1484 + run_irq_work = true; 1485 + spin_unlock_irqrestore(&wl->wl_lock, flags); 1742 1486 1743 - spin_lock_irqsave(&wl->wl_lock, flags); 1744 - clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1745 - if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags)) 1746 - run_irq_work = true; 1747 - spin_unlock_irqrestore(&wl->wl_lock, flags); 1748 - 1749 - if (run_irq_work) { 1750 - wl1271_debug(DEBUG_MAC80211, 1751 - "run postponed irq_work directly"); 1752 - wl1271_irq(0, wl); 1753 - wl1271_enable_interrupts(wl); 1754 - } 1755 - 1756 - wl1271_configure_resume(wl); 1487 + if (run_irq_work) { 1488 + wl1271_debug(DEBUG_MAC80211, 1489 + "run postponed irq_work directly"); 1490 + wl1271_irq(0, wl); 1491 + wl1271_enable_interrupts(wl); 1757 1492 } 1493 + wl1271_configure_resume(wl); 1494 + wl->wow_enabled = false; 1758 1495 1759 1496 return 0; 1760 1497 } ··· 1892 1629 strncpy(wiphy->fw_version, wl->chip.fw_ver_str, 
1893 1630 sizeof(wiphy->fw_version)); 1894 1631 1895 - /* Check if any quirks are needed with older fw versions */ 1896 - wl->quirks |= wl1271_get_fw_ver_quirks(wl); 1897 - 1898 1632 /* 1899 1633 * Now we know if 11a is supported (info from the NVS), so disable 1900 1634 * 11a channels if not supported ··· 1954 1694 cancel_delayed_work_sync(&wl->scan_complete_work); 1955 1695 cancel_work_sync(&wl->netstack_work); 1956 1696 cancel_work_sync(&wl->tx_work); 1697 + del_timer_sync(&wl->rx_streaming_timer); 1698 + cancel_work_sync(&wl->rx_streaming_enable_work); 1699 + cancel_work_sync(&wl->rx_streaming_disable_work); 1957 1700 cancel_delayed_work_sync(&wl->pspoll_work); 1958 1701 cancel_delayed_work_sync(&wl->elp_work); 1959 1702 ··· 3043 2780 } 3044 2781 } 3045 2782 3046 - if (changed & BSS_CHANGED_IBSS) { 3047 - wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d", 3048 - bss_conf->ibss_joined); 3049 - 3050 - if (bss_conf->ibss_joined) { 3051 - u32 rates = bss_conf->basic_rates; 3052 - wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, 3053 - rates); 3054 - wl->basic_rate = wl1271_tx_min_rate_get(wl); 3055 - 3056 - /* by default, use 11b rates */ 3057 - wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES; 3058 - ret = wl1271_acx_sta_rate_policies(wl); 3059 - if (ret < 0) 3060 - goto out; 3061 - } 3062 - } 3063 - 3064 2783 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); 3065 2784 if (ret < 0) 3066 2785 goto out; ··· 3268 3023 } 3269 3024 } 3270 3025 3026 + if (changed & BSS_CHANGED_IBSS) { 3027 + wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d", 3028 + bss_conf->ibss_joined); 3029 + 3030 + if (bss_conf->ibss_joined) { 3031 + u32 rates = bss_conf->basic_rates; 3032 + wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, 3033 + rates); 3034 + wl->basic_rate = wl1271_tx_min_rate_get(wl); 3035 + 3036 + /* by default, use 11b rates */ 3037 + wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES; 3038 + ret = wl1271_acx_sta_rate_policies(wl); 3039 + if (ret < 0) 3040 + goto out; 3041 + } 3042 + } 
3043 + 3271 3044 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); 3272 3045 if (ret < 0) 3273 3046 goto out; ··· 3324 3061 wl1271_warning("cmd join failed %d", ret); 3325 3062 goto out; 3326 3063 } 3064 + wl1271_check_operstate(wl, ieee80211_get_operstate(vif)); 3327 3065 } 3328 3066 3329 3067 out: ··· 4048 3784 static DEVICE_ATTR(hw_pg_ver, S_IRUGO | S_IWUSR, 4049 3785 wl1271_sysfs_show_hw_pg_ver, NULL); 4050 3786 3787 + static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, 3788 + struct bin_attribute *bin_attr, 3789 + char *buffer, loff_t pos, size_t count) 3790 + { 3791 + struct device *dev = container_of(kobj, struct device, kobj); 3792 + struct wl1271 *wl = dev_get_drvdata(dev); 3793 + ssize_t len; 3794 + int ret; 3795 + 3796 + ret = mutex_lock_interruptible(&wl->mutex); 3797 + if (ret < 0) 3798 + return -ERESTARTSYS; 3799 + 3800 + /* Let only one thread read the log at a time, blocking others */ 3801 + while (wl->fwlog_size == 0) { 3802 + DEFINE_WAIT(wait); 3803 + 3804 + prepare_to_wait_exclusive(&wl->fwlog_waitq, 3805 + &wait, 3806 + TASK_INTERRUPTIBLE); 3807 + 3808 + if (wl->fwlog_size != 0) { 3809 + finish_wait(&wl->fwlog_waitq, &wait); 3810 + break; 3811 + } 3812 + 3813 + mutex_unlock(&wl->mutex); 3814 + 3815 + schedule(); 3816 + finish_wait(&wl->fwlog_waitq, &wait); 3817 + 3818 + if (signal_pending(current)) 3819 + return -ERESTARTSYS; 3820 + 3821 + ret = mutex_lock_interruptible(&wl->mutex); 3822 + if (ret < 0) 3823 + return -ERESTARTSYS; 3824 + } 3825 + 3826 + /* Check if the fwlog is still valid */ 3827 + if (wl->fwlog_size < 0) { 3828 + mutex_unlock(&wl->mutex); 3829 + return 0; 3830 + } 3831 + 3832 + /* Seeking is not supported - old logs are not kept. Disregard pos. 
*/ 3833 + len = min(count, (size_t)wl->fwlog_size); 3834 + wl->fwlog_size -= len; 3835 + memcpy(buffer, wl->fwlog, len); 3836 + 3837 + /* Make room for new messages */ 3838 + memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size); 3839 + 3840 + mutex_unlock(&wl->mutex); 3841 + 3842 + return len; 3843 + } 3844 + 3845 + static struct bin_attribute fwlog_attr = { 3846 + .attr = {.name = "fwlog", .mode = S_IRUSR}, 3847 + .read = wl1271_sysfs_read_fwlog, 3848 + }; 3849 + 4051 3850 int wl1271_register_hw(struct wl1271 *wl) 4052 3851 { 4053 3852 int ret; ··· 4291 3964 INIT_WORK(&wl->tx_work, wl1271_tx_work); 4292 3965 INIT_WORK(&wl->recovery_work, wl1271_recovery_work); 4293 3966 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); 3967 + INIT_WORK(&wl->rx_streaming_enable_work, 3968 + wl1271_rx_streaming_enable_work); 3969 + INIT_WORK(&wl->rx_streaming_disable_work, 3970 + wl1271_rx_streaming_disable_work); 3971 + 3972 + wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); 3973 + if (!wl->freezable_wq) { 3974 + ret = -ENOMEM; 3975 + goto err_hw; 3976 + } 3977 + 4294 3978 wl->channel = WL1271_DEFAULT_CHANNEL; 4295 3979 wl->beacon_int = WL1271_DEFAULT_BEACON_INT; 4296 3980 wl->default_key = 0; ··· 4327 3989 wl->quirks = 0; 4328 3990 wl->platform_quirks = 0; 4329 3991 wl->sched_scanning = false; 3992 + setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer, 3993 + (unsigned long) wl); 3994 + wl->fwlog_size = 0; 3995 + init_waitqueue_head(&wl->fwlog_waitq); 4330 3996 4331 3997 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 4332 3998 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) ··· 4348 4006 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 4349 4007 if (!wl->aggr_buf) { 4350 4008 ret = -ENOMEM; 4351 - goto err_hw; 4009 + goto err_wq; 4352 4010 } 4353 4011 4354 4012 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); ··· 4357 4015 goto err_aggr; 4358 4016 } 4359 4017 4018 + /* Allocate one page for the FW log */ 4019 + wl->fwlog = (u8 
*)get_zeroed_page(GFP_KERNEL); 4020 + if (!wl->fwlog) { 4021 + ret = -ENOMEM; 4022 + goto err_dummy_packet; 4023 + } 4024 + 4360 4025 /* Register platform device */ 4361 4026 ret = platform_device_register(wl->plat_dev); 4362 4027 if (ret) { 4363 4028 wl1271_error("couldn't register platform device"); 4364 - goto err_dummy_packet; 4029 + goto err_fwlog; 4365 4030 } 4366 4031 dev_set_drvdata(&wl->plat_dev->dev, wl); 4367 4032 ··· 4386 4037 goto err_bt_coex_state; 4387 4038 } 4388 4039 4040 + /* Create sysfs file for the FW log */ 4041 + ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr); 4042 + if (ret < 0) { 4043 + wl1271_error("failed to create sysfs file fwlog"); 4044 + goto err_hw_pg_ver; 4045 + } 4046 + 4389 4047 return hw; 4048 + 4049 + err_hw_pg_ver: 4050 + device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); 4390 4051 4391 4052 err_bt_coex_state: 4392 4053 device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); ··· 4404 4045 err_platform: 4405 4046 platform_device_unregister(wl->plat_dev); 4406 4047 4048 + err_fwlog: 4049 + free_page((unsigned long)wl->fwlog); 4050 + 4407 4051 err_dummy_packet: 4408 4052 dev_kfree_skb(wl->dummy_packet); 4409 4053 4410 4054 err_aggr: 4411 4055 free_pages((unsigned long)wl->aggr_buf, order); 4056 + 4057 + err_wq: 4058 + destroy_workqueue(wl->freezable_wq); 4412 4059 4413 4060 err_hw: 4414 4061 wl1271_debugfs_exit(wl); ··· 4431 4066 4432 4067 int wl1271_free_hw(struct wl1271 *wl) 4433 4068 { 4069 + /* Unblock any fwlog readers */ 4070 + mutex_lock(&wl->mutex); 4071 + wl->fwlog_size = -1; 4072 + wake_up_interruptible_all(&wl->fwlog_waitq); 4073 + mutex_unlock(&wl->mutex); 4074 + 4075 + device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr); 4434 4076 platform_device_unregister(wl->plat_dev); 4077 + free_page((unsigned long)wl->fwlog); 4435 4078 dev_kfree_skb(wl->dummy_packet); 4436 4079 free_pages((unsigned long)wl->aggr_buf, 4437 4080 get_order(WL1271_AGGR_BUFFER_SIZE)); ··· 4454 4081 4455 4082 
kfree(wl->fw_status); 4456 4083 kfree(wl->tx_res_if); 4084 + destroy_workqueue(wl->freezable_wq); 4457 4085 4458 4086 ieee80211_free_hw(wl->hw); 4459 4087 ··· 4466 4092 EXPORT_SYMBOL_GPL(wl12xx_debug_level); 4467 4093 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR); 4468 4094 MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); 4095 + 4096 + module_param_named(fwlog, fwlog_param, charp, 0); 4097 + MODULE_PARM_DESC(keymap, 4098 + "FW logger options: continuous, ondemand, dbgpins or disable"); 4469 4099 4470 4100 MODULE_LICENSE("GPL"); 4471 4101 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
+7 -5
drivers/net/wireless/wl12xx/ps.c
··· 118 118 &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); 119 119 if (ret == 0) { 120 120 wl1271_error("ELP wakeup timeout!"); 121 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 121 + wl12xx_queue_recovery_work(wl); 122 122 ret = -ETIMEDOUT; 123 123 goto err; 124 124 } else if (ret < 0) { ··· 169 169 wl1271_debug(DEBUG_PSM, "leaving psm"); 170 170 171 171 /* disable beacon early termination */ 172 - ret = wl1271_acx_bet_enable(wl, false); 173 - if (ret < 0) 174 - return ret; 172 + if (wl->band == IEEE80211_BAND_2GHZ) { 173 + ret = wl1271_acx_bet_enable(wl, false); 174 + if (ret < 0) 175 + return ret; 176 + } 175 177 176 178 /* disable beacon filtering */ 177 179 ret = wl1271_acx_beacon_filter_opt(wl, false); ··· 204 202 info = IEEE80211_SKB_CB(skb); 205 203 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 206 204 info->status.rates[0].idx = -1; 207 - ieee80211_tx_status(wl->hw, skb); 205 + ieee80211_tx_status_ni(wl->hw, skb); 208 206 filtered++; 209 207 } 210 208 }
+34 -5
drivers/net/wireless/wl12xx/rx.c
··· 22 22 */ 23 23 24 24 #include <linux/gfp.h> 25 + #include <linux/sched.h> 25 26 26 27 #include "wl12xx.h" 27 28 #include "acx.h" ··· 96 95 struct ieee80211_hdr *hdr; 97 96 u8 *buf; 98 97 u8 beacon = 0; 98 + u8 is_data = 0; 99 99 100 100 /* 101 101 * In PLT mode we seem to get frames and mac80211 warns about them, ··· 107 105 108 106 /* the data read starts with the descriptor */ 109 107 desc = (struct wl1271_rx_descriptor *) data; 108 + 109 + if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) { 110 + size_t len = length - sizeof(*desc); 111 + wl12xx_copy_fwlog(wl, data + sizeof(*desc), len); 112 + wake_up_interruptible(&wl->fwlog_waitq); 113 + return 0; 114 + } 110 115 111 116 switch (desc->status & WL1271_RX_DESC_STATUS_MASK) { 112 117 /* discard corrupted packets */ ··· 146 137 hdr = (struct ieee80211_hdr *)skb->data; 147 138 if (ieee80211_is_beacon(hdr->frame_control)) 148 139 beacon = 1; 140 + if (ieee80211_is_data_present(hdr->frame_control)) 141 + is_data = 1; 149 142 150 143 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 151 144 ··· 158 147 skb_trim(skb, skb->len - desc->pad_len); 159 148 160 149 skb_queue_tail(&wl->deferred_rx_queue, skb); 161 - ieee80211_queue_work(wl->hw, &wl->netstack_work); 150 + queue_work(wl->freezable_wq, &wl->netstack_work); 162 151 163 - return 0; 152 + return is_data; 164 153 } 165 154 166 155 void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status) ··· 173 162 u32 mem_block; 174 163 u32 pkt_length; 175 164 u32 pkt_offset; 165 + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 166 + bool had_data = false; 176 167 177 168 while (drv_rx_counter != fw_rx_counter) { 178 169 buf_size = 0; ··· 227 214 * conditions, in that case the received frame will just 228 215 * be dropped. 
229 216 */ 230 - wl1271_rx_handle_data(wl, 231 - wl->aggr_buf + pkt_offset, 232 - pkt_length); 217 + if (wl1271_rx_handle_data(wl, 218 + wl->aggr_buf + pkt_offset, 219 + pkt_length) == 1) 220 + had_data = true; 221 + 233 222 wl->rx_counter++; 234 223 drv_rx_counter++; 235 224 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; ··· 245 230 */ 246 231 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 247 232 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 233 + 234 + if (!is_ap && wl->conf.rx_streaming.interval && had_data && 235 + (wl->conf.rx_streaming.always || 236 + test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { 237 + u32 timeout = wl->conf.rx_streaming.duration; 238 + 239 + /* restart rx streaming */ 240 + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) 241 + ieee80211_queue_work(wl->hw, 242 + &wl->rx_streaming_enable_work); 243 + 244 + mod_timer(&wl->rx_streaming_timer, 245 + jiffies + msecs_to_jiffies(timeout)); 246 + } 248 247 } 249 248 250 249 void wl1271_set_default_filters(struct wl1271 *wl)
+12
drivers/net/wireless/wl12xx/rx.h
··· 97 97 #define RX_BUF_SIZE_MASK 0xFFF00 98 98 #define RX_BUF_SIZE_SHIFT_DIV 6 99 99 100 + enum { 101 + WL12XX_RX_CLASS_UNKNOWN, 102 + WL12XX_RX_CLASS_MANAGEMENT, 103 + WL12XX_RX_CLASS_DATA, 104 + WL12XX_RX_CLASS_QOS_DATA, 105 + WL12XX_RX_CLASS_BCN_PRBRSP, 106 + WL12XX_RX_CLASS_EAPOL, 107 + WL12XX_RX_CLASS_BA_EVENT, 108 + WL12XX_RX_CLASS_AMSDU, 109 + WL12XX_RX_CLASS_LOGGER, 110 + }; 111 + 100 112 struct wl1271_rx_descriptor { 101 113 __le16 length; 102 114 u8 status;
+30 -33
drivers/net/wireless/wl12xx/scan.c
··· 62 62 63 63 if (wl->scan.failed) { 64 64 wl1271_info("Scan completed due to error."); 65 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 65 + wl12xx_queue_recovery_work(wl); 66 66 } 67 67 68 68 out: ··· 326 326 struct cfg80211_sched_scan_request *req, 327 327 struct conn_scan_ch_params *channels, 328 328 u32 band, bool radar, bool passive, 329 - int start) 329 + int start, int max_channels) 330 330 { 331 331 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 332 332 int i, j; ··· 334 334 bool force_passive = !req->n_ssids; 335 335 336 336 for (i = 0, j = start; 337 - i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS; 337 + i < req->n_channels && j < max_channels; 338 338 i++) { 339 339 flags = req->channels[i]->flags; 340 340 ··· 380 380 return j - start; 381 381 } 382 382 383 - static int 383 + static bool 384 384 wl1271_scan_sched_scan_channels(struct wl1271 *wl, 385 385 struct cfg80211_sched_scan_request *req, 386 386 struct wl1271_cmd_sched_scan_config *cfg) 387 387 { 388 - int idx = 0; 389 - 390 388 cfg->passive[0] = 391 - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 389 + wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, 392 390 IEEE80211_BAND_2GHZ, 393 - false, true, idx); 394 - idx += cfg->passive[0]; 395 - 391 + false, true, 0, 392 + MAX_CHANNELS_2GHZ); 396 393 cfg->active[0] = 397 - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 394 + wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, 398 395 IEEE80211_BAND_2GHZ, 399 - false, false, idx); 400 - /* 401 - * 5GHz channels always start at position 14, not immediately 402 - * after the last 2.4GHz channel 403 - */ 404 - idx = 14; 405 - 396 + false, false, 397 + cfg->passive[0], 398 + MAX_CHANNELS_2GHZ); 406 399 cfg->passive[1] = 407 - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 400 + wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 408 401 IEEE80211_BAND_5GHZ, 409 - false, true, idx); 410 - idx += cfg->passive[1]; 
411 - 402 + false, true, 0, 403 + MAX_CHANNELS_5GHZ); 412 404 cfg->dfs = 413 - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 405 + wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 414 406 IEEE80211_BAND_5GHZ, 415 - true, true, idx); 416 - idx += cfg->dfs; 417 - 407 + true, true, 408 + cfg->passive[1], 409 + MAX_CHANNELS_5GHZ); 418 410 cfg->active[1] = 419 - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 411 + wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 420 412 IEEE80211_BAND_5GHZ, 421 - false, false, idx); 422 - idx += cfg->active[1]; 413 + false, false, 414 + cfg->passive[1] + cfg->dfs, 415 + MAX_CHANNELS_5GHZ); 416 + /* 802.11j channels are not supported yet */ 417 + cfg->passive[2] = 0; 418 + cfg->active[2] = 0; 423 419 424 420 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", 425 421 cfg->active[0], cfg->passive[0]); ··· 423 427 cfg->active[1], cfg->passive[1]); 424 428 wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs); 425 429 426 - return idx; 430 + return cfg->passive[0] || cfg->active[0] || 431 + cfg->passive[1] || cfg->active[1] || cfg->dfs || 432 + cfg->passive[2] || cfg->active[2]; 427 433 } 428 434 429 435 int wl1271_scan_sched_scan_config(struct wl1271 *wl, ··· 434 436 { 435 437 struct wl1271_cmd_sched_scan_config *cfg = NULL; 436 438 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 437 - int i, total_channels, ret; 439 + int i, ret; 438 440 bool force_passive = !req->n_ssids; 439 441 440 442 wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); ··· 469 471 cfg->ssid_len = 0; 470 472 } 471 473 472 - total_channels = wl1271_scan_sched_scan_channels(wl, req, cfg); 473 - if (total_channels == 0) { 474 + if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) { 474 475 wl1271_error("scan channel list is empty"); 475 476 ret = -EINVAL; 476 477 goto out;
+7 -10
drivers/net/wireless/wl12xx/scan.h
··· 112 112 __le32 timeout; 113 113 } __packed; 114 114 115 - #define MAX_CHANNELS_ALL_BANDS 41 115 + #define MAX_CHANNELS_2GHZ 14 116 + #define MAX_CHANNELS_5GHZ 23 117 + #define MAX_CHANNELS_4GHZ 4 118 + 116 119 #define SCAN_MAX_CYCLE_INTERVALS 16 117 120 #define SCAN_MAX_BANDS 3 118 - 119 - enum { 120 - SCAN_CHANNEL_TYPE_2GHZ_PASSIVE, 121 - SCAN_CHANNEL_TYPE_2GHZ_ACTIVE, 122 - SCAN_CHANNEL_TYPE_5GHZ_PASSIVE, 123 - SCAN_CHANNEL_TYPE_5GHZ_ACTIVE, 124 - SCAN_CHANNEL_TYPE_5GHZ_DFS, 125 - }; 126 121 127 122 enum { 128 123 SCAN_SSID_FILTER_ANY = 0, ··· 177 182 178 183 u8 padding[3]; 179 184 180 - struct conn_scan_ch_params channels[MAX_CHANNELS_ALL_BANDS]; 185 + struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; 186 + struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ]; 187 + struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ]; 181 188 } __packed; 182 189 183 190
+28 -43
drivers/net/wireless/wl12xx/sdio.c
··· 23 23 24 24 #include <linux/irq.h> 25 25 #include <linux/module.h> 26 - #include <linux/crc7.h> 27 26 #include <linux/vmalloc.h> 28 27 #include <linux/mmc/sdio_func.h> 29 28 #include <linux/mmc/sdio_ids.h> ··· 44 45 #define SDIO_DEVICE_ID_TI_WL1271 0x4076 45 46 #endif 46 47 47 - static const struct sdio_device_id wl1271_devices[] = { 48 + static const struct sdio_device_id wl1271_devices[] __devinitconst = { 48 49 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, 49 50 {} 50 51 }; ··· 106 107 enable_irq(wl->irq); 107 108 } 108 109 109 - static void wl1271_sdio_reset(struct wl1271 *wl) 110 - { 111 - } 112 - 113 - static void wl1271_sdio_init(struct wl1271 *wl) 114 - { 115 - } 116 - 117 110 static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, 118 111 size_t len, bool fixed) 119 112 { ··· 161 170 struct sdio_func *func = wl_to_func(wl); 162 171 int ret; 163 172 164 - /* Make sure the card will not be powered off by runtime PM */ 165 - ret = pm_runtime_get_sync(&func->dev); 166 - if (ret < 0) 167 - goto out; 173 + /* If enabled, tell runtime PM not to power off the card */ 174 + if (pm_runtime_enabled(&func->dev)) { 175 + ret = pm_runtime_get_sync(&func->dev); 176 + if (ret) 177 + goto out; 178 + } 168 179 169 180 /* Runtime PM might be disabled, so power up the card manually */ 170 181 ret = mmc_power_restore_host(func->card->host); ··· 193 200 if (ret < 0) 194 201 return ret; 195 202 196 - /* Let runtime PM know the card is powered off */ 197 - return pm_runtime_put_sync(&func->dev); 203 + /* If enabled, let runtime PM know the card is powered off */ 204 + if (pm_runtime_enabled(&func->dev)) 205 + ret = pm_runtime_put_sync(&func->dev); 206 + 207 + return ret; 198 208 } 199 209 200 210 static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable) ··· 211 215 static struct wl1271_if_operations sdio_ops = { 212 216 .read = wl1271_sdio_raw_read, 213 217 .write = wl1271_sdio_raw_write, 214 - .reset = wl1271_sdio_reset, 215 - .init = 
wl1271_sdio_init, 216 218 .power = wl1271_sdio_set_power, 217 219 .dev = wl1271_sdio_wl_to_dev, 218 220 .enable_irq = wl1271_sdio_enable_interrupts, ··· 272 278 goto out_free; 273 279 } 274 280 275 - enable_irq_wake(wl->irq); 276 - device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1); 281 + ret = enable_irq_wake(wl->irq); 282 + if (!ret) { 283 + wl->irq_wake_enabled = true; 284 + device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1); 277 285 286 + /* if sdio can keep power while host is suspended, enable wow */ 287 + mmcflags = sdio_get_host_pm_caps(func); 288 + wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags); 289 + 290 + if (mmcflags & MMC_PM_KEEP_POWER) 291 + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 292 + } 278 293 disable_irq(wl->irq); 279 - 280 - /* if sdio can keep power while host is suspended, enable wow */ 281 - mmcflags = sdio_get_host_pm_caps(func); 282 - wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags); 283 - 284 - if (mmcflags & MMC_PM_KEEP_POWER) 285 - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 286 294 287 295 ret = wl1271_init_ieee80211(wl); 288 296 if (ret) ··· 298 302 299 303 /* Tell PM core that we don't need the card to be powered now */ 300 304 pm_runtime_put_noidle(&func->dev); 301 - 302 - wl1271_notice("initialized"); 303 305 304 306 return 0; 305 307 ··· 318 324 pm_runtime_get_noresume(&func->dev); 319 325 320 326 wl1271_unregister_hw(wl); 321 - device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0); 322 - disable_irq_wake(wl->irq); 327 + if (wl->irq_wake_enabled) { 328 + device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0); 329 + disable_irq_wake(wl->irq); 330 + } 323 331 free_irq(wl->irq, wl); 324 332 wl1271_free_hw(wl); 325 333 } ··· 398 402 399 403 static int __init wl1271_init(void) 400 404 { 401 - int ret; 402 - 403 - ret = sdio_register_driver(&wl1271_sdio_driver); 404 - if (ret < 0) { 405 - wl1271_error("failed to register sdio driver: %d", ret); 406 - goto out; 407 - } 408 - 409 - out: 410 - return ret; 405 + return 
sdio_register_driver(&wl1271_sdio_driver); 411 406 } 412 407 413 408 static void __exit wl1271_exit(void) 414 409 { 415 410 sdio_unregister_driver(&wl1271_sdio_driver); 416 - 417 - wl1271_notice("unloaded"); 418 411 } 419 412 420 413 module_init(wl1271_init);
+1 -14
drivers/net/wireless/wl12xx/spi.c
··· 436 436 if (ret) 437 437 goto out_irq; 438 438 439 - wl1271_notice("initialized"); 440 - 441 439 return 0; 442 440 443 441 out_irq: ··· 472 474 473 475 static int __init wl1271_init(void) 474 476 { 475 - int ret; 476 - 477 - ret = spi_register_driver(&wl1271_spi_driver); 478 - if (ret < 0) { 479 - wl1271_error("failed to register spi driver: %d", ret); 480 - goto out; 481 - } 482 - 483 - out: 484 - return ret; 477 + return spi_register_driver(&wl1271_spi_driver); 485 478 } 486 479 487 480 static void __exit wl1271_exit(void) 488 481 { 489 482 spi_unregister_driver(&wl1271_spi_driver); 490 - 491 - wl1271_notice("unloaded"); 492 483 } 493 484 494 485 module_init(wl1271_init);
+1 -1
drivers/net/wireless/wl12xx/testmode.c
··· 260 260 { 261 261 wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover"); 262 262 263 - ieee80211_queue_work(wl->hw, &wl->recovery_work); 263 + wl12xx_queue_recovery_work(wl); 264 264 265 265 return 0; 266 266 }
+29 -4
drivers/net/wireless/wl12xx/tx.c
··· 562 562 spin_unlock_irqrestore(&wl->wl_lock, flags); 563 563 } 564 564 565 + static bool wl1271_tx_is_data_present(struct sk_buff *skb) 566 + { 567 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 568 + 569 + return ieee80211_is_data_present(hdr->frame_control); 570 + } 571 + 565 572 void wl1271_tx_work_locked(struct wl1271 *wl) 566 573 { 567 574 struct sk_buff *skb; 568 575 u32 buf_offset = 0; 569 576 bool sent_packets = false; 577 + bool had_data = false; 578 + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 570 579 int ret; 571 580 572 581 if (unlikely(wl->state == WL1271_STATE_OFF)) 573 582 return; 574 583 575 584 while ((skb = wl1271_skb_dequeue(wl))) { 585 + if (wl1271_tx_is_data_present(skb)) 586 + had_data = true; 587 + 576 588 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); 577 589 if (ret == -EAGAIN) { 578 590 /* ··· 630 618 wl->tx_packets_count); 631 619 632 620 wl1271_handle_tx_low_watermark(wl); 621 + } 622 + if (!is_ap && wl->conf.rx_streaming.interval && had_data && 623 + (wl->conf.rx_streaming.always || 624 + test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { 625 + u32 timeout = wl->conf.rx_streaming.duration; 626 + 627 + /* enable rx streaming */ 628 + if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) 629 + ieee80211_queue_work(wl->hw, 630 + &wl->rx_streaming_enable_work); 631 + 632 + mod_timer(&wl->rx_streaming_timer, 633 + jiffies + msecs_to_jiffies(timeout)); 633 634 } 634 635 } 635 636 ··· 727 702 728 703 /* return the packet to the stack */ 729 704 skb_queue_tail(&wl->deferred_tx_queue, skb); 730 - ieee80211_queue_work(wl->hw, &wl->netstack_work); 705 + queue_work(wl->freezable_wq, &wl->netstack_work); 731 706 wl1271_free_tx_id(wl, result->id); 732 707 } 733 708 ··· 782 757 info = IEEE80211_SKB_CB(skb); 783 758 info->status.rates[0].idx = -1; 784 759 info->status.rates[0].count = 0; 785 - ieee80211_tx_status(wl->hw, skb); 760 + ieee80211_tx_status_ni(wl->hw, skb); 786 761 total++; 787 762 } 788 763 } ··· 820 
795 info = IEEE80211_SKB_CB(skb); 821 796 info->status.rates[0].idx = -1; 822 797 info->status.rates[0].count = 0; 823 - ieee80211_tx_status(wl->hw, skb); 798 + ieee80211_tx_status_ni(wl->hw, skb); 824 799 } 825 800 } 826 801 } ··· 863 838 info->status.rates[0].idx = -1; 864 839 info->status.rates[0].count = 0; 865 840 866 - ieee80211_tx_status(wl->hw, skb); 841 + ieee80211_tx_status_ni(wl->hw, skb); 867 842 } 868 843 } 869 844 }
+36 -2
drivers/net/wireless/wl12xx/wl12xx.h
··· 226 226 #define FW_VER_MINOR_1_SPARE_STA_MIN 58 227 227 #define FW_VER_MINOR_1_SPARE_AP_MIN 47 228 228 229 + #define FW_VER_MINOR_FWLOG_STA_MIN 70 230 + 229 231 struct wl1271_chip { 230 232 u32 id; 231 233 char fw_ver_str[ETHTOOL_BUSINFO_LEN]; ··· 286 284 u8 tx_total; 287 285 u8 reserved1; 288 286 __le16 reserved2; 289 - /* Total structure size is 68 bytes */ 290 - u32 padding; 287 + __le32 log_start_addr; 291 288 } __packed; 292 289 293 290 struct wl1271_fw_full_status { ··· 360 359 WL1271_FLAG_DUMMY_PACKET_PENDING, 361 360 WL1271_FLAG_SUSPENDED, 362 361 WL1271_FLAG_PENDING_WORK, 362 + WL1271_FLAG_SOFT_GEMINI, 363 + WL1271_FLAG_RX_STREAMING_STARTED, 364 + WL1271_FLAG_RECOVERY_IN_PROGRESS, 363 365 }; 364 366 365 367 struct wl1271_link { ··· 447 443 struct sk_buff_head deferred_tx_queue; 448 444 449 445 struct work_struct tx_work; 446 + struct workqueue_struct *freezable_wq; 450 447 451 448 /* Pending TX frames */ 452 449 unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)]; ··· 472 467 473 468 /* Network stack work */ 474 469 struct work_struct netstack_work; 470 + 471 + /* FW log buffer */ 472 + u8 *fwlog; 473 + 474 + /* Number of valid bytes in the FW log buffer */ 475 + ssize_t fwlog_size; 476 + 477 + /* Sysfs FW log entry readers wait queue */ 478 + wait_queue_head_t fwlog_waitq; 475 479 476 480 /* Hardware recovery work */ 477 481 struct work_struct recovery_work; ··· 521 507 522 508 /* Default key (for WEP) */ 523 509 u32 default_key; 510 + 511 + /* Rx Streaming */ 512 + struct work_struct rx_streaming_enable_work; 513 + struct work_struct rx_streaming_disable_work; 514 + struct timer_list rx_streaming_timer; 524 515 525 516 unsigned int filters; 526 517 unsigned int rx_config; ··· 592 573 * (currently, only "ANY" trigger is supported) 593 574 */ 594 575 bool wow_enabled; 576 + bool irq_wake_enabled; 595 577 596 578 /* 597 579 * AP-mode - links indexed by HLID. 
The global and broadcast links ··· 622 602 623 603 int wl1271_plt_start(struct wl1271 *wl); 624 604 int wl1271_plt_stop(struct wl1271 *wl); 605 + int wl1271_recalc_rx_streaming(struct wl1271 *wl); 606 + void wl12xx_queue_recovery_work(struct wl1271 *wl); 607 + size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); 625 608 626 609 #define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ 627 610 ··· 659 636 660 637 /* WL128X requires aggregated packets to be aligned to the SDIO block size */ 661 638 #define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2) 639 + 640 + /* 641 + * WL127X AP mode requires Low Power DRPw (LPD) enable to reduce power 642 + * consumption 643 + */ 644 + #define WL12XX_QUIRK_LPD_MODE BIT(3) 645 + 646 + /* Older firmwares did not implement the FW logger over bus feature */ 647 + #define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) 648 + 649 + #define WL12XX_HW_BLOCK_SIZE 256 662 650 663 651 #endif
+12 -12
drivers/nfc/Kconfig
··· 2 2 # Near Field Communication (NFC) devices 3 3 # 4 4 5 - menuconfig NFC_DEVICES 6 - bool "Near Field Communication (NFC) devices" 7 - default n 8 - ---help--- 9 - You'll have to say Y if your computer contains an NFC device that 10 - you want to use under Linux. 11 - 12 - You can say N here if you don't have any Near Field Communication 13 - devices connected to your computer. 14 - 15 - if NFC_DEVICES 5 + menu "Near Field Communication (NFC) devices" 6 + depends on NFC 16 7 17 8 config PN544_NFC 18 9 tristate "PN544 NFC driver" ··· 17 26 To compile this driver as a module, choose m here. The module will 18 27 be called pn544. 19 28 29 + config NFC_PN533 30 + tristate "NXP PN533 USB driver" 31 + depends on USB 32 + help 33 + NXP PN533 USB driver. 34 + This driver provides support for NFC NXP PN533 devices. 20 35 21 - endif # NFC_DEVICES 36 + Say Y here to compile support for PN533 devices into the 37 + kernel or say M to compile it as module (pn533). 38 + 39 + endmenu
+3
drivers/nfc/Makefile
··· 3 3 # 4 4 5 5 obj-$(CONFIG_PN544_NFC) += pn544.o 6 + obj-$(CONFIG_NFC_PN533) += pn533.o 7 + 8 + ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
+1632
drivers/nfc/pn533.c
··· 1 + /* 2 + * Copyright (C) 2011 Instituto Nokia de Tecnologia 3 + * 4 + * Authors: 5 + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> 6 + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the 20 + * Free Software Foundation, Inc., 21 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 + */ 23 + 24 + #include <linux/device.h> 25 + #include <linux/kernel.h> 26 + #include <linux/module.h> 27 + #include <linux/slab.h> 28 + #include <linux/usb.h> 29 + #include <linux/nfc.h> 30 + #include <linux/netdevice.h> 31 + #include <net/nfc.h> 32 + 33 + #define VERSION "0.1" 34 + 35 + #define PN533_VENDOR_ID 0x4CC 36 + #define PN533_PRODUCT_ID 0x2533 37 + 38 + #define SCM_VENDOR_ID 0x4E6 39 + #define SCL3711_PRODUCT_ID 0x5591 40 + 41 + static const struct usb_device_id pn533_table[] = { 42 + { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID) }, 43 + { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID) }, 44 + { } 45 + }; 46 + MODULE_DEVICE_TABLE(usb, pn533_table); 47 + 48 + /* frame definitions */ 49 + #define PN533_FRAME_TAIL_SIZE 2 50 + #define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \ 51 + PN533_FRAME_TAIL_SIZE) 52 + #define PN533_FRAME_ACK_SIZE (sizeof(struct pn533_frame) + 1) 53 + #define PN533_FRAME_CHECKSUM(f) (f->data[f->datalen]) 54 + #define PN533_FRAME_POSTAMBLE(f) 
(f->data[f->datalen + 1]) 55 + 56 + /* start of frame */ 57 + #define PN533_SOF 0x00FF 58 + 59 + /* frame identifier: in/out/error */ 60 + #define PN533_FRAME_IDENTIFIER(f) (f->data[0]) 61 + #define PN533_DIR_OUT 0xD4 62 + #define PN533_DIR_IN 0xD5 63 + 64 + /* PN533 Commands */ 65 + #define PN533_FRAME_CMD(f) (f->data[1]) 66 + #define PN533_FRAME_CMD_PARAMS_PTR(f) (&f->data[2]) 67 + #define PN533_FRAME_CMD_PARAMS_LEN(f) (f->datalen - 2) 68 + 69 + #define PN533_CMD_GET_FIRMWARE_VERSION 0x02 70 + #define PN533_CMD_RF_CONFIGURATION 0x32 71 + #define PN533_CMD_IN_DATA_EXCHANGE 0x40 72 + #define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A 73 + #define PN533_CMD_IN_ATR 0x50 74 + #define PN533_CMD_IN_RELEASE 0x52 75 + 76 + #define PN533_CMD_RESPONSE(cmd) (cmd + 1) 77 + 78 + /* PN533 Return codes */ 79 + #define PN533_CMD_RET_MASK 0x3F 80 + #define PN533_CMD_MI_MASK 0x40 81 + #define PN533_CMD_RET_SUCCESS 0x00 82 + 83 + struct pn533; 84 + 85 + typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg, 86 + u8 *params, int params_len); 87 + 88 + /* structs for pn533 commands */ 89 + 90 + /* PN533_CMD_GET_FIRMWARE_VERSION */ 91 + struct pn533_fw_version { 92 + u8 ic; 93 + u8 ver; 94 + u8 rev; 95 + u8 support; 96 + }; 97 + 98 + /* PN533_CMD_RF_CONFIGURATION */ 99 + #define PN533_CFGITEM_MAX_RETRIES 0x05 100 + 101 + #define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00 102 + #define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF 103 + 104 + struct pn533_config_max_retries { 105 + u8 mx_rty_atr; 106 + u8 mx_rty_psl; 107 + u8 mx_rty_passive_act; 108 + } __packed; 109 + 110 + /* PN533_CMD_IN_LIST_PASSIVE_TARGET */ 111 + 112 + /* felica commands opcode */ 113 + #define PN533_FELICA_OPC_SENSF_REQ 0 114 + #define PN533_FELICA_OPC_SENSF_RES 1 115 + /* felica SENSF_REQ parameters */ 116 + #define PN533_FELICA_SENSF_SC_ALL 0xFFFF 117 + #define PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE 0 118 + #define PN533_FELICA_SENSF_RC_SYSTEM_CODE 1 119 + #define PN533_FELICA_SENSF_RC_ADVANCED_PROTOCOL 2 120 + 121 + 
/* type B initiator_data values */ 122 + #define PN533_TYPE_B_AFI_ALL_FAMILIES 0 123 + #define PN533_TYPE_B_POLL_METHOD_TIMESLOT 0 124 + #define PN533_TYPE_B_POLL_METHOD_PROBABILISTIC 1 125 + 126 + union pn533_cmd_poll_initdata { 127 + struct { 128 + u8 afi; 129 + u8 polling_method; 130 + } __packed type_b; 131 + struct { 132 + u8 opcode; 133 + __be16 sc; 134 + u8 rc; 135 + u8 tsn; 136 + } __packed felica; 137 + }; 138 + 139 + /* Poll modulations */ 140 + enum { 141 + PN533_POLL_MOD_106KBPS_A, 142 + PN533_POLL_MOD_212KBPS_FELICA, 143 + PN533_POLL_MOD_424KBPS_FELICA, 144 + PN533_POLL_MOD_106KBPS_JEWEL, 145 + PN533_POLL_MOD_847KBPS_B, 146 + 147 + __PN533_POLL_MOD_AFTER_LAST, 148 + }; 149 + #define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1) 150 + 151 + struct pn533_poll_modulations { 152 + struct { 153 + u8 maxtg; 154 + u8 brty; 155 + union pn533_cmd_poll_initdata initiator_data; 156 + } __packed data; 157 + u8 len; 158 + }; 159 + 160 + const struct pn533_poll_modulations poll_mod[] = { 161 + [PN533_POLL_MOD_106KBPS_A] = { 162 + .data = { 163 + .maxtg = 1, 164 + .brty = 0, 165 + }, 166 + .len = 2, 167 + }, 168 + [PN533_POLL_MOD_212KBPS_FELICA] = { 169 + .data = { 170 + .maxtg = 1, 171 + .brty = 1, 172 + .initiator_data.felica = { 173 + .opcode = PN533_FELICA_OPC_SENSF_REQ, 174 + .sc = PN533_FELICA_SENSF_SC_ALL, 175 + .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, 176 + .tsn = 0, 177 + }, 178 + }, 179 + .len = 7, 180 + }, 181 + [PN533_POLL_MOD_424KBPS_FELICA] = { 182 + .data = { 183 + .maxtg = 1, 184 + .brty = 2, 185 + .initiator_data.felica = { 186 + .opcode = PN533_FELICA_OPC_SENSF_REQ, 187 + .sc = PN533_FELICA_SENSF_SC_ALL, 188 + .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, 189 + .tsn = 0, 190 + }, 191 + }, 192 + .len = 7, 193 + }, 194 + [PN533_POLL_MOD_106KBPS_JEWEL] = { 195 + .data = { 196 + .maxtg = 1, 197 + .brty = 4, 198 + }, 199 + .len = 2, 200 + }, 201 + [PN533_POLL_MOD_847KBPS_B] = { 202 + .data = { 203 + .maxtg = 1, 204 + .brty = 8, 205 + 
.initiator_data.type_b = { 206 + .afi = PN533_TYPE_B_AFI_ALL_FAMILIES, 207 + .polling_method = 208 + PN533_TYPE_B_POLL_METHOD_TIMESLOT, 209 + }, 210 + }, 211 + .len = 3, 212 + }, 213 + }; 214 + 215 + /* PN533_CMD_IN_ATR */ 216 + 217 + struct pn533_cmd_activate_param { 218 + u8 tg; 219 + u8 next; 220 + } __packed; 221 + 222 + struct pn533_cmd_activate_response { 223 + u8 status; 224 + u8 nfcid3t[10]; 225 + u8 didt; 226 + u8 bst; 227 + u8 brt; 228 + u8 to; 229 + u8 ppt; 230 + /* optional */ 231 + u8 gt[]; 232 + } __packed; 233 + 234 + 235 + struct pn533 { 236 + struct usb_device *udev; 237 + struct usb_interface *interface; 238 + struct nfc_dev *nfc_dev; 239 + 240 + struct urb *out_urb; 241 + int out_maxlen; 242 + struct pn533_frame *out_frame; 243 + 244 + struct urb *in_urb; 245 + int in_maxlen; 246 + struct pn533_frame *in_frame; 247 + 248 + struct tasklet_struct tasklet; 249 + struct pn533_frame *tklt_in_frame; 250 + int tklt_in_error; 251 + 252 + pn533_cmd_complete_t cmd_complete; 253 + void *cmd_complete_arg; 254 + struct semaphore cmd_lock; 255 + u8 cmd; 256 + 257 + struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; 258 + u8 poll_mod_count; 259 + u8 poll_mod_curr; 260 + u32 poll_protocols; 261 + 262 + u8 tgt_available_prots; 263 + u8 tgt_active_prot; 264 + }; 265 + 266 + struct pn533_frame { 267 + u8 preamble; 268 + __be16 start_frame; 269 + u8 datalen; 270 + u8 datalen_checksum; 271 + u8 data[]; 272 + } __packed; 273 + 274 + /* The rule: value + checksum = 0 */ 275 + static inline u8 pn533_checksum(u8 value) 276 + { 277 + return ~value + 1; 278 + } 279 + 280 + /* The rule: sum(data elements) + checksum = 0 */ 281 + static u8 pn533_data_checksum(u8 *data, int datalen) 282 + { 283 + u8 sum = 0; 284 + int i; 285 + 286 + for (i = 0; i < datalen; i++) 287 + sum += data[i]; 288 + 289 + return pn533_checksum(sum); 290 + } 291 + 292 + /** 293 + * pn533_tx_frame_ack - create a ack frame 294 + * @frame: The frame to be set as ack 295 + * 296 + * Ack 
is different type of standard frame. As a standard frame, it has 297 + * preamble and start_frame. However the checksum of this frame must fail, 298 + * i.e. datalen + datalen_checksum must NOT be zero. When the checksum test 299 + * fails and datalen = 0 and datalen_checksum = 0xFF, the frame is a ack. 300 + * After datalen_checksum field, the postamble is placed. 301 + */ 302 + static void pn533_tx_frame_ack(struct pn533_frame *frame) 303 + { 304 + frame->preamble = 0; 305 + frame->start_frame = cpu_to_be16(PN533_SOF); 306 + frame->datalen = 0; 307 + frame->datalen_checksum = 0xFF; 308 + /* data[0] is used as postamble */ 309 + frame->data[0] = 0; 310 + } 311 + 312 + static void pn533_tx_frame_init(struct pn533_frame *frame, u8 cmd) 313 + { 314 + frame->preamble = 0; 315 + frame->start_frame = cpu_to_be16(PN533_SOF); 316 + PN533_FRAME_IDENTIFIER(frame) = PN533_DIR_OUT; 317 + PN533_FRAME_CMD(frame) = cmd; 318 + frame->datalen = 2; 319 + } 320 + 321 + static void pn533_tx_frame_finish(struct pn533_frame *frame) 322 + { 323 + frame->datalen_checksum = pn533_checksum(frame->datalen); 324 + 325 + PN533_FRAME_CHECKSUM(frame) = 326 + pn533_data_checksum(frame->data, frame->datalen); 327 + 328 + PN533_FRAME_POSTAMBLE(frame) = 0; 329 + } 330 + 331 + static bool pn533_rx_frame_is_valid(struct pn533_frame *frame) 332 + { 333 + u8 checksum; 334 + 335 + if (frame->start_frame != cpu_to_be16(PN533_SOF)) 336 + return false; 337 + 338 + checksum = pn533_checksum(frame->datalen); 339 + if (checksum != frame->datalen_checksum) 340 + return false; 341 + 342 + checksum = pn533_data_checksum(frame->data, frame->datalen); 343 + if (checksum != PN533_FRAME_CHECKSUM(frame)) 344 + return false; 345 + 346 + return true; 347 + } 348 + 349 + static bool pn533_rx_frame_is_ack(struct pn533_frame *frame) 350 + { 351 + if (frame->start_frame != cpu_to_be16(PN533_SOF)) 352 + return false; 353 + 354 + if (frame->datalen != 0 || frame->datalen_checksum != 0xFF) 355 + return false; 356 + 357 + 
return true; 358 + } 359 + 360 + static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd) 361 + { 362 + return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd)); 363 + } 364 + 365 + static void pn533_tasklet_cmd_complete(unsigned long arg) 366 + { 367 + struct pn533 *dev = (struct pn533 *) arg; 368 + struct pn533_frame *in_frame = dev->tklt_in_frame; 369 + int rc; 370 + 371 + if (dev->tklt_in_error) 372 + rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL, 373 + dev->tklt_in_error); 374 + else 375 + rc = dev->cmd_complete(dev, dev->cmd_complete_arg, 376 + PN533_FRAME_CMD_PARAMS_PTR(in_frame), 377 + PN533_FRAME_CMD_PARAMS_LEN(in_frame)); 378 + 379 + if (rc != -EINPROGRESS) 380 + up(&dev->cmd_lock); 381 + } 382 + 383 + static void pn533_recv_response(struct urb *urb) 384 + { 385 + struct pn533 *dev = urb->context; 386 + struct pn533_frame *in_frame; 387 + 388 + dev->tklt_in_frame = NULL; 389 + 390 + switch (urb->status) { 391 + case 0: 392 + /* success */ 393 + break; 394 + case -ECONNRESET: 395 + case -ENOENT: 396 + case -ESHUTDOWN: 397 + nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 398 + " status: %d", urb->status); 399 + dev->tklt_in_error = urb->status; 400 + goto sched_tasklet; 401 + default: 402 + nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" 403 + " %d", urb->status); 404 + dev->tklt_in_error = urb->status; 405 + goto sched_tasklet; 406 + } 407 + 408 + in_frame = dev->in_urb->transfer_buffer; 409 + 410 + if (!pn533_rx_frame_is_valid(in_frame)) { 411 + nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); 412 + dev->tklt_in_error = -EIO; 413 + goto sched_tasklet; 414 + } 415 + 416 + if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) { 417 + nfc_dev_err(&dev->interface->dev, "The received frame is not " 418 + "response to the last command"); 419 + dev->tklt_in_error = -EIO; 420 + goto sched_tasklet; 421 + } 422 + 423 + nfc_dev_dbg(&dev->interface->dev, "Received a valid frame"); 424 + 
dev->tklt_in_error = 0; 425 + dev->tklt_in_frame = in_frame; 426 + 427 + sched_tasklet: 428 + tasklet_schedule(&dev->tasklet); 429 + } 430 + 431 + static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) 432 + { 433 + dev->in_urb->complete = pn533_recv_response; 434 + 435 + return usb_submit_urb(dev->in_urb, flags); 436 + } 437 + 438 + static void pn533_recv_ack(struct urb *urb) 439 + { 440 + struct pn533 *dev = urb->context; 441 + struct pn533_frame *in_frame; 442 + int rc; 443 + 444 + switch (urb->status) { 445 + case 0: 446 + /* success */ 447 + break; 448 + case -ECONNRESET: 449 + case -ENOENT: 450 + case -ESHUTDOWN: 451 + nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 452 + " status: %d", urb->status); 453 + dev->tklt_in_error = urb->status; 454 + goto sched_tasklet; 455 + default: 456 + nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" 457 + " %d", urb->status); 458 + dev->tklt_in_error = urb->status; 459 + goto sched_tasklet; 460 + } 461 + 462 + in_frame = dev->in_urb->transfer_buffer; 463 + 464 + if (!pn533_rx_frame_is_ack(in_frame)) { 465 + nfc_dev_err(&dev->interface->dev, "Received an invalid ack"); 466 + dev->tklt_in_error = -EIO; 467 + goto sched_tasklet; 468 + } 469 + 470 + nfc_dev_dbg(&dev->interface->dev, "Received a valid ack"); 471 + 472 + rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC); 473 + if (rc) { 474 + nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with" 475 + " result %d", rc); 476 + dev->tklt_in_error = rc; 477 + goto sched_tasklet; 478 + } 479 + 480 + return; 481 + 482 + sched_tasklet: 483 + dev->tklt_in_frame = NULL; 484 + tasklet_schedule(&dev->tasklet); 485 + } 486 + 487 + static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) 488 + { 489 + dev->in_urb->complete = pn533_recv_ack; 490 + 491 + return usb_submit_urb(dev->in_urb, flags); 492 + } 493 + 494 + static int pn533_send_ack(struct pn533 *dev, gfp_t flags) 495 + { 496 + int rc; 497 + 498 + 
nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 499 + 500 + pn533_tx_frame_ack(dev->out_frame); 501 + 502 + dev->out_urb->transfer_buffer = dev->out_frame; 503 + dev->out_urb->transfer_buffer_length = PN533_FRAME_ACK_SIZE; 504 + rc = usb_submit_urb(dev->out_urb, flags); 505 + 506 + return rc; 507 + } 508 + 509 + static int __pn533_send_cmd_frame_async(struct pn533 *dev, 510 + struct pn533_frame *out_frame, 511 + struct pn533_frame *in_frame, 512 + int in_frame_len, 513 + pn533_cmd_complete_t cmd_complete, 514 + void *arg, gfp_t flags) 515 + { 516 + int rc; 517 + 518 + nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", 519 + PN533_FRAME_CMD(out_frame)); 520 + 521 + dev->cmd = PN533_FRAME_CMD(out_frame); 522 + dev->cmd_complete = cmd_complete; 523 + dev->cmd_complete_arg = arg; 524 + 525 + dev->out_urb->transfer_buffer = out_frame; 526 + dev->out_urb->transfer_buffer_length = 527 + PN533_FRAME_SIZE(out_frame); 528 + 529 + dev->in_urb->transfer_buffer = in_frame; 530 + dev->in_urb->transfer_buffer_length = in_frame_len; 531 + 532 + rc = usb_submit_urb(dev->out_urb, flags); 533 + if (rc) 534 + return rc; 535 + 536 + rc = pn533_submit_urb_for_ack(dev, flags); 537 + if (rc) 538 + goto error; 539 + 540 + return 0; 541 + 542 + error: 543 + usb_unlink_urb(dev->out_urb); 544 + return rc; 545 + } 546 + 547 + static int pn533_send_cmd_frame_async(struct pn533 *dev, 548 + struct pn533_frame *out_frame, 549 + struct pn533_frame *in_frame, 550 + int in_frame_len, 551 + pn533_cmd_complete_t cmd_complete, 552 + void *arg, gfp_t flags) 553 + { 554 + int rc; 555 + 556 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 557 + 558 + if (down_trylock(&dev->cmd_lock)) 559 + return -EBUSY; 560 + 561 + rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame, 562 + in_frame_len, cmd_complete, arg, flags); 563 + if (rc) 564 + goto error; 565 + 566 + return 0; 567 + error: 568 + up(&dev->cmd_lock); 569 + return rc; 570 + } 571 + 572 + struct pn533_sync_cmd_response { 573 + int 
rc; 574 + struct completion done; 575 + }; 576 + 577 + static int pn533_sync_cmd_complete(struct pn533 *dev, void *_arg, 578 + u8 *params, int params_len) 579 + { 580 + struct pn533_sync_cmd_response *arg = _arg; 581 + 582 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 583 + 584 + arg->rc = 0; 585 + 586 + if (params_len < 0) /* error */ 587 + arg->rc = params_len; 588 + 589 + complete(&arg->done); 590 + 591 + return 0; 592 + } 593 + 594 + static int pn533_send_cmd_frame_sync(struct pn533 *dev, 595 + struct pn533_frame *out_frame, 596 + struct pn533_frame *in_frame, 597 + int in_frame_len) 598 + { 599 + int rc; 600 + struct pn533_sync_cmd_response arg; 601 + 602 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 603 + 604 + init_completion(&arg.done); 605 + 606 + rc = pn533_send_cmd_frame_async(dev, out_frame, in_frame, in_frame_len, 607 + pn533_sync_cmd_complete, &arg, GFP_KERNEL); 608 + if (rc) 609 + return rc; 610 + 611 + wait_for_completion(&arg.done); 612 + 613 + return arg.rc; 614 + } 615 + 616 + static void pn533_send_complete(struct urb *urb) 617 + { 618 + struct pn533 *dev = urb->context; 619 + 620 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 621 + 622 + switch (urb->status) { 623 + case 0: 624 + /* success */ 625 + break; 626 + case -ECONNRESET: 627 + case -ENOENT: 628 + case -ESHUTDOWN: 629 + nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 630 + " status: %d", urb->status); 631 + break; 632 + default: 633 + nfc_dev_dbg(&dev->interface->dev, "Nonzero urb status received:" 634 + " %d", urb->status); 635 + } 636 + } 637 + 638 + struct pn533_target_type_a { 639 + __be16 sens_res; 640 + u8 sel_res; 641 + u8 nfcid_len; 642 + u8 nfcid_data[]; 643 + } __packed; 644 + 645 + 646 + #define PN533_TYPE_A_SENS_RES_NFCID1(x) ((u8)((be16_to_cpu(x) & 0x00C0) >> 6)) 647 + #define PN533_TYPE_A_SENS_RES_SSD(x) ((u8)((be16_to_cpu(x) & 0x001F) >> 0)) 648 + #define PN533_TYPE_A_SENS_RES_PLATCONF(x) ((u8)((be16_to_cpu(x) & 0x0F00) >> 8)) 649 + 650 + 
#define PN533_TYPE_A_SENS_RES_SSD_JEWEL 0x00 651 + #define PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL 0x0C 652 + 653 + #define PN533_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5) 654 + #define PN533_TYPE_A_SEL_CASCADE(x) (((x) & 0x04) >> 2) 655 + 656 + #define PN533_TYPE_A_SEL_PROT_MIFARE 0 657 + #define PN533_TYPE_A_SEL_PROT_ISO14443 1 658 + #define PN533_TYPE_A_SEL_PROT_DEP 2 659 + #define PN533_TYPE_A_SEL_PROT_ISO14443_DEP 3 660 + 661 + static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, 662 + int target_data_len) 663 + { 664 + u8 ssd; 665 + u8 platconf; 666 + 667 + if (target_data_len < sizeof(struct pn533_target_type_a)) 668 + return false; 669 + 670 + /* The lenght check of nfcid[] and ats[] are not being performed because 671 + the values are not being used */ 672 + 673 + /* Requirement 4.6.3.3 from NFC Forum Digital Spec */ 674 + ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res); 675 + platconf = PN533_TYPE_A_SENS_RES_PLATCONF(type_a->sens_res); 676 + 677 + if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL && 678 + platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) || 679 + (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL && 680 + platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL)) 681 + return false; 682 + 683 + /* Requirements 4.8.2.1, 4.8.2.3, 4.8.2.5 and 4.8.2.7 from NFC Forum */ 684 + if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0) 685 + return false; 686 + 687 + return true; 688 + } 689 + 690 + static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data, 691 + int tgt_data_len) 692 + { 693 + struct pn533_target_type_a *tgt_type_a; 694 + 695 + tgt_type_a = (struct pn533_target_type_a *) tgt_data; 696 + 697 + if (!pn533_target_type_a_is_valid(tgt_type_a, tgt_data_len)) 698 + return -EPROTO; 699 + 700 + switch (PN533_TYPE_A_SEL_PROT(tgt_type_a->sel_res)) { 701 + case PN533_TYPE_A_SEL_PROT_MIFARE: 702 + nfc_tgt->supported_protocols = NFC_PROTO_MIFARE_MASK; 703 + break; 704 + case PN533_TYPE_A_SEL_PROT_ISO14443: 705 + 
nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK; 706 + break; 707 + case PN533_TYPE_A_SEL_PROT_DEP: 708 + nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK; 709 + break; 710 + case PN533_TYPE_A_SEL_PROT_ISO14443_DEP: 711 + nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK | 712 + NFC_PROTO_NFC_DEP_MASK; 713 + break; 714 + } 715 + 716 + nfc_tgt->sens_res = be16_to_cpu(tgt_type_a->sens_res); 717 + nfc_tgt->sel_res = tgt_type_a->sel_res; 718 + 719 + return 0; 720 + } 721 + 722 + struct pn533_target_felica { 723 + u8 pol_res; 724 + u8 opcode; 725 + u8 nfcid2[8]; 726 + u8 pad[8]; 727 + /* optional */ 728 + u8 syst_code[]; 729 + } __packed; 730 + 731 + #define PN533_FELICA_SENSF_NFCID2_DEP_B1 0x01 732 + #define PN533_FELICA_SENSF_NFCID2_DEP_B2 0xFE 733 + 734 + static bool pn533_target_felica_is_valid(struct pn533_target_felica *felica, 735 + int target_data_len) 736 + { 737 + if (target_data_len < sizeof(struct pn533_target_felica)) 738 + return false; 739 + 740 + if (felica->opcode != PN533_FELICA_OPC_SENSF_RES) 741 + return false; 742 + 743 + return true; 744 + } 745 + 746 + static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data, 747 + int tgt_data_len) 748 + { 749 + struct pn533_target_felica *tgt_felica; 750 + 751 + tgt_felica = (struct pn533_target_felica *) tgt_data; 752 + 753 + if (!pn533_target_felica_is_valid(tgt_felica, tgt_data_len)) 754 + return -EPROTO; 755 + 756 + if (tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1 && 757 + tgt_felica->nfcid2[1] == 758 + PN533_FELICA_SENSF_NFCID2_DEP_B2) 759 + nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK; 760 + else 761 + nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK; 762 + 763 + return 0; 764 + } 765 + 766 + struct pn533_target_jewel { 767 + __be16 sens_res; 768 + u8 jewelid[4]; 769 + } __packed; 770 + 771 + static bool pn533_target_jewel_is_valid(struct pn533_target_jewel *jewel, 772 + int target_data_len) 773 + { 774 + u8 ssd; 775 + u8 platconf; 776 + 777 + 
if (target_data_len < sizeof(struct pn533_target_jewel)) 778 + return false; 779 + 780 + /* Requirement 4.6.3.3 from NFC Forum Digital Spec */ 781 + ssd = PN533_TYPE_A_SENS_RES_SSD(jewel->sens_res); 782 + platconf = PN533_TYPE_A_SENS_RES_PLATCONF(jewel->sens_res); 783 + 784 + if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL && 785 + platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) || 786 + (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL && 787 + platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL)) 788 + return false; 789 + 790 + return true; 791 + } 792 + 793 + static int pn533_target_found_jewel(struct nfc_target *nfc_tgt, u8 *tgt_data, 794 + int tgt_data_len) 795 + { 796 + struct pn533_target_jewel *tgt_jewel; 797 + 798 + tgt_jewel = (struct pn533_target_jewel *) tgt_data; 799 + 800 + if (!pn533_target_jewel_is_valid(tgt_jewel, tgt_data_len)) 801 + return -EPROTO; 802 + 803 + nfc_tgt->supported_protocols = NFC_PROTO_JEWEL_MASK; 804 + nfc_tgt->sens_res = be16_to_cpu(tgt_jewel->sens_res); 805 + 806 + return 0; 807 + } 808 + 809 + struct pn533_type_b_prot_info { 810 + u8 bitrate; 811 + u8 fsci_type; 812 + u8 fwi_adc_fo; 813 + } __packed; 814 + 815 + #define PN533_TYPE_B_PROT_FCSI(x) (((x) & 0xF0) >> 4) 816 + #define PN533_TYPE_B_PROT_TYPE(x) (((x) & 0x0F) >> 0) 817 + #define PN533_TYPE_B_PROT_TYPE_RFU_MASK 0x8 818 + 819 + struct pn533_type_b_sens_res { 820 + u8 opcode; 821 + u8 nfcid[4]; 822 + u8 appdata[4]; 823 + struct pn533_type_b_prot_info prot_info; 824 + } __packed; 825 + 826 + #define PN533_TYPE_B_OPC_SENSB_RES 0x50 827 + 828 + struct pn533_target_type_b { 829 + struct pn533_type_b_sens_res sensb_res; 830 + u8 attrib_res_len; 831 + u8 attrib_res[]; 832 + } __packed; 833 + 834 + static bool pn533_target_type_b_is_valid(struct pn533_target_type_b *type_b, 835 + int target_data_len) 836 + { 837 + if (target_data_len < sizeof(struct pn533_target_type_b)) 838 + return false; 839 + 840 + if (type_b->sensb_res.opcode != PN533_TYPE_B_OPC_SENSB_RES) 841 + return false; 842 + 843 + 
if (PN533_TYPE_B_PROT_TYPE(type_b->sensb_res.prot_info.fsci_type) & 844 + PN533_TYPE_B_PROT_TYPE_RFU_MASK) 845 + return false; 846 + 847 + return true; 848 + } 849 + 850 + static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data, 851 + int tgt_data_len) 852 + { 853 + struct pn533_target_type_b *tgt_type_b; 854 + 855 + tgt_type_b = (struct pn533_target_type_b *) tgt_data; 856 + 857 + if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len)) 858 + return -EPROTO; 859 + 860 + nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK; 861 + 862 + return 0; 863 + } 864 + 865 + struct pn533_poll_response { 866 + u8 nbtg; 867 + u8 tg; 868 + u8 target_data[]; 869 + } __packed; 870 + 871 + static int pn533_target_found(struct pn533 *dev, 872 + struct pn533_poll_response *resp, int resp_len) 873 + { 874 + int target_data_len; 875 + struct nfc_target nfc_tgt; 876 + int rc; 877 + 878 + nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__, 879 + dev->poll_mod_curr); 880 + 881 + if (resp->tg != 1) 882 + return -EPROTO; 883 + 884 + target_data_len = resp_len - sizeof(struct pn533_poll_response); 885 + 886 + switch (dev->poll_mod_curr) { 887 + case PN533_POLL_MOD_106KBPS_A: 888 + rc = pn533_target_found_type_a(&nfc_tgt, resp->target_data, 889 + target_data_len); 890 + break; 891 + case PN533_POLL_MOD_212KBPS_FELICA: 892 + case PN533_POLL_MOD_424KBPS_FELICA: 893 + rc = pn533_target_found_felica(&nfc_tgt, resp->target_data, 894 + target_data_len); 895 + break; 896 + case PN533_POLL_MOD_106KBPS_JEWEL: 897 + rc = pn533_target_found_jewel(&nfc_tgt, resp->target_data, 898 + target_data_len); 899 + break; 900 + case PN533_POLL_MOD_847KBPS_B: 901 + rc = pn533_target_found_type_b(&nfc_tgt, resp->target_data, 902 + target_data_len); 903 + break; 904 + default: 905 + nfc_dev_err(&dev->interface->dev, "Unknown current poll" 906 + " modulation"); 907 + return -EPROTO; 908 + } 909 + 910 + if (rc) 911 + return rc; 912 + 913 + if (!(nfc_tgt.supported_protocols & 
dev->poll_protocols)) { 914 + nfc_dev_dbg(&dev->interface->dev, "The target found does not" 915 + " have the desired protocol"); 916 + return -EAGAIN; 917 + } 918 + 919 + nfc_dev_dbg(&dev->interface->dev, "Target found - supported protocols: " 920 + "0x%x", nfc_tgt.supported_protocols); 921 + 922 + dev->tgt_available_prots = nfc_tgt.supported_protocols; 923 + 924 + nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1); 925 + 926 + return 0; 927 + } 928 + 929 + static void pn533_poll_reset_mod_list(struct pn533 *dev) 930 + { 931 + dev->poll_mod_count = 0; 932 + } 933 + 934 + static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index) 935 + { 936 + dev->poll_mod_active[dev->poll_mod_count] = 937 + (struct pn533_poll_modulations *) &poll_mod[mod_index]; 938 + dev->poll_mod_count++; 939 + } 940 + 941 + static void pn533_poll_create_mod_list(struct pn533 *dev, u32 protocols) 942 + { 943 + pn533_poll_reset_mod_list(dev); 944 + 945 + if (protocols & NFC_PROTO_MIFARE_MASK 946 + || protocols & NFC_PROTO_ISO14443_MASK 947 + || protocols & NFC_PROTO_NFC_DEP_MASK) 948 + pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A); 949 + 950 + if (protocols & NFC_PROTO_FELICA_MASK 951 + || protocols & NFC_PROTO_NFC_DEP_MASK) { 952 + pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA); 953 + pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA); 954 + } 955 + 956 + if (protocols & NFC_PROTO_JEWEL_MASK) 957 + pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL); 958 + 959 + if (protocols & NFC_PROTO_ISO14443_MASK) 960 + pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B); 961 + } 962 + 963 + static void pn533_start_poll_frame(struct pn533_frame *frame, 964 + struct pn533_poll_modulations *mod) 965 + { 966 + 967 + pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET); 968 + 969 + memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len); 970 + frame->datalen += mod->len; 971 + 972 + pn533_tx_frame_finish(frame); 973 + } 974 + 975 + static int pn533_start_poll_complete(struct pn533 
*dev, void *arg, 976 + u8 *params, int params_len) 977 + { 978 + struct pn533_poll_response *resp; 979 + struct pn533_poll_modulations *next_mod; 980 + int rc; 981 + 982 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 983 + 984 + if (params_len == -ENOENT) { 985 + nfc_dev_dbg(&dev->interface->dev, "Polling operation has been" 986 + " stopped"); 987 + goto stop_poll; 988 + } 989 + 990 + if (params_len < 0) { 991 + nfc_dev_err(&dev->interface->dev, "Error %d when running poll", 992 + params_len); 993 + goto stop_poll; 994 + } 995 + 996 + resp = (struct pn533_poll_response *) params; 997 + if (resp->nbtg) { 998 + rc = pn533_target_found(dev, resp, params_len); 999 + 1000 + /* We must stop the poll after a valid target found */ 1001 + if (rc == 0) 1002 + goto stop_poll; 1003 + 1004 + if (rc != -EAGAIN) 1005 + nfc_dev_err(&dev->interface->dev, "The target found is" 1006 + " not valid - continuing to poll"); 1007 + } 1008 + 1009 + dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count; 1010 + 1011 + next_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1012 + 1013 + nfc_dev_dbg(&dev->interface->dev, "Polling next modulation (0x%x)", 1014 + dev->poll_mod_curr); 1015 + 1016 + pn533_start_poll_frame(dev->out_frame, next_mod); 1017 + 1018 + /* Don't need to down the semaphore again */ 1019 + rc = __pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, 1020 + dev->in_maxlen, pn533_start_poll_complete, 1021 + NULL, GFP_ATOMIC); 1022 + 1023 + if (rc == -EPERM) { 1024 + nfc_dev_dbg(&dev->interface->dev, "Cannot poll next modulation" 1025 + " because poll has been stopped"); 1026 + goto stop_poll; 1027 + } 1028 + 1029 + if (rc) { 1030 + nfc_dev_err(&dev->interface->dev, "Error %d when trying to poll" 1031 + " next modulation", rc); 1032 + goto stop_poll; 1033 + } 1034 + 1035 + /* Inform caller function to do not up the semaphore */ 1036 + return -EINPROGRESS; 1037 + 1038 + stop_poll: 1039 + pn533_poll_reset_mod_list(dev); 1040 + dev->poll_protocols = 0; 
1041 + return 0; 1042 + } 1043 + 1044 + static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 protocols) 1045 + { 1046 + struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1047 + struct pn533_poll_modulations *start_mod; 1048 + int rc; 1049 + 1050 + nfc_dev_dbg(&dev->interface->dev, "%s - protocols=0x%x", __func__, 1051 + protocols); 1052 + 1053 + if (dev->poll_mod_count) { 1054 + nfc_dev_err(&dev->interface->dev, "Polling operation already" 1055 + " active"); 1056 + return -EBUSY; 1057 + } 1058 + 1059 + if (dev->tgt_active_prot) { 1060 + nfc_dev_err(&dev->interface->dev, "Cannot poll with a target" 1061 + " already activated"); 1062 + return -EBUSY; 1063 + } 1064 + 1065 + pn533_poll_create_mod_list(dev, protocols); 1066 + 1067 + if (!dev->poll_mod_count) { 1068 + nfc_dev_err(&dev->interface->dev, "No valid protocols" 1069 + " specified"); 1070 + rc = -EINVAL; 1071 + goto error; 1072 + } 1073 + 1074 + nfc_dev_dbg(&dev->interface->dev, "It will poll %d modulations types", 1075 + dev->poll_mod_count); 1076 + 1077 + dev->poll_mod_curr = 0; 1078 + start_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1079 + 1080 + pn533_start_poll_frame(dev->out_frame, start_mod); 1081 + 1082 + rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, 1083 + dev->in_maxlen, pn533_start_poll_complete, 1084 + NULL, GFP_KERNEL); 1085 + 1086 + if (rc) { 1087 + nfc_dev_err(&dev->interface->dev, "Error %d when trying to" 1088 + " start poll", rc); 1089 + goto error; 1090 + } 1091 + 1092 + dev->poll_protocols = protocols; 1093 + 1094 + return 0; 1095 + 1096 + error: 1097 + pn533_poll_reset_mod_list(dev); 1098 + return rc; 1099 + } 1100 + 1101 + static void pn533_stop_poll(struct nfc_dev *nfc_dev) 1102 + { 1103 + struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1104 + 1105 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1106 + 1107 + if (!dev->poll_mod_count) { 1108 + nfc_dev_dbg(&dev->interface->dev, "Polling operation was not" 1109 + " running"); 1110 + return; 1111 + } 1112 + 1113 
+ /* An ack will cancel the last issued command (poll) */ 1114 + pn533_send_ack(dev, GFP_KERNEL); 1115 + 1116 + /* prevent pn533_start_poll_complete to issue a new poll meanwhile */ 1117 + usb_kill_urb(dev->in_urb); 1118 + } 1119 + 1120 + static int pn533_activate_target_nfcdep(struct pn533 *dev) 1121 + { 1122 + struct pn533_cmd_activate_param param; 1123 + struct pn533_cmd_activate_response *resp; 1124 + int rc; 1125 + 1126 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1127 + 1128 + pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_ATR); 1129 + 1130 + param.tg = 1; 1131 + param.next = 0; 1132 + memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), &param, 1133 + sizeof(struct pn533_cmd_activate_param)); 1134 + dev->out_frame->datalen += sizeof(struct pn533_cmd_activate_param); 1135 + 1136 + pn533_tx_frame_finish(dev->out_frame); 1137 + 1138 + rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, 1139 + dev->in_maxlen); 1140 + if (rc) 1141 + return rc; 1142 + 1143 + resp = (struct pn533_cmd_activate_response *) 1144 + PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame); 1145 + rc = resp->status & PN533_CMD_RET_MASK; 1146 + if (rc != PN533_CMD_RET_SUCCESS) 1147 + return -EIO; 1148 + 1149 + return 0; 1150 + } 1151 + 1152 + static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx, 1153 + u32 protocol) 1154 + { 1155 + struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1156 + int rc; 1157 + 1158 + nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__, 1159 + protocol); 1160 + 1161 + if (dev->poll_mod_count) { 1162 + nfc_dev_err(&dev->interface->dev, "Cannot activate while" 1163 + " polling"); 1164 + return -EBUSY; 1165 + } 1166 + 1167 + if (dev->tgt_active_prot) { 1168 + nfc_dev_err(&dev->interface->dev, "There is already an active" 1169 + " target"); 1170 + return -EBUSY; 1171 + } 1172 + 1173 + if (!dev->tgt_available_prots) { 1174 + nfc_dev_err(&dev->interface->dev, "There is no available target" 1175 + " to activate"); 1176 + return -EINVAL; 
1177 + } 1178 + 1179 + if (!(dev->tgt_available_prots & (1 << protocol))) { 1180 + nfc_dev_err(&dev->interface->dev, "The target does not support" 1181 + " the requested protocol %u", protocol); 1182 + return -EINVAL; 1183 + } 1184 + 1185 + if (protocol == NFC_PROTO_NFC_DEP) { 1186 + rc = pn533_activate_target_nfcdep(dev); 1187 + if (rc) { 1188 + nfc_dev_err(&dev->interface->dev, "Error %d when" 1189 + " activating target with" 1190 + " NFC_DEP protocol", rc); 1191 + return rc; 1192 + } 1193 + } 1194 + 1195 + dev->tgt_active_prot = protocol; 1196 + dev->tgt_available_prots = 0; 1197 + 1198 + return 0; 1199 + } 1200 + 1201 + static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) 1202 + { 1203 + struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1204 + u8 tg; 1205 + u8 status; 1206 + int rc; 1207 + 1208 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1209 + 1210 + if (!dev->tgt_active_prot) { 1211 + nfc_dev_err(&dev->interface->dev, "There is no active target"); 1212 + return; 1213 + } 1214 + 1215 + dev->tgt_active_prot = 0; 1216 + 1217 + pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE); 1218 + 1219 + tg = 1; 1220 + memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), &tg, sizeof(u8)); 1221 + dev->out_frame->datalen += sizeof(u8); 1222 + 1223 + pn533_tx_frame_finish(dev->out_frame); 1224 + 1225 + rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, 1226 + dev->in_maxlen); 1227 + if (rc) { 1228 + nfc_dev_err(&dev->interface->dev, "Error when sending release" 1229 + " command to the controller"); 1230 + return; 1231 + } 1232 + 1233 + status = PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame)[0]; 1234 + rc = status & PN533_CMD_RET_MASK; 1235 + if (rc != PN533_CMD_RET_SUCCESS) 1236 + nfc_dev_err(&dev->interface->dev, "Error 0x%x when releasing" 1237 + " the target", rc); 1238 + 1239 + return; 1240 + } 1241 + 1242 + #define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3) 1243 + #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 1244 
+ 1245 + static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb) 1246 + { 1247 + int payload_len = skb->len; 1248 + struct pn533_frame *out_frame; 1249 + struct sk_buff *discarded; 1250 + u8 tg; 1251 + 1252 + nfc_dev_dbg(&dev->interface->dev, "%s - Sending %d bytes", __func__, 1253 + payload_len); 1254 + 1255 + if (payload_len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { 1256 + /* TODO: Implement support to multi-part data exchange */ 1257 + nfc_dev_err(&dev->interface->dev, "Data length greater than the" 1258 + " max allowed: %d", 1259 + PN533_CMD_DATAEXCH_DATA_MAXLEN); 1260 + return -ENOSYS; 1261 + } 1262 + 1263 + /* Reserving header space */ 1264 + if (skb_cow_head(skb, PN533_CMD_DATAEXCH_HEAD_LEN)) { 1265 + nfc_dev_err(&dev->interface->dev, "Error to add header data"); 1266 + return -ENOMEM; 1267 + } 1268 + 1269 + /* Reserving tail space, see pn533_tx_frame_finish */ 1270 + if (skb_cow_data(skb, PN533_FRAME_TAIL_SIZE, &discarded) < 0) { 1271 + nfc_dev_err(&dev->interface->dev, "Error to add tail data"); 1272 + return -ENOMEM; 1273 + } 1274 + 1275 + skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN); 1276 + out_frame = (struct pn533_frame *) skb->data; 1277 + 1278 + pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE); 1279 + 1280 + tg = 1; 1281 + memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8)); 1282 + out_frame->datalen += sizeof(u8); 1283 + 1284 + /* The data is already in the out_frame, just update the datalen */ 1285 + out_frame->datalen += payload_len; 1286 + 1287 + pn533_tx_frame_finish(out_frame); 1288 + skb_put(skb, PN533_FRAME_TAIL_SIZE); 1289 + 1290 + return 0; 1291 + } 1292 + 1293 + struct pn533_data_exchange_arg { 1294 + struct sk_buff *skb_resp; 1295 + struct sk_buff *skb_out; 1296 + data_exchange_cb_t cb; 1297 + void *cb_context; 1298 + }; 1299 + 1300 + static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, 1301 + u8 *params, int params_len) 1302 + { 1303 + struct pn533_data_exchange_arg *arg = _arg; 1304 
+ struct sk_buff *skb_resp = arg->skb_resp; 1305 + struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data; 1306 + int err = 0; 1307 + u8 status; 1308 + u8 cmd_ret; 1309 + 1310 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1311 + 1312 + dev_kfree_skb_irq(arg->skb_out); 1313 + 1314 + if (params_len < 0) { /* error */ 1315 + err = params_len; 1316 + goto error; 1317 + } 1318 + 1319 + skb_put(skb_resp, PN533_FRAME_SIZE(in_frame)); 1320 + 1321 + status = params[0]; 1322 + 1323 + cmd_ret = status & PN533_CMD_RET_MASK; 1324 + if (cmd_ret != PN533_CMD_RET_SUCCESS) { 1325 + nfc_dev_err(&dev->interface->dev, "PN533 reported error %d when" 1326 + " exchanging data", cmd_ret); 1327 + err = -EIO; 1328 + goto error; 1329 + } 1330 + 1331 + if (status & PN533_CMD_MI_MASK) { 1332 + /* TODO: Implement support to multi-part data exchange */ 1333 + nfc_dev_err(&dev->interface->dev, "Multi-part message not yet" 1334 + " supported"); 1335 + /* Prevent the other messages from controller */ 1336 + pn533_send_ack(dev, GFP_ATOMIC); 1337 + err = -ENOSYS; 1338 + goto error; 1339 + } 1340 + 1341 + skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN); 1342 + skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE); 1343 + 1344 + arg->cb(arg->cb_context, skb_resp, 0); 1345 + kfree(arg); 1346 + return 0; 1347 + 1348 + error: 1349 + dev_kfree_skb_irq(skb_resp); 1350 + arg->cb(arg->cb_context, NULL, err); 1351 + kfree(arg); 1352 + return 0; 1353 + } 1354 + 1355 + int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, 1356 + struct sk_buff *skb, 1357 + data_exchange_cb_t cb, 1358 + void *cb_context) 1359 + { 1360 + struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1361 + struct pn533_frame *out_frame, *in_frame; 1362 + struct pn533_data_exchange_arg *arg; 1363 + struct sk_buff *skb_resp; 1364 + int skb_resp_len; 1365 + int rc; 1366 + 1367 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1368 + 1369 + if (!dev->tgt_active_prot) { 1370 + nfc_dev_err(&dev->interface->dev, 
"Cannot exchange data if" 1371 + " there is no active target"); 1372 + rc = -EINVAL; 1373 + goto error; 1374 + } 1375 + 1376 + rc = pn533_data_exchange_tx_frame(dev, skb); 1377 + if (rc) 1378 + goto error; 1379 + 1380 + skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN + 1381 + PN533_CMD_DATAEXCH_DATA_MAXLEN + 1382 + PN533_FRAME_TAIL_SIZE; 1383 + 1384 + skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL); 1385 + if (!skb_resp) { 1386 + rc = -ENOMEM; 1387 + goto error; 1388 + } 1389 + 1390 + in_frame = (struct pn533_frame *) skb_resp->data; 1391 + out_frame = (struct pn533_frame *) skb->data; 1392 + 1393 + arg = kmalloc(sizeof(struct pn533_data_exchange_arg), GFP_KERNEL); 1394 + if (!arg) { 1395 + rc = -ENOMEM; 1396 + goto free_skb_resp; 1397 + } 1398 + 1399 + arg->skb_resp = skb_resp; 1400 + arg->skb_out = skb; 1401 + arg->cb = cb; 1402 + arg->cb_context = cb_context; 1403 + 1404 + rc = pn533_send_cmd_frame_async(dev, out_frame, in_frame, skb_resp_len, 1405 + pn533_data_exchange_complete, arg, 1406 + GFP_KERNEL); 1407 + if (rc) { 1408 + nfc_dev_err(&dev->interface->dev, "Error %d when trying to" 1409 + " perform data_exchange", rc); 1410 + goto free_arg; 1411 + } 1412 + 1413 + return 0; 1414 + 1415 + free_arg: 1416 + kfree(arg); 1417 + free_skb_resp: 1418 + kfree_skb(skb_resp); 1419 + error: 1420 + kfree_skb(skb); 1421 + return rc; 1422 + } 1423 + 1424 + static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 1425 + u8 cfgdata_len) 1426 + { 1427 + int rc; 1428 + u8 *params; 1429 + 1430 + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1431 + 1432 + pn533_tx_frame_init(dev->out_frame, PN533_CMD_RF_CONFIGURATION); 1433 + 1434 + params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame); 1435 + params[0] = cfgitem; 1436 + memcpy(&params[1], cfgdata, cfgdata_len); 1437 + dev->out_frame->datalen += (1 + cfgdata_len); 1438 + 1439 + pn533_tx_frame_finish(dev->out_frame); 1440 + 1441 + rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, 1442 + 
dev->in_maxlen); 1443 + 1444 + return rc; 1445 + } 1446 + 1447 + struct nfc_ops pn533_nfc_ops = { 1448 + .start_poll = pn533_start_poll, 1449 + .stop_poll = pn533_stop_poll, 1450 + .activate_target = pn533_activate_target, 1451 + .deactivate_target = pn533_deactivate_target, 1452 + .data_exchange = pn533_data_exchange, 1453 + }; 1454 + 1455 + static int pn533_probe(struct usb_interface *interface, 1456 + const struct usb_device_id *id) 1457 + { 1458 + struct pn533_fw_version *fw_ver; 1459 + struct pn533 *dev; 1460 + struct usb_host_interface *iface_desc; 1461 + struct usb_endpoint_descriptor *endpoint; 1462 + struct pn533_config_max_retries max_retries; 1463 + int in_endpoint = 0; 1464 + int out_endpoint = 0; 1465 + int rc = -ENOMEM; 1466 + int i; 1467 + u32 protocols; 1468 + 1469 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1470 + if (!dev) 1471 + return -ENOMEM; 1472 + 1473 + dev->udev = usb_get_dev(interface_to_usbdev(interface)); 1474 + dev->interface = interface; 1475 + sema_init(&dev->cmd_lock, 1); 1476 + 1477 + iface_desc = interface->cur_altsetting; 1478 + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 1479 + endpoint = &iface_desc->endpoint[i].desc; 1480 + 1481 + if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) { 1482 + dev->in_maxlen = le16_to_cpu(endpoint->wMaxPacketSize); 1483 + in_endpoint = endpoint->bEndpointAddress; 1484 + } 1485 + 1486 + if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) { 1487 + dev->out_maxlen = 1488 + le16_to_cpu(endpoint->wMaxPacketSize); 1489 + out_endpoint = endpoint->bEndpointAddress; 1490 + } 1491 + } 1492 + 1493 + if (!in_endpoint || !out_endpoint) { 1494 + nfc_dev_err(&interface->dev, "Could not find bulk-in or" 1495 + " bulk-out endpoint"); 1496 + rc = -ENODEV; 1497 + goto error; 1498 + } 1499 + 1500 + dev->in_frame = kmalloc(dev->in_maxlen, GFP_KERNEL); 1501 + dev->in_urb = usb_alloc_urb(0, GFP_KERNEL); 1502 + dev->out_frame = kmalloc(dev->out_maxlen, GFP_KERNEL); 1503 + dev->out_urb = usb_alloc_urb(0, 
GFP_KERNEL); 1504 + 1505 + if (!dev->in_frame || !dev->out_frame || 1506 + !dev->in_urb || !dev->out_urb) 1507 + goto error; 1508 + 1509 + usb_fill_bulk_urb(dev->in_urb, dev->udev, 1510 + usb_rcvbulkpipe(dev->udev, in_endpoint), 1511 + NULL, 0, NULL, dev); 1512 + usb_fill_bulk_urb(dev->out_urb, dev->udev, 1513 + usb_sndbulkpipe(dev->udev, out_endpoint), 1514 + NULL, 0, 1515 + pn533_send_complete, dev); 1516 + 1517 + tasklet_init(&dev->tasklet, pn533_tasklet_cmd_complete, (ulong)dev); 1518 + 1519 + usb_set_intfdata(interface, dev); 1520 + 1521 + pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION); 1522 + pn533_tx_frame_finish(dev->out_frame); 1523 + 1524 + rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, 1525 + dev->in_maxlen); 1526 + if (rc) 1527 + goto kill_tasklet; 1528 + 1529 + fw_ver = (struct pn533_fw_version *) 1530 + PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame); 1531 + nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now" 1532 + " attached", fw_ver->ver, fw_ver->rev); 1533 + 1534 + protocols = NFC_PROTO_JEWEL_MASK 1535 + | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK 1536 + | NFC_PROTO_ISO14443_MASK 1537 + | NFC_PROTO_NFC_DEP_MASK; 1538 + 1539 + dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols); 1540 + if (!dev->nfc_dev) 1541 + goto kill_tasklet; 1542 + 1543 + nfc_set_parent_dev(dev->nfc_dev, &interface->dev); 1544 + nfc_set_drvdata(dev->nfc_dev, dev); 1545 + 1546 + rc = nfc_register_device(dev->nfc_dev); 1547 + if (rc) 1548 + goto free_nfc_dev; 1549 + 1550 + max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS; 1551 + max_retries.mx_rty_psl = 2; 1552 + max_retries.mx_rty_passive_act = PN533_CONFIG_MAX_RETRIES_NO_RETRY; 1553 + 1554 + rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES, 1555 + (u8 *) &max_retries, sizeof(max_retries)); 1556 + 1557 + if (rc) { 1558 + nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES" 1559 + " config"); 1560 + goto free_nfc_dev; 1561 + } 1562 
+ 1563 + return 0; 1564 + 1565 + free_nfc_dev: 1566 + nfc_free_device(dev->nfc_dev); 1567 + kill_tasklet: 1568 + tasklet_kill(&dev->tasklet); 1569 + error: 1570 + kfree(dev->in_frame); 1571 + usb_free_urb(dev->in_urb); 1572 + kfree(dev->out_frame); 1573 + usb_free_urb(dev->out_urb); 1574 + kfree(dev); 1575 + return rc; 1576 + } 1577 + 1578 + static void pn533_disconnect(struct usb_interface *interface) 1579 + { 1580 + struct pn533 *dev; 1581 + 1582 + dev = usb_get_intfdata(interface); 1583 + usb_set_intfdata(interface, NULL); 1584 + 1585 + nfc_unregister_device(dev->nfc_dev); 1586 + nfc_free_device(dev->nfc_dev); 1587 + 1588 + usb_kill_urb(dev->in_urb); 1589 + usb_kill_urb(dev->out_urb); 1590 + 1591 + tasklet_kill(&dev->tasklet); 1592 + 1593 + kfree(dev->in_frame); 1594 + usb_free_urb(dev->in_urb); 1595 + kfree(dev->out_frame); 1596 + usb_free_urb(dev->out_urb); 1597 + kfree(dev); 1598 + 1599 + nfc_dev_info(&dev->interface->dev, "NXP PN533 NFC device disconnected"); 1600 + } 1601 + 1602 + static struct usb_driver pn533_driver = { 1603 + .name = "pn533", 1604 + .probe = pn533_probe, 1605 + .disconnect = pn533_disconnect, 1606 + .id_table = pn533_table, 1607 + }; 1608 + 1609 + static int __init pn533_init(void) 1610 + { 1611 + int rc; 1612 + 1613 + rc = usb_register(&pn533_driver); 1614 + if (rc) 1615 + err("usb_register failed. Error number %d", rc); 1616 + 1617 + return rc; 1618 + } 1619 + 1620 + static void __exit pn533_exit(void) 1621 + { 1622 + usb_deregister(&pn533_driver); 1623 + } 1624 + 1625 + module_init(pn533_init); 1626 + module_exit(pn533_exit); 1627 + 1628 + MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>," 1629 + " Aloisio Almeida Jr <aloisio.almeida@openbossa.org>"); 1630 + MODULE_DESCRIPTION("PN533 usb driver ver " VERSION); 1631 + MODULE_VERSION(VERSION); 1632 + MODULE_LICENSE("GPL");
+3 -6
drivers/ssb/pci.c
··· 734 734 static void ssb_pci_get_boardinfo(struct ssb_bus *bus, 735 735 struct ssb_boardinfo *bi) 736 736 { 737 - pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_VENDOR_ID, 738 - &bi->vendor); 739 - pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_ID, 740 - &bi->type); 741 - pci_read_config_word(bus->host_pci, PCI_REVISION_ID, 742 - &bi->rev); 737 + bi->vendor = bus->host_pci->subsystem_vendor; 738 + bi->type = bus->host_pci->subsystem_device; 739 + bi->rev = bus->host_pci->revision; 743 740 } 744 741 745 742 int ssb_pci_get_invariants(struct ssb_bus *bus,
+126
include/linux/nfc.h
··· 1 + /* 2 + * Copyright (C) 2011 Instituto Nokia de Tecnologia 3 + * 4 + * Authors: 5 + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> 6 + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the 20 + * Free Software Foundation, Inc., 21 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 + */ 23 + 24 + #ifndef __LINUX_NFC_H 25 + #define __LINUX_NFC_H 26 + 27 + #include <linux/types.h> 28 + #include <linux/socket.h> 29 + 30 + #define NFC_GENL_NAME "nfc" 31 + #define NFC_GENL_VERSION 1 32 + 33 + #define NFC_GENL_MCAST_EVENT_NAME "events" 34 + 35 + /** 36 + * enum nfc_commands - supported nfc commands 37 + * 38 + * @NFC_CMD_UNSPEC: unspecified command 39 + * 40 + * @NFC_CMD_GET_DEVICE: request information about a device (requires 41 + * %NFC_ATTR_DEVICE_INDEX) or dump request to get a list of all nfc devices 42 + * @NFC_CMD_START_POLL: start polling for targets using the given protocols 43 + * (requires %NFC_ATTR_DEVICE_INDEX and %NFC_ATTR_PROTOCOLS) 44 + * @NFC_CMD_STOP_POLL: stop polling for targets (requires 45 + * %NFC_ATTR_DEVICE_INDEX) 46 + * @NFC_CMD_GET_TARGET: dump all targets found by the previous poll (requires 47 + * %NFC_ATTR_DEVICE_INDEX) 48 + * @NFC_EVENT_TARGETS_FOUND: event emitted when a new target is found 49 + * (it sends %NFC_ATTR_DEVICE_INDEX) 50 + * 
@NFC_EVENT_DEVICE_ADDED: event emitted when a new device is registered 51 + * (it sends %NFC_ATTR_DEVICE_NAME, %NFC_ATTR_DEVICE_INDEX and 52 + * %NFC_ATTR_PROTOCOLS) 53 + * @NFC_EVENT_DEVICE_REMOVED: event emitted when a device is removed 54 + * (it sends %NFC_ATTR_DEVICE_INDEX) 55 + */ 56 + enum nfc_commands { 57 + NFC_CMD_UNSPEC, 58 + NFC_CMD_GET_DEVICE, 59 + NFC_CMD_START_POLL, 60 + NFC_CMD_STOP_POLL, 61 + NFC_CMD_GET_TARGET, 62 + NFC_EVENT_TARGETS_FOUND, 63 + NFC_EVENT_DEVICE_ADDED, 64 + NFC_EVENT_DEVICE_REMOVED, 65 + /* private: internal use only */ 66 + __NFC_CMD_AFTER_LAST 67 + }; 68 + #define NFC_CMD_MAX (__NFC_CMD_AFTER_LAST - 1) 69 + 70 + /** 71 + * enum nfc_attrs - supported nfc attributes 72 + * 73 + * @NFC_ATTR_UNSPEC: unspecified attribute 74 + * 75 + * @NFC_ATTR_DEVICE_INDEX: index of nfc device 76 + * @NFC_ATTR_DEVICE_NAME: device name, max 8 chars 77 + * @NFC_ATTR_PROTOCOLS: nfc protocols - bitwise or-ed combination from 78 + * NFC_PROTO_*_MASK constants 79 + * @NFC_ATTR_TARGET_INDEX: index of the nfc target 80 + * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID 81 + * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the 82 + * target is not NFC-Forum compliant) 83 + */ 84 + enum nfc_attrs { 85 + NFC_ATTR_UNSPEC, 86 + NFC_ATTR_DEVICE_INDEX, 87 + NFC_ATTR_DEVICE_NAME, 88 + NFC_ATTR_PROTOCOLS, 89 + NFC_ATTR_TARGET_INDEX, 90 + NFC_ATTR_TARGET_SENS_RES, 91 + NFC_ATTR_TARGET_SEL_RES, 92 + /* private: internal use only */ 93 + __NFC_ATTR_AFTER_LAST 94 + }; 95 + #define NFC_ATTR_MAX (__NFC_ATTR_AFTER_LAST - 1) 96 + 97 + #define NFC_DEVICE_NAME_MAXSIZE 8 98 + 99 + /* NFC protocols */ 100 + #define NFC_PROTO_JEWEL 1 101 + #define NFC_PROTO_MIFARE 2 102 + #define NFC_PROTO_FELICA 3 103 + #define NFC_PROTO_ISO14443 4 104 + #define NFC_PROTO_NFC_DEP 5 105 + 106 + #define NFC_PROTO_MAX 6 107 + 108 + /* NFC protocols masks used in bitsets */ 109 + #define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) 110 + #define 
NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) 111 + #define NFC_PROTO_FELICA_MASK (1 << NFC_PROTO_FELICA) 112 + #define NFC_PROTO_ISO14443_MASK (1 << NFC_PROTO_ISO14443) 113 + #define NFC_PROTO_NFC_DEP_MASK (1 << NFC_PROTO_NFC_DEP) 114 + 115 + struct sockaddr_nfc { 116 + sa_family_t sa_family; 117 + __u32 dev_idx; 118 + __u32 target_idx; 119 + __u32 nfc_protocol; 120 + }; 121 + 122 + /* NFC socket protocols */ 123 + #define NFC_SOCKPROTO_RAW 0 124 + #define NFC_SOCKPROTO_MAX 1 125 + 126 + #endif /*__LINUX_NFC_H */
+39
include/linux/nl80211.h
··· 483 483 * more background information, see 484 484 * http://wireless.kernel.org/en/users/Documentation/WoWLAN. 485 485 * 486 + * @NL80211_CMD_SET_REKEY_OFFLOAD: This command is used give the driver 487 + * the necessary information for supporting GTK rekey offload. This 488 + * feature is typically used during WoWLAN. The configuration data 489 + * is contained in %NL80211_ATTR_REKEY_DATA (which is nested and 490 + * contains the data in sub-attributes). After rekeying happened, 491 + * this command may also be sent by the driver as an MLME event to 492 + * inform userspace of the new replay counter. 493 + * 486 494 * @NL80211_CMD_MAX: highest used command number 487 495 * @__NL80211_CMD_AFTER_LAST: internal use 488 496 */ ··· 612 604 NL80211_CMD_STOP_SCHED_SCAN, 613 605 NL80211_CMD_SCHED_SCAN_RESULTS, 614 606 NL80211_CMD_SCHED_SCAN_STOPPED, 607 + 608 + NL80211_CMD_SET_REKEY_OFFLOAD, 615 609 616 610 /* add new commands above here */ 617 611 ··· 1006 996 * are managed in software: interfaces of these types aren't subject to 1007 997 * any restrictions in their number or combinations. 1008 998 * 999 + * @%NL80211_ATTR_REKEY_DATA: nested attribute containing the information 1000 + * necessary for GTK rekeying in the device, see &enum nl80211_rekey_data. 
1001 + * 1009 1002 * @NL80211_ATTR_MAX: highest attribute number currently defined 1010 1003 * @__NL80211_ATTR_AFTER_LAST: internal use 1011 1004 */ ··· 1206 1193 1207 1194 NL80211_ATTR_INTERFACE_COMBINATIONS, 1208 1195 NL80211_ATTR_SOFTWARE_IFTYPES, 1196 + 1197 + NL80211_ATTR_REKEY_DATA, 1209 1198 1210 1199 /* add attributes here, update the policy in nl80211.c */ 1211 1200 ··· 2374 2359 /* keep last */ 2375 2360 NUM_NL80211_PLINK_STATES, 2376 2361 MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1 2362 + }; 2363 + 2364 + #define NL80211_KCK_LEN 16 2365 + #define NL80211_KEK_LEN 16 2366 + #define NL80211_REPLAY_CTR_LEN 8 2367 + 2368 + /** 2369 + * enum nl80211_rekey_data - attributes for GTK rekey offload 2370 + * @__NL80211_REKEY_DATA_INVALID: invalid number for nested attributes 2371 + * @NL80211_REKEY_DATA_KEK: key encryption key (binary) 2372 + * @NL80211_REKEY_DATA_KCK: key confirmation key (binary) 2373 + * @NL80211_REKEY_DATA_REPLAY_CTR: replay counter (binary) 2374 + * @NUM_NL80211_REKEY_DATA: number of rekey attributes (internal) 2375 + * @MAX_NL80211_REKEY_DATA: highest rekey attribute (internal) 2376 + */ 2377 + enum nl80211_rekey_data { 2378 + __NL80211_REKEY_DATA_INVALID, 2379 + NL80211_REKEY_DATA_KEK, 2380 + NL80211_REKEY_DATA_KCK, 2381 + NL80211_REKEY_DATA_REPLAY_CTR, 2382 + 2383 + /* keep last */ 2384 + NUM_NL80211_REKEY_DATA, 2385 + MAX_NL80211_REKEY_DATA = NUM_NL80211_REKEY_DATA - 1 2377 2386 }; 2378 2387 2379 2388 #endif /* __LINUX_NL80211_H */
+3 -1
include/linux/socket.h
··· 192 192 #define AF_IEEE802154 36 /* IEEE802154 sockets */ 193 193 #define AF_CAIF 37 /* CAIF sockets */ 194 194 #define AF_ALG 38 /* Algorithm sockets */ 195 - #define AF_MAX 39 /* For now.. */ 195 + #define AF_NFC 39 /* NFC sockets */ 196 + #define AF_MAX 40 /* For now.. */ 196 197 197 198 /* Protocol families, same as address families. */ 198 199 #define PF_UNSPEC AF_UNSPEC ··· 235 234 #define PF_IEEE802154 AF_IEEE802154 236 235 #define PF_CAIF AF_CAIF 237 236 #define PF_ALG AF_ALG 237 + #define PF_NFC AF_NFC 238 238 #define PF_MAX AF_MAX 239 239 240 240 /* Maximum queue length specifiable by listen. */
+1 -1
include/linux/ssb/ssb.h
··· 99 99 struct ssb_boardinfo { 100 100 u16 vendor; 101 101 u16 type; 102 - u16 rev; 102 + u8 rev; 103 103 }; 104 104 105 105
+26
include/net/cfg80211.h
··· 1154 1154 }; 1155 1155 1156 1156 /** 1157 + * struct cfg80211_gtk_rekey_data - rekey data 1158 + * @kek: key encryption key 1159 + * @kck: key confirmation key 1160 + * @replay_ctr: replay counter 1161 + */ 1162 + struct cfg80211_gtk_rekey_data { 1163 + u8 kek[NL80211_KEK_LEN]; 1164 + u8 kck[NL80211_KCK_LEN]; 1165 + u8 replay_ctr[NL80211_REPLAY_CTR_LEN]; 1166 + }; 1167 + 1168 + /** 1157 1169 * struct cfg80211_ops - backend description for wireless configuration 1158 1170 * 1159 1171 * This struct is registered by fullmac card drivers and/or wireless stacks ··· 1208 1196 * @set_default_key: set the default key on an interface 1209 1197 * 1210 1198 * @set_default_mgmt_key: set the default management frame key on an interface 1199 + * 1200 + * @set_rekey_data: give the data necessary for GTK rekeying to the driver 1211 1201 * 1212 1202 * @add_beacon: Add a beacon with given parameters, @head, @interval 1213 1203 * and @dtim_period will be valid, @tail is optional. ··· 1513 1499 struct net_device *dev, 1514 1500 struct cfg80211_sched_scan_request *request); 1515 1501 int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev); 1502 + 1503 + int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev, 1504 + struct cfg80211_gtk_rekey_data *data); 1516 1505 }; 1517 1506 1518 1507 /* ··· 3049 3032 */ 3050 3033 void cfg80211_cqm_pktloss_notify(struct net_device *dev, 3051 3034 const u8 *peer, u32 num_packets, gfp_t gfp); 3035 + 3036 + /** 3037 + * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying 3038 + * @dev: network device 3039 + * @bssid: BSSID of AP (to avoid races) 3040 + * @replay_ctr: new replay counter 3041 + */ 3042 + void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, 3043 + const u8 *replay_ctr, gfp_t gfp); 3052 3044 3053 3045 /* Logging, debugging and troubleshooting/diagnostic helpers. */ 3054 3046
+47
include/net/mac80211.h
··· 1628 1628 * ask the device to suspend. This is only invoked when WoWLAN is 1629 1629 * configured, otherwise the device is deconfigured completely and 1630 1630 * reconfigured at resume time. 1631 + * The driver may also impose special conditions under which it 1632 + * wants to use the "normal" suspend (deconfigure), say if it only 1633 + * supports WoWLAN when the device is associated. In this case, it 1634 + * must return 1 from this function. 1631 1635 * 1632 1636 * @resume: If WoWLAN was configured, this indicates that mac80211 is 1633 1637 * now resuming its operation, after this the device must be fully ··· 1699 1695 * This callback will be called in the context of Rx. Called for drivers 1700 1696 * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. 1701 1697 * The callback must be atomic. 1698 + * 1699 + * @set_rekey_data: If the device supports GTK rekeying, for example while the 1700 + * host is suspended, it can assign this callback to retrieve the data 1701 + * necessary to do GTK rekeying, this is the KEK, KCK and replay counter. 1702 + * After rekeying was done it should (for example during resume) notify 1703 + * userspace of the new replay counter using ieee80211_gtk_rekey_notify(). 1702 1704 * 1703 1705 * @hw_scan: Ask the hardware to service the scan request, no need to start 1704 1706 * the scan state machine in stack. 
The scan must honour the channel ··· 1918 1908 struct ieee80211_key_conf *conf, 1919 1909 struct ieee80211_sta *sta, 1920 1910 u32 iv32, u16 *phase1key); 1911 + void (*set_rekey_data)(struct ieee80211_hw *hw, 1912 + struct ieee80211_vif *vif, 1913 + struct cfg80211_gtk_rekey_data *data); 1921 1914 int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1922 1915 struct cfg80211_scan_request *req); 1923 1916 void (*cancel_hw_scan)(struct ieee80211_hw *hw, ··· 2594 2581 void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, 2595 2582 struct sk_buff *skb, 2596 2583 enum ieee80211_tkip_key_type type, u8 *key); 2584 + 2585 + /** 2586 + * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying 2587 + * @vif: virtual interface the rekeying was done on 2588 + * @bssid: The BSSID of the AP, for checking association 2589 + * @replay_ctr: the new replay counter after GTK rekeying 2590 + * @gfp: allocation flags 2591 + */ 2592 + void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, 2593 + const u8 *replay_ctr, gfp_t gfp); 2594 + 2597 2595 /** 2598 2596 * ieee80211_wake_queue - wake specific queue 2599 2597 * @hw: pointer as obtained from ieee80211_alloc_hw(). ··· 2868 2844 */ 2869 2845 void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 2870 2846 struct ieee80211_sta *pubsta, bool block); 2847 + 2848 + /** 2849 + * ieee80211_iter_keys - iterate keys programmed into the device 2850 + * @hw: pointer obtained from ieee80211_alloc_hw() 2851 + * @vif: virtual interface to iterate, may be %NULL for all 2852 + * @iter: iterator function that will be called for each key 2853 + * @iter_data: custom data to pass to the iterator function 2854 + * 2855 + * This function can be used to iterate all the keys known to 2856 + * mac80211, even those that weren't previously programmed into 2857 + * the device. This is intended for use in WoWLAN if the device 2858 + * needs reprogramming of the keys during suspend. 
Note that due 2859 + * to locking reasons, it is also only safe to call this at few 2860 + * spots since it must hold the RTNL and be able to sleep. 2861 + */ 2862 + void ieee80211_iter_keys(struct ieee80211_hw *hw, 2863 + struct ieee80211_vif *vif, 2864 + void (*iter)(struct ieee80211_hw *hw, 2865 + struct ieee80211_vif *vif, 2866 + struct ieee80211_sta *sta, 2867 + struct ieee80211_key_conf *key, 2868 + void *data), 2869 + void *iter_data); 2871 2870 2872 2871 /** 2873 2872 * ieee80211_ap_probereq_get - retrieve a Probe Request template
+156
include/net/nfc.h
··· 1 + /* 2 + * Copyright (C) 2011 Instituto Nokia de Tecnologia 3 + * 4 + * Authors: 5 + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> 6 + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the 20 + * Free Software Foundation, Inc., 21 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 + */ 23 + 24 + #ifndef __NET_NFC_H 25 + #define __NET_NFC_H 26 + 27 + #include <linux/device.h> 28 + #include <linux/skbuff.h> 29 + 30 + #define nfc_dev_info(dev, fmt, arg...) dev_info((dev), "NFC: " fmt "\n", ## arg) 31 + #define nfc_dev_err(dev, fmt, arg...) dev_err((dev), "NFC: " fmt "\n", ## arg) 32 + #define nfc_dev_dbg(dev, fmt, arg...) dev_dbg((dev), fmt "\n", ## arg) 33 + 34 + struct nfc_dev; 35 + 36 + /** 37 + * data_exchange_cb_t - Definition of nfc_data_exchange callback 38 + * 39 + * @context: nfc_data_exchange cb_context parameter 40 + * @skb: response data 41 + * @err: If an error has occurred during data exchange, it is the 42 + * error number. Zero means no error. 43 + * 44 + * When a rx or tx package is lost or corrupted or the target gets out 45 + * of the operating field, err is -EIO. 
46 + */ 47 + typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb, 48 + int err); 49 + 50 + struct nfc_ops { 51 + int (*start_poll)(struct nfc_dev *dev, u32 protocols); 52 + void (*stop_poll)(struct nfc_dev *dev); 53 + int (*activate_target)(struct nfc_dev *dev, u32 target_idx, 54 + u32 protocol); 55 + void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx); 56 + int (*data_exchange)(struct nfc_dev *dev, u32 target_idx, 57 + struct sk_buff *skb, data_exchange_cb_t cb, 58 + void *cb_context); 59 + }; 60 + 61 + struct nfc_target { 62 + u32 idx; 63 + u32 supported_protocols; 64 + u16 sens_res; 65 + u8 sel_res; 66 + }; 67 + 68 + struct nfc_genl_data { 69 + u32 poll_req_pid; 70 + struct mutex genl_data_mutex; 71 + }; 72 + 73 + struct nfc_dev { 74 + unsigned idx; 75 + unsigned target_idx; 76 + struct nfc_target *targets; 77 + int n_targets; 78 + int targets_generation; 79 + spinlock_t targets_lock; 80 + struct device dev; 81 + bool polling; 82 + struct nfc_genl_data genl_data; 83 + u32 supported_protocols; 84 + 85 + struct nfc_ops *ops; 86 + }; 87 + #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) 88 + 89 + extern struct class nfc_class; 90 + 91 + struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, 92 + u32 supported_protocols); 93 + 94 + /** 95 + * nfc_free_device - free nfc device 96 + * 97 + * @dev: The nfc device to free 98 + */ 99 + static inline void nfc_free_device(struct nfc_dev *dev) 100 + { 101 + put_device(&dev->dev); 102 + } 103 + 104 + int nfc_register_device(struct nfc_dev *dev); 105 + 106 + void nfc_unregister_device(struct nfc_dev *dev); 107 + 108 + /** 109 + * nfc_set_parent_dev - set the parent device 110 + * 111 + * @nfc_dev: The nfc device whose parent is being set 112 + * @dev: The parent device 113 + */ 114 + static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev, 115 + struct device *dev) 116 + { 117 + nfc_dev->dev.parent = dev; 118 + } 119 + 120 + /** 121 + * nfc_set_drvdata - set driver specific 
data 122 + * 123 + * @dev: The nfc device 124 + * @data: Pointer to driver specific data 125 + */ 126 + static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data) 127 + { 128 + dev_set_drvdata(&dev->dev, data); 129 + } 130 + 131 + /** 132 + * nfc_get_drvdata - get driver specific data 133 + * 134 + * @dev: The nfc device 135 + */ 136 + static inline void *nfc_get_drvdata(struct nfc_dev *dev) 137 + { 138 + return dev_get_drvdata(&dev->dev); 139 + } 140 + 141 + /** 142 + * nfc_device_name - get the nfc device name 143 + * 144 + * @dev: The nfc device whose name to return 145 + */ 146 + static inline const char *nfc_device_name(struct nfc_dev *dev) 147 + { 148 + return dev_name(&dev->dev); 149 + } 150 + 151 + struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp); 152 + 153 + int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, 154 + int ntargets); 155 + 156 + #endif /* __NET_NFC_H */
+1
net/Kconfig
··· 322 322 source "net/9p/Kconfig" 323 323 source "net/caif/Kconfig" 324 324 source "net/ceph/Kconfig" 325 + source "net/nfc/Kconfig" 325 326 326 327 327 328 endif # if NET
+1
net/Makefile
··· 68 68 obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ 69 69 obj-$(CONFIG_CEPH_LIB) += ceph/ 70 70 obj-$(CONFIG_BATMAN_ADV) += batman-adv/ 71 + obj-$(CONFIG_NFC) += nfc/
+3 -3
net/core/sock.c
··· 160 160 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 161 161 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 162 162 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , 163 - "sk_lock-AF_MAX" 163 + "sk_lock-AF_NFC" , "sk_lock-AF_MAX" 164 164 }; 165 165 static const char *const af_family_slock_key_strings[AF_MAX+1] = { 166 166 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , ··· 176 176 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 177 177 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 178 178 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , 179 - "slock-AF_MAX" 179 + "slock-AF_NFC" , "slock-AF_MAX" 180 180 }; 181 181 static const char *const af_family_clock_key_strings[AF_MAX+1] = { 182 182 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , ··· 192 192 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 193 193 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 194 194 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , 195 - "clock-AF_MAX" 195 + "clock-AF_NFC" , "clock-AF_MAX" 196 196 }; 197 197 198 198 /*
+16
net/mac80211/cfg.c
··· 2101 2101 drv_get_ringparam(local, tx, tx_max, rx, rx_max); 2102 2102 } 2103 2103 2104 + static int ieee80211_set_rekey_data(struct wiphy *wiphy, 2105 + struct net_device *dev, 2106 + struct cfg80211_gtk_rekey_data *data) 2107 + { 2108 + struct ieee80211_local *local = wiphy_priv(wiphy); 2109 + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2110 + 2111 + if (!local->ops->set_rekey_data) 2112 + return -EOPNOTSUPP; 2113 + 2114 + drv_set_rekey_data(local, sdata, data); 2115 + 2116 + return 0; 2117 + } 2118 + 2104 2119 struct cfg80211_ops mac80211_config_ops = { 2105 2120 .add_virtual_intf = ieee80211_add_iface, 2106 2121 .del_virtual_intf = ieee80211_del_iface, ··· 2178 2163 .get_antenna = ieee80211_get_antenna, 2179 2164 .set_ringparam = ieee80211_set_ringparam, 2180 2165 .get_ringparam = ieee80211_get_ringparam, 2166 + .set_rekey_data = ieee80211_set_rekey_data, 2181 2167 };
+10
net/mac80211/driver-ops.h
··· 647 647 return ret; 648 648 } 649 649 650 + static inline void drv_set_rekey_data(struct ieee80211_local *local, 651 + struct ieee80211_sub_if_data *sdata, 652 + struct cfg80211_gtk_rekey_data *data) 653 + { 654 + trace_drv_set_rekey_data(local, sdata, data); 655 + if (local->ops->set_rekey_data) 656 + local->ops->set_rekey_data(&local->hw, &sdata->vif, data); 657 + trace_drv_return_void(local); 658 + } 659 + 650 660 #endif /* __MAC80211_DRIVER_OPS */
+49
net/mac80211/driver-trace.h
··· 1024 1024 ) 1025 1025 ); 1026 1026 1027 + TRACE_EVENT(drv_set_rekey_data, 1028 + TP_PROTO(struct ieee80211_local *local, 1029 + struct ieee80211_sub_if_data *sdata, 1030 + struct cfg80211_gtk_rekey_data *data), 1031 + 1032 + TP_ARGS(local, sdata, data), 1033 + 1034 + TP_STRUCT__entry( 1035 + LOCAL_ENTRY 1036 + VIF_ENTRY 1037 + __array(u8, kek, NL80211_KEK_LEN) 1038 + __array(u8, kck, NL80211_KCK_LEN) 1039 + __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN) 1040 + ), 1041 + 1042 + TP_fast_assign( 1043 + LOCAL_ASSIGN; 1044 + VIF_ASSIGN; 1045 + memcpy(__entry->kek, data->kek, NL80211_KEK_LEN); 1046 + memcpy(__entry->kck, data->kck, NL80211_KCK_LEN); 1047 + memcpy(__entry->replay_ctr, data->replay_ctr, 1048 + NL80211_REPLAY_CTR_LEN); 1049 + ), 1050 + 1051 + TP_printk(LOCAL_PR_FMT VIF_PR_FMT, 1052 + LOCAL_PR_ARG, VIF_PR_ARG) 1053 + ); 1054 + 1027 1055 /* 1028 1056 * Tracing for API calls that drivers call. 1029 1057 */ ··· 1319 1291 DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired, 1320 1292 TP_PROTO(struct ieee80211_local *local), 1321 1293 TP_ARGS(local) 1294 + ); 1295 + 1296 + TRACE_EVENT(api_gtk_rekey_notify, 1297 + TP_PROTO(struct ieee80211_sub_if_data *sdata, 1298 + const u8 *bssid, const u8 *replay_ctr), 1299 + 1300 + TP_ARGS(sdata, bssid, replay_ctr), 1301 + 1302 + TP_STRUCT__entry( 1303 + VIF_ENTRY 1304 + __array(u8, bssid, ETH_ALEN) 1305 + __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN) 1306 + ), 1307 + 1308 + TP_fast_assign( 1309 + VIF_ASSIGN; 1310 + memcpy(__entry->bssid, bssid, ETH_ALEN); 1311 + memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN); 1312 + ), 1313 + 1314 + TP_printk(VIF_PR_FMT, VIF_PR_ARG) 1322 1315 ); 1323 1316 1324 1317 /*
+3
net/mac80211/ieee80211_i.h
··· 544 544 /* keys */ 545 545 struct list_head key_list; 546 546 547 + /* count for keys needing tailroom space allocation */ 548 + int crypto_tx_tailroom_needed_cnt; 549 + 547 550 struct net_device *dev; 548 551 struct ieee80211_local *local; 549 552
+94 -2
net/mac80211/key.c
··· 61 61 return NULL; 62 62 } 63 63 64 + static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) 65 + { 66 + /* 67 + * When this count is zero, SKB resizing for allocating tailroom 68 + * for IV or MMIC is skipped. But, this check has created two race 69 + * cases in xmit path while transiting from zero count to one: 70 + * 71 + * 1. SKB resize was skipped because no key was added but just before 72 + * the xmit key is added and SW encryption kicks off. 73 + * 74 + * 2. SKB resize was skipped because all the keys were hw planted but 75 + * just before xmit one of the key is deleted and SW encryption kicks 76 + * off. 77 + * 78 + * In both the above case SW encryption will find not enough space for 79 + * tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c) 80 + * 81 + * Solution has been explained at 82 + * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net 83 + */ 84 + 85 + if (!sdata->crypto_tx_tailroom_needed_cnt++) { 86 + /* 87 + * Flush all XMIT packets currently using HW encryption or no 88 + * encryption at all if the count transition is from 0 -> 1. 
89 + */ 90 + synchronize_net(); 91 + } 92 + } 93 + 64 94 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 65 95 { 66 96 struct ieee80211_sub_if_data *sdata; ··· 131 101 132 102 if (!ret) { 133 103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 104 + 105 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 106 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))) 107 + sdata->crypto_tx_tailroom_needed_cnt--; 108 + 134 109 return 0; 135 110 } 136 111 ··· 176 141 177 142 sta = get_sta_for_key(key); 178 143 sdata = key->sdata; 144 + 145 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 146 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))) 147 + increment_tailroom_need_count(sdata); 179 148 180 149 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 181 150 sdata = container_of(sdata->bss, ··· 433 394 ieee80211_aes_key_free(key->u.ccmp.tfm); 434 395 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) 435 396 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 436 - if (key->local) 397 + if (key->local) { 437 398 ieee80211_debugfs_key_remove(key); 399 + key->sdata->crypto_tx_tailroom_needed_cnt--; 400 + } 438 401 439 402 kfree(key); 440 403 } ··· 493 452 else 494 453 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); 495 454 455 + increment_tailroom_need_count(sdata); 456 + 496 457 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 497 458 __ieee80211_key_destroy(old_key); 498 459 ··· 541 498 542 499 mutex_lock(&sdata->local->key_mtx); 543 500 544 - list_for_each_entry(key, &sdata->key_list, list) 501 + sdata->crypto_tx_tailroom_needed_cnt = 0; 502 + 503 + list_for_each_entry(key, &sdata->key_list, list) { 504 + increment_tailroom_need_count(sdata); 545 505 ieee80211_key_enable_hw_accel(key); 506 + } 546 507 547 508 mutex_unlock(&sdata->local->key_mtx); 548 509 } 510 + 511 + void ieee80211_iter_keys(struct ieee80211_hw *hw, 512 + struct ieee80211_vif *vif, 513 + void (*iter)(struct ieee80211_hw *hw, 514 + 
struct ieee80211_vif *vif, 515 + struct ieee80211_sta *sta, 516 + struct ieee80211_key_conf *key, 517 + void *data), 518 + void *iter_data) 519 + { 520 + struct ieee80211_local *local = hw_to_local(hw); 521 + struct ieee80211_key *key; 522 + struct ieee80211_sub_if_data *sdata; 523 + 524 + ASSERT_RTNL(); 525 + 526 + mutex_lock(&local->key_mtx); 527 + if (vif) { 528 + sdata = vif_to_sdata(vif); 529 + list_for_each_entry(key, &sdata->key_list, list) 530 + iter(hw, &sdata->vif, 531 + key->sta ? &key->sta->sta : NULL, 532 + &key->conf, iter_data); 533 + } else { 534 + list_for_each_entry(sdata, &local->interfaces, list) 535 + list_for_each_entry(key, &sdata->key_list, list) 536 + iter(hw, &sdata->vif, 537 + key->sta ? &key->sta->sta : NULL, 538 + &key->conf, iter_data); 539 + } 540 + mutex_unlock(&local->key_mtx); 541 + } 542 + EXPORT_SYMBOL(ieee80211_iter_keys); 549 543 550 544 void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) 551 545 { ··· 613 533 614 534 mutex_unlock(&sdata->local->key_mtx); 615 535 } 536 + 537 + 538 + void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, 539 + const u8 *replay_ctr, gfp_t gfp) 540 + { 541 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 542 + 543 + trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr); 544 + 545 + cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp); 546 + } 547 + EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify);
+2 -2
net/mac80211/mesh_pathtbl.c
··· 647 647 mpath = node->mpath; 648 648 if (mpath->sdata == sdata && 649 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 650 - spin_lock_bh(&mpath->state_lock); 650 + spin_lock(&mpath->state_lock); 651 651 mpath->flags |= MESH_PATH_RESOLVING; 652 652 hlist_del_rcu(&node->list); 653 653 call_rcu(&node->rcu, mesh_path_node_reclaim); 654 654 atomic_dec(&tbl->entries); 655 - spin_unlock_bh(&mpath->state_lock); 655 + spin_unlock(&mpath->state_lock); 656 656 goto enddel; 657 657 } 658 658 }
+3 -1
net/mac80211/mlme.c
··· 749 749 container_of(work, struct ieee80211_local, 750 750 dynamic_ps_enable_work); 751 751 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 752 - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 752 + struct ieee80211_if_managed *ifmgd; 753 753 unsigned long flags; 754 754 int q; 755 755 756 756 /* can only happen when PS was just disabled anyway */ 757 757 if (!sdata) 758 758 return; 759 + 760 + ifmgd = &sdata->u.mgd; 759 761 760 762 if (local->hw.conf.flags & IEEE80211_CONF_PS) 761 763 return;
+10 -6
net/mac80211/pm.c
··· 72 72 local->wowlan = wowlan && local->open_count; 73 73 if (local->wowlan) { 74 74 int err = drv_suspend(local, wowlan); 75 - if (err) { 75 + if (err < 0) { 76 76 local->quiescing = false; 77 77 return err; 78 + } else if (err > 0) { 79 + WARN_ON(err != 1); 80 + local->wowlan = false; 81 + } else { 82 + list_for_each_entry(sdata, &local->interfaces, list) { 83 + cancel_work_sync(&sdata->work); 84 + ieee80211_quiesce(sdata); 85 + } 86 + goto suspend; 78 87 } 79 - list_for_each_entry(sdata, &local->interfaces, list) { 80 - cancel_work_sync(&sdata->work); 81 - ieee80211_quiesce(sdata); 82 - } 83 - goto suspend; 84 88 } 85 89 86 90 /* disable keys */
+5 -9
net/mac80211/tx.c
··· 1474 1474 1475 1475 /* device xmit handlers */ 1476 1476 1477 - static int ieee80211_skb_resize(struct ieee80211_local *local, 1477 + static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, 1478 1478 struct sk_buff *skb, 1479 1479 int head_need, bool may_encrypt) 1480 1480 { 1481 + struct ieee80211_local *local = sdata->local; 1481 1482 int tail_need = 0; 1482 1483 1483 - /* 1484 - * This could be optimised, devices that do full hardware 1485 - * crypto (including TKIP MMIC) need no tailroom... But we 1486 - * have no drivers for such devices currently. 1487 - */ 1488 - if (may_encrypt) { 1484 + if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { 1489 1485 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1490 1486 tail_need -= skb_tailroom(skb); 1491 1487 tail_need = max_t(int, tail_need, 0); ··· 1574 1578 headroom -= skb_headroom(skb); 1575 1579 headroom = max_t(int, 0, headroom); 1576 1580 1577 - if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) { 1581 + if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { 1578 1582 dev_kfree_skb(skb); 1579 1583 rcu_read_unlock(); 1580 1584 return; ··· 1941 1945 head_need += IEEE80211_ENCRYPT_HEADROOM; 1942 1946 head_need += local->tx_headroom; 1943 1947 head_need = max_t(int, 0, head_need); 1944 - if (ieee80211_skb_resize(local, skb, head_need, true)) 1948 + if (ieee80211_skb_resize(sdata, skb, head_need, true)) 1945 1949 goto fail; 1946 1950 } 1947 1951
+16
net/nfc/Kconfig
··· 1 + # 2 + # NFC subsystem configuration 3 + # 4 + 5 + menuconfig NFC 6 + depends on NET && EXPERIMENTAL 7 + tristate "NFC subsystem support (EXPERIMENTAL)" 8 + default n 9 + help 10 + Say Y here if you want to build support for NFC (Near field 11 + communication) devices. 12 + 13 + To compile this support as a module, choose M here: the module will 14 + be called nfc. 15 + 16 + source "drivers/nfc/Kconfig"
+7
net/nfc/Makefile
··· 1 + # 2 + # Makefile for the Linux NFC subsystem. 3 + # 4 + 5 + obj-$(CONFIG_NFC) += nfc.o 6 + 7 + nfc-objs := core.o netlink.o af_nfc.o rawsock.o
+98
net/nfc/af_nfc.c
··· 1 + /* 2 + * Copyright (C) 2011 Instituto Nokia de Tecnologia 3 + * 4 + * Authors: 5 + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 6 + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the 20 + * Free Software Foundation, Inc., 21 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 + */ 23 + 24 + #include <linux/nfc.h> 25 + 26 + #include "nfc.h" 27 + 28 + static DEFINE_RWLOCK(proto_tab_lock); 29 + static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX]; 30 + 31 + static int nfc_sock_create(struct net *net, struct socket *sock, int proto, 32 + int kern) 33 + { 34 + int rc = -EPROTONOSUPPORT; 35 + 36 + if (net != &init_net) 37 + return -EAFNOSUPPORT; 38 + 39 + if (proto < 0 || proto >= NFC_SOCKPROTO_MAX) 40 + return -EINVAL; 41 + 42 + read_lock(&proto_tab_lock); 43 + if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) { 44 + rc = proto_tab[proto]->create(net, sock, proto_tab[proto]); 45 + module_put(proto_tab[proto]->owner); 46 + } 47 + read_unlock(&proto_tab_lock); 48 + 49 + return rc; 50 + } 51 + 52 + static struct net_proto_family nfc_sock_family_ops = { 53 + .owner = THIS_MODULE, 54 + .family = PF_NFC, 55 + .create = nfc_sock_create, 56 + }; 57 + 58 + int nfc_proto_register(const struct nfc_protocol *nfc_proto) 59 + { 60 + int rc; 61 + 62 + if (nfc_proto->id 
< 0 || nfc_proto->id >= NFC_SOCKPROTO_MAX) 63 + return -EINVAL; 64 + 65 + rc = proto_register(nfc_proto->proto, 0); 66 + if (rc) 67 + return rc; 68 + 69 + write_lock(&proto_tab_lock); 70 + if (proto_tab[nfc_proto->id]) 71 + rc = -EBUSY; 72 + else 73 + proto_tab[nfc_proto->id] = nfc_proto; 74 + write_unlock(&proto_tab_lock); 75 + 76 + return rc; 77 + } 78 + EXPORT_SYMBOL(nfc_proto_register); 79 + 80 + void nfc_proto_unregister(const struct nfc_protocol *nfc_proto) 81 + { 82 + write_lock(&proto_tab_lock); 83 + proto_tab[nfc_proto->id] = NULL; 84 + write_unlock(&proto_tab_lock); 85 + 86 + proto_unregister(nfc_proto->proto); 87 + } 88 + EXPORT_SYMBOL(nfc_proto_unregister); 89 + 90 + int __init af_nfc_init(void) 91 + { 92 + return sock_register(&nfc_sock_family_ops); 93 + } 94 + 95 + void af_nfc_exit(void) 96 + { 97 + sock_unregister(PF_NFC); 98 + }
+468
net/nfc/core.c
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "nfc.h"

#define VERSION "0.1"

/* Device-list generation counter; bumped under nfc_devlist_mutex on every
 * device add/remove and used as the netlink dump sequence number so
 * userspace can detect concurrent changes. */
int nfc_devlist_generation;
DEFINE_MUTEX(nfc_devlist_mutex);

/* printk() helper that prefixes every NFC message with "NFC: ". */
int nfc_printk(const char *level, const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	/* %pV expands the (fmt, va_list) pair captured in vaf */
	r = printk("%sNFC: %pV\n", level, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(nfc_printk);

/**
 * nfc_start_poll - start polling for nfc targets
 *
 * @dev: The nfc device that must start polling
 * @protocols: bitset of nfc protocols that must be used for polling
 *
 * The device remains polling for targets until a target is found or
 * the nfc_stop_poll function is called.
 */
int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
{
	int rc;

	nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols);

	if (!protocols)
		return -EINVAL;

	/* device_lock() also serializes against nfc_unregister_device(),
	 * which holds it across device_del() */
	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	/* only one poll request at a time per device */
	if (dev->polling) {
		rc = -EBUSY;
		goto error;
	}

	rc = dev->ops->start_poll(dev, protocols);
	if (!rc)
		dev->polling = true;

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_stop_poll - stop polling for nfc targets
 *
 * @dev: The nfc device that must stop polling
 */
int nfc_stop_poll(struct nfc_dev *dev)
{
	int rc = 0;

	nfc_dbg("dev_name=%s", dev_name(&dev->dev));

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (!dev->polling) {
		rc = -EINVAL;
		goto error;
	}

	dev->ops->stop_poll(dev);
	dev->polling = false;

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_activate_target - prepare the target for data exchange
 *
 * @dev: The nfc device that found the target
 * @target_idx: index of the target that must be activated
 * @protocol: nfc protocol that will be used for data exchange
 */
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
	int rc;

	nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev),
		target_idx, protocol);

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	rc = dev->ops->activate_target(dev, target_idx, protocol);

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_deactivate_target - deactivate a nfc target
 *
 * @dev: The nfc device that found the target
 * @target_idx: index of the target that must be deactivated
 */
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
{
	int rc = 0;

	nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx);

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	dev->ops->deactivate_target(dev, target_idx);

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_data_exchange - transceive data
 *
 * @dev: The nfc device that found the target
 * @target_idx: index of the target
 * @skb: data to be sent
 * @cb: callback called when the response is received
 * @cb_context: parameter for the callback function
 *
 * The user must wait for the callback before calling this function again.
 *
 * Ownership of @skb passes to this function: on the -ENODEV path it is
 * freed here; otherwise the driver's data_exchange op consumes it.
 */
int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
						struct sk_buff *skb,
						data_exchange_cb_t cb,
						void *cb_context)
{
	int rc;

	nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev),
							target_idx, skb->len);

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		kfree_skb(skb);
		goto error;
	}

	rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_alloc_skb - allocate a skb for data exchange responses
 *
 * @size: size to allocate
 * @gfp: gfp flags
 *
 * One extra byte of headroom is reserved; the rawsock code pushes a
 * one-byte header onto received frames (see rawsock_add_header).
 */
struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
{
	struct sk_buff *skb;
	unsigned int total_size;

	total_size = size + 1;
	skb = alloc_skb(total_size, gfp);

	if (skb)
		skb_reserve(skb, 1);

	return skb;
}
EXPORT_SYMBOL(nfc_alloc_skb);

/**
 * nfc_targets_found - inform that targets were found
 *
 * @dev: The nfc device that found the targets
 * @targets: array of nfc targets found
 * @ntargets: targets array size
 *
 * The device driver must call this function when one or many nfc targets
 * are found. After calling this function, the device driver must stop
 * polling for targets.
 */
int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
							int n_targets)
{
	int i;

	nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets);

	/* NOTE(review): dev->polling is cleared here without device_lock;
	 * presumably safe because the driver has stopped polling before
	 * calling in — confirm against driver callers. */
	dev->polling = false;

	/* assign globally increasing indexes so stale indexes from a
	 * previous poll round can be detected */
	for (i = 0; i < n_targets; i++)
		targets[i].idx = dev->target_idx++;

	spin_lock_bh(&dev->targets_lock);

	dev->targets_generation++;

	/* replace the previous target list wholesale */
	kfree(dev->targets);
	dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
								GFP_ATOMIC);

	if (!dev->targets) {
		dev->n_targets = 0;
		spin_unlock_bh(&dev->targets_lock);
		return -ENOMEM;
	}

	dev->n_targets = n_targets;
	spin_unlock_bh(&dev->targets_lock);

	/* notify userspace listeners via generic netlink multicast */
	nfc_genl_targets_found(dev);

	return 0;
}
EXPORT_SYMBOL(nfc_targets_found);

/* struct device release callback: frees everything owned by the nfc_dev
 * once the last reference is dropped. */
static void nfc_release(struct device *d)
{
	struct nfc_dev *dev = to_nfc_dev(d);

	nfc_dbg("dev_name=%s", dev_name(&dev->dev));

	nfc_genl_data_exit(&dev->genl_data);
	kfree(dev->targets);
	kfree(dev);
}

struct class nfc_class = {
	.name = "nfc",
	.dev_release = nfc_release,
};
EXPORT_SYMBOL(nfc_class);

/* class_find_device() match callback: compare device index. */
static int match_idx(struct device *d, void *data)
{
	struct nfc_dev *dev = to_nfc_dev(d);
	unsigned *idx = data;

	return dev->idx == *idx;
}

/* Look up an nfc device by index.  On success a device reference is held
 * (taken by class_find_device); drop it with nfc_put_device(). */
struct nfc_dev *nfc_get_device(unsigned idx)
{
	struct device *d;

	d = class_find_device(&nfc_class, NULL, &idx, match_idx);
	if (!d)
		return NULL;

	return to_nfc_dev(d);
}

/**
 * nfc_allocate_device - allocate a new nfc device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 *
 * Returns NULL if any mandatory op is missing, no protocol is supported,
 * or allocation fails.
 */
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
					u32 supported_protocols)
{
	static atomic_t dev_no = ATOMIC_INIT(0);
	struct nfc_dev *dev;

	/* all ops are mandatory */
	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
		!ops->deactivate_target || !ops->data_exchange)
		return NULL;

	if (!supported_protocols)
		return NULL;

	dev = kzalloc(sizeof(struct nfc_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->dev.class = &nfc_class;
	dev->idx = atomic_inc_return(&dev_no) - 1;
	dev_set_name(&dev->dev, "nfc%d", dev->idx);
	device_initialize(&dev->dev);

	dev->ops = ops;
	dev->supported_protocols = supported_protocols;

	spin_lock_init(&dev->targets_lock);
	nfc_genl_data_init(&dev->genl_data);

	/* first generation must not be 0 */
	dev->targets_generation = 1;

	return dev;
}
EXPORT_SYMBOL(nfc_allocate_device);

/**
 * nfc_register_device - register a nfc device in the nfc subsystem
 *
 * @dev: The nfc device to register
 */
int nfc_register_device(struct nfc_dev *dev)
{
	int rc;

	nfc_dbg("dev_name=%s", dev_name(&dev->dev));

	mutex_lock(&nfc_devlist_mutex);
	nfc_devlist_generation++;
	rc = device_add(&dev->dev);
	mutex_unlock(&nfc_devlist_mutex);

	if (rc < 0)
		return rc;

	/* notification failure is non-fatal: the device is registered */
	rc = nfc_genl_device_added(dev);
	if (rc)
		nfc_dbg("The userspace won't be notified that the device %s was"
						" added", dev_name(&dev->dev));

	return 0;
}
EXPORT_SYMBOL(nfc_register_device);

/**
 * nfc_unregister_device - unregister a nfc device in the nfc subsystem
 *
 * @dev: The nfc device to unregister
 */
void nfc_unregister_device(struct nfc_dev *dev)
{
	int rc;

	nfc_dbg("dev_name=%s", dev_name(&dev->dev));

	mutex_lock(&nfc_devlist_mutex);
	nfc_devlist_generation++;

	/* lock to avoid unregistering a device
	   while an operation is in progress */
	device_lock(&dev->dev);
	device_del(&dev->dev);
	device_unlock(&dev->dev);

	mutex_unlock(&nfc_devlist_mutex);

	rc = nfc_genl_device_removed(dev);
	if (rc)
		nfc_dbg("The userspace won't be notified that the device %s"
					" was removed", dev_name(&dev->dev));

}
EXPORT_SYMBOL(nfc_unregister_device);

/* Subsystem bring-up: class, netlink, rawsock protocol, socket family —
 * each failure path unwinds everything registered before it. */
static int __init nfc_init(void)
{
	int rc;

	nfc_info("NFC Core ver %s", VERSION);

	rc = class_register(&nfc_class);
	if (rc)
		return rc;

	rc = nfc_genl_init();
	if (rc)
		goto err_genl;

	/* the first generation must not be 0 */
	nfc_devlist_generation = 1;

	rc = rawsock_init();
	if (rc)
		goto err_rawsock;

	rc = af_nfc_init();
	if (rc)
		goto err_af_nfc;

	return 0;

err_af_nfc:
	rawsock_exit();
err_rawsock:
	nfc_genl_exit();
err_genl:
	class_unregister(&nfc_class);
	return rc;
}

/* Tear down in strict reverse order of nfc_init(). */
static void __exit nfc_exit(void)
{
	af_nfc_exit();
	rawsock_exit();
	nfc_genl_exit();
	class_unregister(&nfc_class);
}

subsys_initcall(nfc_init);
module_exit(nfc_exit);

MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_DESCRIPTION("NFC Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+537
net/nfc/netlink.c
··· 1 + /* 2 + * Copyright (C) 2011 Instituto Nokia de Tecnologia 3 + * 4 + * Authors: 5 + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> 6 + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the 20 + * Free Software Foundation, Inc., 21 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 + */ 23 + 24 + #include <net/genetlink.h> 25 + #include <linux/nfc.h> 26 + #include <linux/slab.h> 27 + 28 + #include "nfc.h" 29 + 30 + static struct genl_multicast_group nfc_genl_event_mcgrp = { 31 + .name = NFC_GENL_MCAST_EVENT_NAME, 32 + }; 33 + 34 + struct genl_family nfc_genl_family = { 35 + .id = GENL_ID_GENERATE, 36 + .hdrsize = 0, 37 + .name = NFC_GENL_NAME, 38 + .version = NFC_GENL_VERSION, 39 + .maxattr = NFC_ATTR_MAX, 40 + }; 41 + 42 + static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { 43 + [NFC_ATTR_DEVICE_INDEX] = { .type = NLA_U32 }, 44 + [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING, 45 + .len = NFC_DEVICE_NAME_MAXSIZE }, 46 + [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, 47 + }; 48 + 49 + static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, 50 + struct netlink_callback *cb, int flags) 51 + { 52 + void *hdr; 53 + 54 + nfc_dbg("entry"); 55 + 56 + hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 57 + &nfc_genl_family, flags, 
NFC_CMD_GET_TARGET); 58 + if (!hdr) 59 + return -EMSGSIZE; 60 + 61 + genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 62 + 63 + NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx); 64 + NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, 65 + target->supported_protocols); 66 + NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); 67 + NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); 68 + 69 + return genlmsg_end(msg, hdr); 70 + 71 + nla_put_failure: 72 + genlmsg_cancel(msg, hdr); 73 + return -EMSGSIZE; 74 + } 75 + 76 + static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb) 77 + { 78 + struct nfc_dev *dev; 79 + int rc; 80 + u32 idx; 81 + 82 + rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, 83 + nfc_genl_family.attrbuf, 84 + nfc_genl_family.maxattr, 85 + nfc_genl_policy); 86 + if (rc < 0) 87 + return ERR_PTR(rc); 88 + 89 + if (!nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX]) 90 + return ERR_PTR(-EINVAL); 91 + 92 + idx = nla_get_u32(nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX]); 93 + 94 + dev = nfc_get_device(idx); 95 + if (!dev) 96 + return ERR_PTR(-ENODEV); 97 + 98 + return dev; 99 + } 100 + 101 + static int nfc_genl_dump_targets(struct sk_buff *skb, 102 + struct netlink_callback *cb) 103 + { 104 + int i = cb->args[0]; 105 + struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 106 + int rc; 107 + 108 + nfc_dbg("entry"); 109 + 110 + if (!dev) { 111 + dev = __get_device_from_cb(cb); 112 + if (IS_ERR(dev)) 113 + return PTR_ERR(dev); 114 + 115 + cb->args[1] = (long) dev; 116 + } 117 + 118 + spin_lock_bh(&dev->targets_lock); 119 + 120 + cb->seq = dev->targets_generation; 121 + 122 + while (i < dev->n_targets) { 123 + rc = nfc_genl_send_target(skb, &dev->targets[i], cb, 124 + NLM_F_MULTI); 125 + if (rc < 0) 126 + break; 127 + 128 + i++; 129 + } 130 + 131 + spin_unlock_bh(&dev->targets_lock); 132 + 133 + cb->args[0] = i; 134 + 135 + return skb->len; 136 + } 137 + 138 + static int nfc_genl_dump_targets_done(struct 
netlink_callback *cb) 139 + { 140 + struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 141 + 142 + nfc_dbg("entry"); 143 + 144 + if (dev) 145 + nfc_put_device(dev); 146 + 147 + return 0; 148 + } 149 + 150 + int nfc_genl_targets_found(struct nfc_dev *dev) 151 + { 152 + struct sk_buff *msg; 153 + void *hdr; 154 + 155 + nfc_dbg("entry"); 156 + 157 + dev->genl_data.poll_req_pid = 0; 158 + 159 + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 160 + if (!msg) 161 + return -ENOMEM; 162 + 163 + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 164 + NFC_EVENT_TARGETS_FOUND); 165 + if (!hdr) 166 + goto free_msg; 167 + 168 + NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 169 + 170 + genlmsg_end(msg, hdr); 171 + 172 + return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); 173 + 174 + nla_put_failure: 175 + genlmsg_cancel(msg, hdr); 176 + free_msg: 177 + nlmsg_free(msg); 178 + return -EMSGSIZE; 179 + } 180 + 181 + int nfc_genl_device_added(struct nfc_dev *dev) 182 + { 183 + struct sk_buff *msg; 184 + void *hdr; 185 + 186 + nfc_dbg("entry"); 187 + 188 + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 189 + if (!msg) 190 + return -ENOMEM; 191 + 192 + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 193 + NFC_EVENT_DEVICE_ADDED); 194 + if (!hdr) 195 + goto free_msg; 196 + 197 + NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 198 + NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 199 + NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 200 + 201 + genlmsg_end(msg, hdr); 202 + 203 + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 204 + 205 + return 0; 206 + 207 + nla_put_failure: 208 + genlmsg_cancel(msg, hdr); 209 + free_msg: 210 + nlmsg_free(msg); 211 + return -EMSGSIZE; 212 + } 213 + 214 + int nfc_genl_device_removed(struct nfc_dev *dev) 215 + { 216 + struct sk_buff *msg; 217 + void *hdr; 218 + 219 + nfc_dbg("entry"); 220 + 221 + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 222 + if (!msg) 223 + return -ENOMEM; 
224 + 225 + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 226 + NFC_EVENT_DEVICE_REMOVED); 227 + if (!hdr) 228 + goto free_msg; 229 + 230 + NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 231 + 232 + genlmsg_end(msg, hdr); 233 + 234 + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); 235 + 236 + return 0; 237 + 238 + nla_put_failure: 239 + genlmsg_cancel(msg, hdr); 240 + free_msg: 241 + nlmsg_free(msg); 242 + return -EMSGSIZE; 243 + } 244 + 245 + static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, 246 + u32 pid, u32 seq, 247 + struct netlink_callback *cb, 248 + int flags) 249 + { 250 + void *hdr; 251 + 252 + nfc_dbg("entry"); 253 + 254 + hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags, 255 + NFC_CMD_GET_DEVICE); 256 + if (!hdr) 257 + return -EMSGSIZE; 258 + 259 + if (cb) 260 + genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 261 + 262 + NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 263 + NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 264 + NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 265 + 266 + return genlmsg_end(msg, hdr); 267 + 268 + nla_put_failure: 269 + genlmsg_cancel(msg, hdr); 270 + return -EMSGSIZE; 271 + } 272 + 273 + static int nfc_genl_dump_devices(struct sk_buff *skb, 274 + struct netlink_callback *cb) 275 + { 276 + struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; 277 + struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 278 + bool first_call = false; 279 + 280 + nfc_dbg("entry"); 281 + 282 + if (!iter) { 283 + first_call = true; 284 + iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); 285 + if (!iter) 286 + return -ENOMEM; 287 + cb->args[0] = (long) iter; 288 + } 289 + 290 + mutex_lock(&nfc_devlist_mutex); 291 + 292 + cb->seq = nfc_devlist_generation; 293 + 294 + if (first_call) { 295 + nfc_device_iter_init(iter); 296 + dev = nfc_device_iter_next(iter); 297 + } 298 + 299 + while (dev) { 300 + int rc; 301 + 302 + rc = 
nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid, 303 + cb->nlh->nlmsg_seq, 304 + cb, NLM_F_MULTI); 305 + if (rc < 0) 306 + break; 307 + 308 + dev = nfc_device_iter_next(iter); 309 + } 310 + 311 + mutex_unlock(&nfc_devlist_mutex); 312 + 313 + cb->args[1] = (long) dev; 314 + 315 + return skb->len; 316 + } 317 + 318 + static int nfc_genl_dump_devices_done(struct netlink_callback *cb) 319 + { 320 + struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; 321 + 322 + nfc_dbg("entry"); 323 + 324 + nfc_device_iter_exit(iter); 325 + kfree(iter); 326 + 327 + return 0; 328 + } 329 + 330 + static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info) 331 + { 332 + struct sk_buff *msg; 333 + struct nfc_dev *dev; 334 + u32 idx; 335 + int rc = -ENOBUFS; 336 + 337 + nfc_dbg("entry"); 338 + 339 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) 340 + return -EINVAL; 341 + 342 + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 343 + 344 + dev = nfc_get_device(idx); 345 + if (!dev) 346 + return -ENODEV; 347 + 348 + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 349 + if (!msg) { 350 + rc = -ENOMEM; 351 + goto out_putdev; 352 + } 353 + 354 + rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq, 355 + NULL, 0); 356 + if (rc < 0) 357 + goto out_free; 358 + 359 + nfc_put_device(dev); 360 + 361 + return genlmsg_reply(msg, info); 362 + 363 + out_free: 364 + nlmsg_free(msg); 365 + out_putdev: 366 + nfc_put_device(dev); 367 + return rc; 368 + } 369 + 370 + static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) 371 + { 372 + struct nfc_dev *dev; 373 + int rc; 374 + u32 idx; 375 + u32 protocols; 376 + 377 + nfc_dbg("entry"); 378 + 379 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 380 + !info->attrs[NFC_ATTR_PROTOCOLS]) 381 + return -EINVAL; 382 + 383 + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 384 + protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); 385 + 386 + dev = nfc_get_device(idx); 387 + if (!dev) 388 + 
return -ENODEV; 389 + 390 + mutex_lock(&dev->genl_data.genl_data_mutex); 391 + 392 + rc = nfc_start_poll(dev, protocols); 393 + if (!rc) 394 + dev->genl_data.poll_req_pid = info->snd_pid; 395 + 396 + mutex_unlock(&dev->genl_data.genl_data_mutex); 397 + 398 + nfc_put_device(dev); 399 + return rc; 400 + } 401 + 402 + static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) 403 + { 404 + struct nfc_dev *dev; 405 + int rc; 406 + u32 idx; 407 + 408 + nfc_dbg("entry"); 409 + 410 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) 411 + return -EINVAL; 412 + 413 + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 414 + 415 + dev = nfc_get_device(idx); 416 + if (!dev) 417 + return -ENODEV; 418 + 419 + mutex_lock(&dev->genl_data.genl_data_mutex); 420 + 421 + if (dev->genl_data.poll_req_pid != info->snd_pid) { 422 + rc = -EBUSY; 423 + goto out; 424 + } 425 + 426 + rc = nfc_stop_poll(dev); 427 + dev->genl_data.poll_req_pid = 0; 428 + 429 + out: 430 + mutex_unlock(&dev->genl_data.genl_data_mutex); 431 + nfc_put_device(dev); 432 + return rc; 433 + } 434 + 435 + static struct genl_ops nfc_genl_ops[] = { 436 + { 437 + .cmd = NFC_CMD_GET_DEVICE, 438 + .doit = nfc_genl_get_device, 439 + .dumpit = nfc_genl_dump_devices, 440 + .done = nfc_genl_dump_devices_done, 441 + .policy = nfc_genl_policy, 442 + }, 443 + { 444 + .cmd = NFC_CMD_START_POLL, 445 + .doit = nfc_genl_start_poll, 446 + .policy = nfc_genl_policy, 447 + }, 448 + { 449 + .cmd = NFC_CMD_STOP_POLL, 450 + .doit = nfc_genl_stop_poll, 451 + .policy = nfc_genl_policy, 452 + }, 453 + { 454 + .cmd = NFC_CMD_GET_TARGET, 455 + .dumpit = nfc_genl_dump_targets, 456 + .done = nfc_genl_dump_targets_done, 457 + .policy = nfc_genl_policy, 458 + }, 459 + }; 460 + 461 + static int nfc_genl_rcv_nl_event(struct notifier_block *this, 462 + unsigned long event, void *ptr) 463 + { 464 + struct netlink_notify *n = ptr; 465 + struct class_dev_iter iter; 466 + struct nfc_dev *dev; 467 + 468 + if (event != NETLINK_URELEASE || 
n->protocol != NETLINK_GENERIC) 469 + goto out; 470 + 471 + nfc_dbg("NETLINK_URELEASE event from id %d", n->pid); 472 + 473 + nfc_device_iter_init(&iter); 474 + dev = nfc_device_iter_next(&iter); 475 + 476 + while (dev) { 477 + mutex_lock(&dev->genl_data.genl_data_mutex); 478 + if (dev->genl_data.poll_req_pid == n->pid) { 479 + nfc_stop_poll(dev); 480 + dev->genl_data.poll_req_pid = 0; 481 + } 482 + mutex_unlock(&dev->genl_data.genl_data_mutex); 483 + dev = nfc_device_iter_next(&iter); 484 + } 485 + 486 + nfc_device_iter_exit(&iter); 487 + 488 + out: 489 + return NOTIFY_DONE; 490 + } 491 + 492 + void nfc_genl_data_init(struct nfc_genl_data *genl_data) 493 + { 494 + genl_data->poll_req_pid = 0; 495 + mutex_init(&genl_data->genl_data_mutex); 496 + } 497 + 498 + void nfc_genl_data_exit(struct nfc_genl_data *genl_data) 499 + { 500 + mutex_destroy(&genl_data->genl_data_mutex); 501 + } 502 + 503 + static struct notifier_block nl_notifier = { 504 + .notifier_call = nfc_genl_rcv_nl_event, 505 + }; 506 + 507 + /** 508 + * nfc_genl_init() - Initialize netlink interface 509 + * 510 + * This initialization function registers the nfc netlink family. 511 + */ 512 + int __init nfc_genl_init(void) 513 + { 514 + int rc; 515 + 516 + rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops, 517 + ARRAY_SIZE(nfc_genl_ops)); 518 + if (rc) 519 + return rc; 520 + 521 + rc = genl_register_mc_group(&nfc_genl_family, &nfc_genl_event_mcgrp); 522 + 523 + netlink_register_notifier(&nl_notifier); 524 + 525 + return rc; 526 + } 527 + 528 + /** 529 + * nfc_genl_exit() - Deinitialize netlink interface 530 + * 531 + * This exit function unregisters the nfc netlink family. 532 + */ 533 + void nfc_genl_exit(void) 534 + { 535 + netlink_unregister_notifier(&nl_notifier); 536 + genl_unregister_family(&nfc_genl_family); 537 + }
+117
net/nfc/nfc.h
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/* Private header for the NFC subsystem (net/nfc) — not a driver API. */

#ifndef __LOCAL_NFC_H
#define __LOCAL_NFC_H

#include <net/nfc.h>
#include <net/sock.h>

/* Prefixed printk helper implemented in core.c; format-checked. */
__attribute__((format (printf, 2, 3)))
int nfc_printk(const char *level, const char *fmt, ...);

#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg)
#define nfc_dbg(fmt, arg...) pr_debug(fmt "\n", ##arg)

/* One entry per PF_NFC socket protocol; registered in af_nfc.c and
 * dispatched to from nfc_sock_create(). */
struct nfc_protocol {
	int id;			/* NFC_SOCKPROTO_* slot in proto_tab */
	struct proto *proto;
	struct module *owner;
	int (*create)(struct net *net, struct socket *sock,
			const struct nfc_protocol *nfc_proto);
};

/* Raw socket state; the embedded struct sock must stay first so the
 * nfc_rawsock() cast below is valid. */
struct nfc_rawsock {
	struct sock sk;
	struct nfc_dev *dev;		/* device holding the target */
	u32 target_idx;			/* target connected to */
	struct work_struct tx_work;
	bool tx_work_scheduled;		/* guarded by sk_write_queue.lock */
};
#define nfc_rawsock(sk) ((struct nfc_rawsock *) sk)
#define to_rawsock_sk(_tx_work) \
	((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))

int __init rawsock_init(void);
void rawsock_exit(void);

int __init af_nfc_init(void);
void af_nfc_exit(void);
int nfc_proto_register(const struct nfc_protocol *nfc_proto);
void nfc_proto_unregister(const struct nfc_protocol *nfc_proto);

/* Device-list generation and its guard mutex; defined in core.c. */
extern int nfc_devlist_generation;
extern struct mutex nfc_devlist_mutex;

int __init nfc_genl_init(void);
void nfc_genl_exit(void);

void nfc_genl_data_init(struct nfc_genl_data *genl_data);
void nfc_genl_data_exit(struct nfc_genl_data *genl_data);

int nfc_genl_targets_found(struct nfc_dev *dev);

int nfc_genl_device_added(struct nfc_dev *dev);
int nfc_genl_device_removed(struct nfc_dev *dev);

/* Look up a device by index; takes a device reference on success which
 * the caller drops with nfc_put_device(). */
struct nfc_dev *nfc_get_device(unsigned idx);

static inline void nfc_put_device(struct nfc_dev *dev)
{
	put_device(&dev->dev);
}

/* Thin wrappers over the driver-core class iterator for nfc_class. */
static inline void nfc_device_iter_init(struct class_dev_iter *iter)
{
	class_dev_iter_init(iter, &nfc_class, NULL, NULL);
}

static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter)
{
	struct device *d = class_dev_iter_next(iter);
	if (!d)
		return NULL;

	return to_nfc_dev(d);
}

static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
{
	class_dev_iter_exit(iter);
}

/* Core operations implemented in core.c. */
int nfc_start_poll(struct nfc_dev *dev, u32 protocols);

int nfc_stop_poll(struct nfc_dev *dev);

int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);

int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);

/* Ownership of @skb passes to the callee; see nfc_data_exchange in core.c. */
int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
						struct sk_buff *skb,
						data_exchange_cb_t cb,
						void *cb_context);

#endif /* __LOCAL_NFC_H */
+354
net/nfc/rawsock.c
··· 1 + /* 2 + * Copyright (C) 2011 Instituto Nokia de Tecnologia 3 + * 4 + * Authors: 5 + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 6 + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the 20 + * Free Software Foundation, Inc., 21 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 + */ 23 + 24 + #include <net/tcp_states.h> 25 + #include <linux/nfc.h> 26 + 27 + #include "nfc.h" 28 + 29 + static void rawsock_write_queue_purge(struct sock *sk) 30 + { 31 + nfc_dbg("sk=%p", sk); 32 + 33 + spin_lock_bh(&sk->sk_write_queue.lock); 34 + __skb_queue_purge(&sk->sk_write_queue); 35 + nfc_rawsock(sk)->tx_work_scheduled = false; 36 + spin_unlock_bh(&sk->sk_write_queue.lock); 37 + } 38 + 39 + static void rawsock_report_error(struct sock *sk, int err) 40 + { 41 + nfc_dbg("sk=%p err=%d", sk, err); 42 + 43 + sk->sk_shutdown = SHUTDOWN_MASK; 44 + sk->sk_err = -err; 45 + sk->sk_error_report(sk); 46 + 47 + rawsock_write_queue_purge(sk); 48 + } 49 + 50 + static int rawsock_release(struct socket *sock) 51 + { 52 + struct sock *sk = sock->sk; 53 + 54 + nfc_dbg("sock=%p", sock); 55 + 56 + sock_orphan(sk); 57 + sock_put(sk); 58 + 59 + return 0; 60 + } 61 + 62 + static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, 63 + int len, int flags) 64 + { 65 + struct sock *sk = sock->sk; 66 + 
struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; 67 + struct nfc_dev *dev; 68 + int rc = 0; 69 + 70 + nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags); 71 + 72 + if (!addr || len < sizeof(struct sockaddr_nfc) || 73 + addr->sa_family != AF_NFC) 74 + return -EINVAL; 75 + 76 + nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx, 77 + addr->target_idx, addr->nfc_protocol); 78 + 79 + lock_sock(sk); 80 + 81 + if (sock->state == SS_CONNECTED) { 82 + rc = -EISCONN; 83 + goto error; 84 + } 85 + 86 + dev = nfc_get_device(addr->dev_idx); 87 + if (!dev) { 88 + rc = -ENODEV; 89 + goto error; 90 + } 91 + 92 + if (addr->target_idx > dev->target_idx - 1 || 93 + addr->target_idx < dev->target_idx - dev->n_targets) { 94 + rc = -EINVAL; 95 + goto error; 96 + } 97 + 98 + if (addr->target_idx > dev->target_idx - 1 || 99 + addr->target_idx < dev->target_idx - dev->n_targets) { 100 + rc = -EINVAL; 101 + goto error; 102 + } 103 + 104 + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); 105 + if (rc) 106 + goto put_dev; 107 + 108 + nfc_rawsock(sk)->dev = dev; 109 + nfc_rawsock(sk)->target_idx = addr->target_idx; 110 + sock->state = SS_CONNECTED; 111 + sk->sk_state = TCP_ESTABLISHED; 112 + sk->sk_state_change(sk); 113 + 114 + release_sock(sk); 115 + return 0; 116 + 117 + put_dev: 118 + nfc_put_device(dev); 119 + error: 120 + release_sock(sk); 121 + return rc; 122 + } 123 + 124 + static int rawsock_add_header(struct sk_buff *skb) 125 + { 126 + 127 + if (skb_cow_head(skb, 1)) 128 + return -ENOMEM; 129 + 130 + *skb_push(skb, 1) = 0; 131 + 132 + return 0; 133 + } 134 + 135 + static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, 136 + int err) 137 + { 138 + struct sock *sk = (struct sock *) context; 139 + 140 + BUG_ON(in_irq()); 141 + 142 + nfc_dbg("sk=%p err=%d", sk, err); 143 + 144 + if (err) 145 + goto error; 146 + 147 + err = rawsock_add_header(skb); 148 + if (err) 149 + goto error; 150 + 151 + err = sock_queue_rcv_skb(sk, 
skb); 152 + if (err) 153 + goto error; 154 + 155 + spin_lock_bh(&sk->sk_write_queue.lock); 156 + if (!skb_queue_empty(&sk->sk_write_queue)) 157 + schedule_work(&nfc_rawsock(sk)->tx_work); 158 + else 159 + nfc_rawsock(sk)->tx_work_scheduled = false; 160 + spin_unlock_bh(&sk->sk_write_queue.lock); 161 + 162 + sock_put(sk); 163 + return; 164 + 165 + error: 166 + rawsock_report_error(sk, err); 167 + sock_put(sk); 168 + } 169 + 170 + static void rawsock_tx_work(struct work_struct *work) 171 + { 172 + struct sock *sk = to_rawsock_sk(work); 173 + struct nfc_dev *dev = nfc_rawsock(sk)->dev; 174 + u32 target_idx = nfc_rawsock(sk)->target_idx; 175 + struct sk_buff *skb; 176 + int rc; 177 + 178 + nfc_dbg("sk=%p target_idx=%u", sk, target_idx); 179 + 180 + if (sk->sk_shutdown & SEND_SHUTDOWN) { 181 + rawsock_write_queue_purge(sk); 182 + return; 183 + } 184 + 185 + skb = skb_dequeue(&sk->sk_write_queue); 186 + 187 + sock_hold(sk); 188 + rc = nfc_data_exchange(dev, target_idx, skb, 189 + rawsock_data_exchange_complete, sk); 190 + if (rc) { 191 + rawsock_report_error(sk, rc); 192 + sock_put(sk); 193 + } 194 + } 195 + 196 + static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, 197 + struct msghdr *msg, size_t len) 198 + { 199 + struct sock *sk = sock->sk; 200 + struct sk_buff *skb; 201 + int rc; 202 + 203 + nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len); 204 + 205 + if (msg->msg_namelen) 206 + return -EOPNOTSUPP; 207 + 208 + if (sock->state != SS_CONNECTED) 209 + return -ENOTCONN; 210 + 211 + skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, 212 + &rc); 213 + if (!skb) 214 + return rc; 215 + 216 + rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 217 + if (rc < 0) { 218 + kfree_skb(skb); 219 + return rc; 220 + } 221 + 222 + spin_lock_bh(&sk->sk_write_queue.lock); 223 + __skb_queue_tail(&sk->sk_write_queue, skb); 224 + if (!nfc_rawsock(sk)->tx_work_scheduled) { 225 + schedule_work(&nfc_rawsock(sk)->tx_work); 226 + 
nfc_rawsock(sk)->tx_work_scheduled = true; 227 + } 228 + spin_unlock_bh(&sk->sk_write_queue.lock); 229 + 230 + return len; 231 + } 232 + 233 + static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, 234 + struct msghdr *msg, size_t len, int flags) 235 + { 236 + int noblock = flags & MSG_DONTWAIT; 237 + struct sock *sk = sock->sk; 238 + struct sk_buff *skb; 239 + int copied; 240 + int rc; 241 + 242 + nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags); 243 + 244 + skb = skb_recv_datagram(sk, flags, noblock, &rc); 245 + if (!skb) 246 + return rc; 247 + 248 + msg->msg_namelen = 0; 249 + 250 + copied = skb->len; 251 + if (len < copied) { 252 + msg->msg_flags |= MSG_TRUNC; 253 + copied = len; 254 + } 255 + 256 + rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 257 + 258 + skb_free_datagram(sk, skb); 259 + 260 + return rc ? : copied; 261 + } 262 + 263 + 264 + static const struct proto_ops rawsock_ops = { 265 + .family = PF_NFC, 266 + .owner = THIS_MODULE, 267 + .release = rawsock_release, 268 + .bind = sock_no_bind, 269 + .connect = rawsock_connect, 270 + .socketpair = sock_no_socketpair, 271 + .accept = sock_no_accept, 272 + .getname = sock_no_getname, 273 + .poll = datagram_poll, 274 + .ioctl = sock_no_ioctl, 275 + .listen = sock_no_listen, 276 + .shutdown = sock_no_shutdown, 277 + .setsockopt = sock_no_setsockopt, 278 + .getsockopt = sock_no_getsockopt, 279 + .sendmsg = rawsock_sendmsg, 280 + .recvmsg = rawsock_recvmsg, 281 + .mmap = sock_no_mmap, 282 + }; 283 + 284 + static void rawsock_destruct(struct sock *sk) 285 + { 286 + nfc_dbg("sk=%p", sk); 287 + 288 + if (sk->sk_state == TCP_ESTABLISHED) { 289 + nfc_deactivate_target(nfc_rawsock(sk)->dev, 290 + nfc_rawsock(sk)->target_idx); 291 + nfc_put_device(nfc_rawsock(sk)->dev); 292 + } 293 + 294 + skb_queue_purge(&sk->sk_receive_queue); 295 + 296 + if (!sock_flag(sk, SOCK_DEAD)) { 297 + nfc_err("Freeing alive NFC raw socket %p", sk); 298 + return; 299 + } 300 + } 301 + 302 + static int 
rawsock_create(struct net *net, struct socket *sock, 303 + const struct nfc_protocol *nfc_proto) 304 + { 305 + struct sock *sk; 306 + 307 + nfc_dbg("sock=%p", sock); 308 + 309 + if (sock->type != SOCK_SEQPACKET) 310 + return -ESOCKTNOSUPPORT; 311 + 312 + sock->ops = &rawsock_ops; 313 + 314 + sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto); 315 + if (!sk) 316 + return -ENOMEM; 317 + 318 + sock_init_data(sock, sk); 319 + sk->sk_protocol = nfc_proto->id; 320 + sk->sk_destruct = rawsock_destruct; 321 + sock->state = SS_UNCONNECTED; 322 + 323 + INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); 324 + nfc_rawsock(sk)->tx_work_scheduled = false; 325 + 326 + return 0; 327 + } 328 + 329 + static struct proto rawsock_proto = { 330 + .name = "NFC_RAW", 331 + .owner = THIS_MODULE, 332 + .obj_size = sizeof(struct nfc_rawsock), 333 + }; 334 + 335 + static const struct nfc_protocol rawsock_nfc_proto = { 336 + .id = NFC_SOCKPROTO_RAW, 337 + .proto = &rawsock_proto, 338 + .owner = THIS_MODULE, 339 + .create = rawsock_create 340 + }; 341 + 342 + int __init rawsock_init(void) 343 + { 344 + int rc; 345 + 346 + rc = nfc_proto_register(&rawsock_nfc_proto); 347 + 348 + return rc; 349 + } 350 + 351 + void rawsock_exit(void) 352 + { 353 + nfc_proto_unregister(&rawsock_nfc_proto); 354 + }
+11
net/wireless/mlme.c
	nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
}
EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);

/*
 * cfg80211_gtk_rekey_notify - notify userspace that the driver rekeyed
 * the GTK while offloaded.
 *
 * @dev: the net_device the rekey happened on
 * @bssid: BSS the device is associated to
 * @replay_ctr: new replay counter after the rekey
 * @gfp: allocation flags for the netlink message
 *
 * Thin wrapper: resolves the registered cfg80211 device from the
 * netdev's wireless_dev and forwards to nl80211_gtk_rekey_notify(),
 * which emits the NL80211_CMD_SET_REKEY_OFFLOAD multicast event.
 */
void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
			       const u8 *replay_ctr, gfp_t gfp)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct wiphy *wiphy = wdev->wiphy;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

	nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
}
EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
+113
net/wireless/nl80211.c
··· 176 176 [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, 177 177 [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 }, 178 178 [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, 179 + [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED }, 179 180 }; 180 181 181 182 /* policy for the key attributes */ ··· 205 204 [NL80211_WOWLAN_TRIG_DISCONNECT] = { .type = NLA_FLAG }, 206 205 [NL80211_WOWLAN_TRIG_MAGIC_PKT] = { .type = NLA_FLAG }, 207 206 [NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED }, 207 + }; 208 + 209 + /* policy for GTK rekey offload attributes */ 210 + static const struct nla_policy 211 + nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { 212 + [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, 213 + [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, 214 + [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, 208 215 }; 209 216 210 217 /* ifidx get helper */ ··· 5417 5408 return err; 5418 5409 } 5419 5410 5411 + static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) 5412 + { 5413 + struct cfg80211_registered_device *rdev = info->user_ptr[0]; 5414 + struct net_device *dev = info->user_ptr[1]; 5415 + struct wireless_dev *wdev = dev->ieee80211_ptr; 5416 + struct nlattr *tb[NUM_NL80211_REKEY_DATA]; 5417 + struct cfg80211_gtk_rekey_data rekey_data; 5418 + int err; 5419 + 5420 + if (!info->attrs[NL80211_ATTR_REKEY_DATA]) 5421 + return -EINVAL; 5422 + 5423 + err = nla_parse(tb, MAX_NL80211_REKEY_DATA, 5424 + nla_data(info->attrs[NL80211_ATTR_REKEY_DATA]), 5425 + nla_len(info->attrs[NL80211_ATTR_REKEY_DATA]), 5426 + nl80211_rekey_policy); 5427 + if (err) 5428 + return err; 5429 + 5430 + if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) 5431 + return -ERANGE; 5432 + if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) 5433 + return -ERANGE; 5434 + if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) 5435 + return -ERANGE; 5436 + 5437 + memcpy(rekey_data.kek, 
nla_data(tb[NL80211_REKEY_DATA_KEK]), 5438 + NL80211_KEK_LEN); 5439 + memcpy(rekey_data.kck, nla_data(tb[NL80211_REKEY_DATA_KCK]), 5440 + NL80211_KCK_LEN); 5441 + memcpy(rekey_data.replay_ctr, 5442 + nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]), 5443 + NL80211_REPLAY_CTR_LEN); 5444 + 5445 + wdev_lock(wdev); 5446 + if (!wdev->current_bss) { 5447 + err = -ENOTCONN; 5448 + goto out; 5449 + } 5450 + 5451 + if (!rdev->ops->set_rekey_data) { 5452 + err = -EOPNOTSUPP; 5453 + goto out; 5454 + } 5455 + 5456 + err = rdev->ops->set_rekey_data(&rdev->wiphy, dev, &rekey_data); 5457 + out: 5458 + wdev_unlock(wdev); 5459 + return err; 5460 + } 5461 + 5420 5462 #define NL80211_FLAG_NEED_WIPHY 0x01 5421 5463 #define NL80211_FLAG_NEED_NETDEV 0x02 5422 5464 #define NL80211_FLAG_NEED_RTNL 0x04 ··· 5997 5937 .policy = nl80211_policy, 5998 5938 .flags = GENL_ADMIN_PERM, 5999 5939 .internal_flags = NL80211_FLAG_NEED_WIPHY | 5940 + NL80211_FLAG_NEED_RTNL, 5941 + }, 5942 + { 5943 + .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, 5944 + .doit = nl80211_set_rekey_data, 5945 + .policy = nl80211_policy, 5946 + .flags = GENL_ADMIN_PERM, 5947 + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 6000 5948 NL80211_FLAG_NEED_RTNL, 6001 5949 }, 6002 5950 }; ··· 6936 6868 rssi_event); 6937 6869 6938 6870 nla_nest_end(msg, pinfoattr); 6871 + 6872 + if (genlmsg_end(msg, hdr) < 0) { 6873 + nlmsg_free(msg); 6874 + return; 6875 + } 6876 + 6877 + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 6878 + nl80211_mlme_mcgrp.id, gfp); 6879 + return; 6880 + 6881 + nla_put_failure: 6882 + genlmsg_cancel(msg, hdr); 6883 + nlmsg_free(msg); 6884 + } 6885 + 6886 + void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, 6887 + struct net_device *netdev, const u8 *bssid, 6888 + const u8 *replay_ctr, gfp_t gfp) 6889 + { 6890 + struct sk_buff *msg; 6891 + struct nlattr *rekey_attr; 6892 + void *hdr; 6893 + 6894 + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 6895 + if (!msg) 6896 + return; 6897 + 6898 + hdr = 
nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_REKEY_OFFLOAD); 6899 + if (!hdr) { 6900 + nlmsg_free(msg); 6901 + return; 6902 + } 6903 + 6904 + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 6905 + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 6906 + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 6907 + 6908 + rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); 6909 + if (!rekey_attr) 6910 + goto nla_put_failure; 6911 + 6912 + NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR, 6913 + NL80211_REPLAY_CTR_LEN, replay_ctr); 6914 + 6915 + nla_nest_end(msg, rekey_attr); 6939 6916 6940 6917 if (genlmsg_end(msg, hdr) < 0) { 6941 6918 nlmsg_free(msg);
+4
net/wireless/nl80211.h
				     struct net_device *netdev, const u8 *peer,
				     u32 num_packets, gfp_t gfp);

/*
 * Multicast an NL80211_CMD_SET_REKEY_OFFLOAD event carrying the BSSID
 * and the new replay counter after a driver-side GTK rekey.
 */
void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
			      struct net_device *netdev, const u8 *bssid,
			      const u8 *replay_ctr, gfp_t gfp);

#endif /* __NET_WIRELESS_NL80211_H */
+2 -3
net/wireless/scan.c
··· 132 132 int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, 133 133 bool driver_initiated) 134 134 { 135 - int err; 136 135 struct net_device *dev; 137 136 138 137 ASSERT_RDEV_LOCK(rdev); ··· 142 143 dev = rdev->sched_scan_req->dev; 143 144 144 145 if (!driver_initiated) { 145 - err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); 146 + int err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); 146 147 if (err) 147 148 return err; 148 149 } ··· 152 153 kfree(rdev->sched_scan_req); 153 154 rdev->sched_scan_req = NULL; 154 155 155 - return err; 156 + return 0; 156 157 } 157 158 158 159 static void bss_release(struct kref *ref)