Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2018-08-05

Here's the main bluetooth-next pull request for the 4.19 kernel.

- Added support for Bluetooth Advertising Extensions
- Added vendor driver support to hci_h5 HCI driver
- Added serdev support to hci_h5 driver
- Added support for Qualcomm wcn3990 controller
- Added support for RTL8723BS and RTL8723DS controllers
- btusb: Added new ID for Realtek 8723DE
- Several other smaller fixes & cleanups

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+3247 -585
+27 -2
Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
··· 10 10 Required properties: 11 11 - compatible: should contain one of the following: 12 12 * "qcom,qca6174-bt" 13 + * "qcom,wcn3990-bt" 13 14 14 - Optional properties: 15 + Optional properties for compatible string qcom,qca6174-bt: 16 + 15 17 - enable-gpios: gpio specifier used to enable chip 16 18 - clocks: clock provided to the controller (SUSCLK_32KHZ) 17 19 18 - Example: 20 + Required properties for compatible string qcom,wcn3990-bt: 21 + 22 + - vddio-supply: VDD_IO supply regulator handle. 23 + - vddxo-supply: VDD_XO supply regulator handle. 24 + - vddrf-supply: VDD_RF supply regulator handle. 25 + - vddch0-supply: VDD_CH0 supply regulator handle. 26 + 27 + Optional properties for compatible string qcom,wcn3990-bt: 28 + 29 + - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt 30 + 31 + Examples: 19 32 20 33 serial@7570000 { 21 34 label = "BT-UART"; ··· 39 26 40 27 enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>; 41 28 clocks = <&divclk4>; 29 + }; 30 + }; 31 + 32 + serial@898000 { 33 + bluetooth { 34 + compatible = "qcom,wcn3990-bt"; 35 + 36 + vddio-supply = <&vreg_s4a_1p8>; 37 + vddxo-supply = <&vreg_l7a_1p8>; 38 + vddrf-supply = <&vreg_l17a_1p3>; 39 + vddch0-supply = <&vreg_l25a_3p3>; 40 + max-speed = <3200000>; 42 41 }; 43 42 };
+1
drivers/bluetooth/Kconfig
··· 159 159 config BT_HCIUART_3WIRE 160 160 bool "Three-wire UART (H5) protocol support" 161 161 depends on BT_HCIUART 162 + depends on BT_HCIUART_SERDEV 162 163 help 163 164 The HCI Three-wire UART Transport Layer makes it possible to 164 165 use the Bluetooth HCI over a serial port interface. The HCI
+1 -1
drivers/bluetooth/bfusb.c
··· 490 490 count = skb->len; 491 491 492 492 /* Max HCI frame size seems to be 1511 + 1 */ 493 - nskb = bt_skb_alloc(count + 32, GFP_ATOMIC); 493 + nskb = bt_skb_alloc(count + 32, GFP_KERNEL); 494 494 if (!nskb) { 495 495 BT_ERR("Can't allocate memory for new packet"); 496 496 return -ENOMEM;
+1 -1
drivers/bluetooth/bluecard_cs.c
··· 565 565 /* Ericsson baud rate command */ 566 566 unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 }; 567 567 568 - skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); 568 + skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL); 569 569 if (!skb) { 570 570 BT_ERR("Can't allocate mem for new packet"); 571 571 return -1;
+3 -3
drivers/bluetooth/bpa10x.c
··· 289 289 290 290 skb->dev = (void *) hdev; 291 291 292 - urb = usb_alloc_urb(0, GFP_ATOMIC); 292 + urb = usb_alloc_urb(0, GFP_KERNEL); 293 293 if (!urb) 294 294 return -ENOMEM; 295 295 ··· 298 298 299 299 switch (hci_skb_pkt_type(skb)) { 300 300 case HCI_COMMAND_PKT: 301 - dr = kmalloc(sizeof(*dr), GFP_ATOMIC); 301 + dr = kmalloc(sizeof(*dr), GFP_KERNEL); 302 302 if (!dr) { 303 303 usb_free_urb(urb); 304 304 return -ENOMEM; ··· 343 343 344 344 usb_anchor_urb(urb, &data->tx_anchor); 345 345 346 - err = usb_submit_urb(urb, GFP_ATOMIC); 346 + err = usb_submit_urb(urb, GFP_KERNEL); 347 347 if (err < 0) { 348 348 bt_dev_err(hdev, "urb %p submission failed", urb); 349 349 kfree(urb->setup_packet);
+1 -1
drivers/bluetooth/btmrvl_sdio.c
··· 718 718 } 719 719 720 720 /* Allocate buffer */ 721 - skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); 721 + skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL); 722 722 if (!skb) { 723 723 BT_ERR("No free skb"); 724 724 ret = -ENOMEM;
+64 -53
drivers/bluetooth/btqca.c
··· 27 27 28 28 #define VERSION "0.1" 29 29 30 - static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) 30 + int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) 31 31 { 32 32 struct sk_buff *skb; 33 33 struct edl_event_hdr *edl; ··· 35 35 char cmd; 36 36 int err = 0; 37 37 38 - BT_DBG("%s: ROME Patch Version Request", hdev->name); 38 + bt_dev_dbg(hdev, "QCA Version Request"); 39 39 40 40 cmd = EDL_PATCH_VER_REQ_CMD; 41 41 skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, 42 42 &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); 43 43 if (IS_ERR(skb)) { 44 44 err = PTR_ERR(skb); 45 - BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name, 46 - err); 45 + bt_dev_err(hdev, "Reading QCA version information failed (%d)", 46 + err); 47 47 return err; 48 48 } 49 49 50 50 if (skb->len != sizeof(*edl) + sizeof(*ver)) { 51 - BT_ERR("%s: Version size mismatch len %d", hdev->name, 52 - skb->len); 51 + bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len); 53 52 err = -EILSEQ; 54 53 goto out; 55 54 } 56 55 57 56 edl = (struct edl_event_hdr *)(skb->data); 58 57 if (!edl) { 59 - BT_ERR("%s: TLV with no header", hdev->name); 58 + bt_dev_err(hdev, "QCA TLV with no header"); 60 59 err = -EILSEQ; 61 60 goto out; 62 61 } 63 62 64 63 if (edl->cresp != EDL_CMD_REQ_RES_EVT || 65 64 edl->rtype != EDL_APP_VER_RES_EVT) { 66 - BT_ERR("%s: Wrong packet received %d %d", hdev->name, 67 - edl->cresp, edl->rtype); 65 + bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, 66 + edl->rtype); 68 67 err = -EIO; 69 68 goto out; 70 69 } ··· 75 76 BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver)); 76 77 BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id)); 77 78 78 - /* ROME chipset version can be decided by patch and SoC 79 + /* QCA chipset version can be decided by patch and SoC 79 80 * version, combination with upper 2 bytes from SoC 80 81 * and lower 2 bytes from patch will be used. 
81 82 */ 82 - *rome_version = (le32_to_cpu(ver->soc_id) << 16) | 83 + *soc_version = (le32_to_cpu(ver->soc_id) << 16) | 83 84 (le16_to_cpu(ver->rome_ver) & 0x0000ffff); 85 + if (*soc_version == 0) 86 + err = -EILSEQ; 84 87 85 88 out: 86 89 kfree_skb(skb); 90 + if (err) 91 + bt_dev_err(hdev, "QCA Failed to get version (%d)", err); 87 92 88 93 return err; 89 94 } 95 + EXPORT_SYMBOL_GPL(qca_read_soc_version); 90 96 91 - static int rome_reset(struct hci_dev *hdev) 97 + static int qca_send_reset(struct hci_dev *hdev) 92 98 { 93 99 struct sk_buff *skb; 94 100 int err; 95 101 96 - BT_DBG("%s: ROME HCI_RESET", hdev->name); 102 + bt_dev_dbg(hdev, "QCA HCI_RESET"); 97 103 98 104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); 99 105 if (IS_ERR(skb)) { 100 106 err = PTR_ERR(skb); 101 - BT_ERR("%s: Reset failed (%d)", hdev->name, err); 107 + bt_dev_err(hdev, "QCA Reset failed (%d)", err); 102 108 return err; 103 109 } 104 110 ··· 112 108 return 0; 113 109 } 114 110 115 - static void rome_tlv_check_data(struct rome_config *config, 111 + static void qca_tlv_check_data(struct rome_config *config, 116 112 const struct firmware *fw) 117 113 { 118 114 const u8 *data; ··· 211 207 } 212 208 } 213 209 214 - static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size, 210 + static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, 215 211 const u8 *data, enum rome_tlv_dnld_mode mode) 216 212 { 217 213 struct sk_buff *skb; ··· 232 228 HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); 233 229 if (IS_ERR(skb)) { 234 230 err = PTR_ERR(skb); 235 - BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err); 231 + bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err); 236 232 return err; 237 233 } 238 234 239 235 if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) { 240 - BT_ERR("%s: TLV response size mismatch", hdev->name); 236 + bt_dev_err(hdev, "QCA TLV response size mismatch"); 241 237 err = -EILSEQ; 242 238 goto out; 243 239 } 244 240 245 241 edl = (struct 
edl_event_hdr *)(skb->data); 246 242 if (!edl) { 247 - BT_ERR("%s: TLV with no header", hdev->name); 243 + bt_dev_err(hdev, "TLV with no header"); 248 244 err = -EILSEQ; 249 245 goto out; 250 246 } ··· 253 249 254 250 if (edl->cresp != EDL_CMD_REQ_RES_EVT || 255 251 edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) { 256 - BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)", 257 - hdev->name, edl->cresp, edl->rtype, tlv_resp->result); 252 + bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)", 253 + edl->cresp, edl->rtype, tlv_resp->result); 258 254 err = -EIO; 259 255 } 260 256 ··· 264 260 return err; 265 261 } 266 262 267 - static int rome_download_firmware(struct hci_dev *hdev, 263 + static int qca_download_firmware(struct hci_dev *hdev, 268 264 struct rome_config *config) 269 265 { 270 266 const struct firmware *fw; 271 267 const u8 *segment; 272 268 int ret, remain, i = 0; 273 269 274 - bt_dev_info(hdev, "ROME Downloading %s", config->fwname); 270 + bt_dev_info(hdev, "QCA Downloading %s", config->fwname); 275 271 276 272 ret = request_firmware(&fw, config->fwname, &hdev->dev); 277 273 if (ret) { 278 - BT_ERR("%s: Failed to request file: %s (%d)", hdev->name, 279 - config->fwname, ret); 274 + bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", 275 + config->fwname, ret); 280 276 return ret; 281 277 } 282 278 283 - rome_tlv_check_data(config, fw); 279 + qca_tlv_check_data(config, fw); 284 280 285 281 segment = fw->data; 286 282 remain = fw->size; ··· 294 290 if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT) 295 291 config->dnld_mode = ROME_SKIP_EVT_NONE; 296 292 297 - ret = rome_tlv_send_segment(hdev, segsize, segment, 293 + ret = qca_tlv_send_segment(hdev, segsize, segment, 298 294 config->dnld_mode); 299 295 if (ret) 300 296 break; ··· 321 317 HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); 322 318 if (IS_ERR(skb)) { 323 319 err = PTR_ERR(skb); 324 - BT_ERR("%s: Change address command failed (%d)", 325 - hdev->name, err); 320 + 
bt_dev_err(hdev, "QCA Change address command failed (%d)", err); 326 321 return err; 327 322 } 328 323 ··· 331 328 } 332 329 EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); 333 330 334 - int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate) 331 + int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, 332 + enum qca_btsoc_type soc_type, u32 soc_ver) 335 333 { 336 - u32 rome_ver = 0; 337 334 struct rome_config config; 338 335 int err; 336 + u8 rom_ver; 339 337 340 - BT_DBG("%s: ROME setup on UART", hdev->name); 338 + bt_dev_dbg(hdev, "QCA setup on UART"); 341 339 342 340 config.user_baud_rate = baudrate; 343 341 344 - /* Get ROME version information */ 345 - err = rome_patch_ver_req(hdev, &rome_ver); 346 - if (err < 0 || rome_ver == 0) { 347 - BT_ERR("%s: Failed to get version 0x%x", hdev->name, err); 348 - return err; 349 - } 350 - 351 - bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver); 352 - 353 342 /* Download rampatch file */ 354 343 config.type = TLV_TYPE_PATCH; 355 - snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", 356 - rome_ver); 357 - err = rome_download_firmware(hdev, &config); 344 + if (soc_type == QCA_WCN3990) { 345 + /* Firmware files to download are based on ROM version. 346 + * ROM version is derived from last two bytes of soc_ver. 
347 + */ 348 + rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | 349 + (soc_ver & 0x0000000f); 350 + snprintf(config.fwname, sizeof(config.fwname), 351 + "qca/crbtfw%02x.tlv", rom_ver); 352 + } else { 353 + snprintf(config.fwname, sizeof(config.fwname), 354 + "qca/rampatch_%08x.bin", soc_ver); 355 + } 356 + 357 + err = qca_download_firmware(hdev, &config); 358 358 if (err < 0) { 359 - BT_ERR("%s: Failed to download patch (%d)", hdev->name, err); 359 + bt_dev_err(hdev, "QCA Failed to download patch (%d)", err); 360 360 return err; 361 361 } 362 362 363 363 /* Download NVM configuration */ 364 364 config.type = TLV_TYPE_NVM; 365 - snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", 366 - rome_ver); 367 - err = rome_download_firmware(hdev, &config); 365 + if (soc_type == QCA_WCN3990) 366 + snprintf(config.fwname, sizeof(config.fwname), 367 + "qca/crnv%02x.bin", rom_ver); 368 + else 369 + snprintf(config.fwname, sizeof(config.fwname), 370 + "qca/nvm_%08x.bin", soc_ver); 371 + 372 + err = qca_download_firmware(hdev, &config); 368 373 if (err < 0) { 369 - BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err); 374 + bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err); 370 375 return err; 371 376 } 372 377 373 378 /* Perform HCI reset */ 374 - err = rome_reset(hdev); 379 + err = qca_send_reset(hdev); 375 380 if (err < 0) { 376 - BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err); 381 + bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err); 377 382 return err; 378 383 } 379 384 380 - bt_dev_info(hdev, "ROME setup on UART is completed"); 385 + bt_dev_info(hdev, "QCA setup on UART is completed"); 381 386 382 387 return 0; 383 388 } 384 - EXPORT_SYMBOL_GPL(qca_uart_setup_rome); 389 + EXPORT_SYMBOL_GPL(qca_uart_setup); 385 390 386 391 MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>"); 387 392 MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+20 -2
drivers/bluetooth/btqca.h
··· 37 37 #define EDL_TAG_ID_HCI (17) 38 38 #define EDL_TAG_ID_DEEP_SLEEP (27) 39 39 40 + #define QCA_WCN3990_POWERON_PULSE 0xFC 41 + #define QCA_WCN3990_POWEROFF_PULSE 0xC0 42 + 40 43 enum qca_bardrate { 41 44 QCA_BAUDRATE_115200 = 0, 42 45 QCA_BAUDRATE_57600, ··· 127 124 __u8 data[0]; 128 125 } __packed; 129 126 127 + enum qca_btsoc_type { 128 + QCA_INVALID = -1, 129 + QCA_AR3002, 130 + QCA_ROME, 131 + QCA_WCN3990 132 + }; 133 + 130 134 #if IS_ENABLED(CONFIG_BT_QCA) 131 135 132 136 int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); 133 - int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate); 137 + int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, 138 + enum qca_btsoc_type soc_type, u32 soc_ver); 139 + int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); 134 140 135 141 #else 136 142 ··· 148 136 return -EOPNOTSUPP; 149 137 } 150 138 151 - static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed) 139 + static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, 140 + enum qca_btsoc_type soc_type, u32 soc_ver) 141 + { 142 + return -EOPNOTSUPP; 143 + } 144 + 145 + static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) 152 146 { 153 147 return -EOPNOTSUPP; 154 148 }
+376 -144
drivers/bluetooth/btrtl.c
··· 34 34 #define RTL_ROM_LMP_8821A 0x8821 35 35 #define RTL_ROM_LMP_8761A 0x8761 36 36 #define RTL_ROM_LMP_8822B 0x8822 37 + #define RTL_CONFIG_MAGIC 0x8723ab55 37 38 38 39 #define IC_MATCH_FL_LMPSUBV (1 << 0) 39 40 #define IC_MATCH_FL_HCIREV (1 << 1) 41 + #define IC_MATCH_FL_HCIVER (1 << 2) 42 + #define IC_MATCH_FL_HCIBUS (1 << 3) 40 43 #define IC_INFO(lmps, hcir) \ 41 44 .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \ 42 45 .lmp_subver = (lmps), \ ··· 49 46 __u16 match_flags; 50 47 __u16 lmp_subver; 51 48 __u16 hci_rev; 49 + __u8 hci_ver; 50 + __u8 hci_bus; 52 51 bool config_needed; 52 + bool has_rom_version; 53 53 char *fw_name; 54 54 char *cfg_name; 55 55 }; 56 56 57 + struct btrtl_device_info { 58 + const struct id_table *ic_info; 59 + u8 rom_version; 60 + u8 *fw_data; 61 + int fw_len; 62 + u8 *cfg_data; 63 + int cfg_len; 64 + }; 65 + 57 66 static const struct id_table ic_id_table[] = { 67 + { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0, 68 + .config_needed = false, 69 + .has_rom_version = false, 70 + .fw_name = "rtl_bt/rtl8723a_fw.bin", 71 + .cfg_name = NULL }, 72 + 73 + { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0, 74 + .config_needed = false, 75 + .has_rom_version = false, 76 + .fw_name = "rtl_bt/rtl8723a_fw.bin", 77 + .cfg_name = NULL }, 78 + 79 + /* 8723BS */ 80 + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | 81 + IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, 82 + .lmp_subver = RTL_ROM_LMP_8723B, 83 + .hci_rev = 0xb, 84 + .hci_ver = 6, 85 + .hci_bus = HCI_UART, 86 + .config_needed = true, 87 + .has_rom_version = true, 88 + .fw_name = "rtl_bt/rtl8723bs_fw.bin", 89 + .cfg_name = "rtl_bt/rtl8723bs_config" }, 90 + 58 91 /* 8723B */ 59 92 { IC_INFO(RTL_ROM_LMP_8723B, 0xb), 60 93 .config_needed = false, 94 + .has_rom_version = true, 61 95 .fw_name = "rtl_bt/rtl8723b_fw.bin", 62 - .cfg_name = "rtl_bt/rtl8723b_config.bin" }, 96 + .cfg_name = "rtl_bt/rtl8723b_config" }, 63 97 64 98 /* 8723D */ 65 99 { IC_INFO(RTL_ROM_LMP_8723B, 0xd), 66 100 
.config_needed = true, 101 + .has_rom_version = true, 67 102 .fw_name = "rtl_bt/rtl8723d_fw.bin", 68 - .cfg_name = "rtl_bt/rtl8723d_config.bin" }, 103 + .cfg_name = "rtl_bt/rtl8723d_config" }, 104 + 105 + /* 8723DS */ 106 + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | 107 + IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, 108 + .lmp_subver = RTL_ROM_LMP_8723B, 109 + .hci_rev = 0xd, 110 + .hci_ver = 8, 111 + .hci_bus = HCI_UART, 112 + .config_needed = true, 113 + .has_rom_version = true, 114 + .fw_name = "rtl_bt/rtl8723ds_fw.bin", 115 + .cfg_name = "rtl_bt/rtl8723ds_config" }, 69 116 70 117 /* 8821A */ 71 118 { IC_INFO(RTL_ROM_LMP_8821A, 0xa), 72 119 .config_needed = false, 120 + .has_rom_version = true, 73 121 .fw_name = "rtl_bt/rtl8821a_fw.bin", 74 - .cfg_name = "rtl_bt/rtl8821a_config.bin" }, 122 + .cfg_name = "rtl_bt/rtl8821a_config" }, 75 123 76 124 /* 8821C */ 77 125 { IC_INFO(RTL_ROM_LMP_8821A, 0xc), 78 126 .config_needed = false, 127 + .has_rom_version = true, 79 128 .fw_name = "rtl_bt/rtl8821c_fw.bin", 80 - .cfg_name = "rtl_bt/rtl8821c_config.bin" }, 129 + .cfg_name = "rtl_bt/rtl8821c_config" }, 81 130 82 131 /* 8761A */ 83 132 { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0, 84 133 .config_needed = false, 134 + .has_rom_version = true, 85 135 .fw_name = "rtl_bt/rtl8761a_fw.bin", 86 - .cfg_name = "rtl_bt/rtl8761a_config.bin" }, 136 + .cfg_name = "rtl_bt/rtl8761a_config" }, 87 137 88 138 /* 8822B */ 89 139 { IC_INFO(RTL_ROM_LMP_8822B, 0xb), 90 140 .config_needed = true, 141 + .has_rom_version = true, 91 142 .fw_name = "rtl_bt/rtl8822b_fw.bin", 92 - .cfg_name = "rtl_bt/rtl8822b_config.bin" }, 143 + .cfg_name = "rtl_bt/rtl8822b_config" }, 93 144 }; 145 + 146 + static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, 147 + u8 hci_ver, u8 hci_bus) 148 + { 149 + int i; 150 + 151 + for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) { 152 + if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) && 153 + (ic_id_table[i].lmp_subver != lmp_subver)) 
154 + continue; 155 + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && 156 + (ic_id_table[i].hci_rev != hci_rev)) 157 + continue; 158 + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) && 159 + (ic_id_table[i].hci_ver != hci_ver)) 160 + continue; 161 + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) && 162 + (ic_id_table[i].hci_bus != hci_bus)) 163 + continue; 164 + 165 + break; 166 + } 167 + if (i >= ARRAY_SIZE(ic_id_table)) 168 + return NULL; 169 + 170 + return &ic_id_table[i]; 171 + } 94 172 95 173 static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) 96 174 { ··· 181 97 /* Read RTL ROM version command */ 182 98 skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); 183 99 if (IS_ERR(skb)) { 184 - BT_ERR("%s: Read ROM version failed (%ld)", 185 - hdev->name, PTR_ERR(skb)); 100 + rtl_dev_err(hdev, "Read ROM version failed (%ld)\n", 101 + PTR_ERR(skb)); 186 102 return PTR_ERR(skb); 187 103 } 188 104 189 105 if (skb->len != sizeof(*rom_version)) { 190 - BT_ERR("%s: RTL version event length mismatch", hdev->name); 106 + rtl_dev_err(hdev, "RTL version event length mismatch\n"); 191 107 kfree_skb(skb); 192 108 return -EIO; 193 109 } 194 110 195 111 rom_version = (struct rtl_rom_version_evt *)skb->data; 196 - bt_dev_info(hdev, "rom_version status=%x version=%x", 197 - rom_version->status, rom_version->version); 112 + rtl_dev_info(hdev, "rom_version status=%x version=%x\n", 113 + rom_version->status, rom_version->version); 198 114 199 115 *version = rom_version->version; 200 116 ··· 202 118 return 0; 203 119 } 204 120 205 - static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, 206 - const struct firmware *fw, 121 + static int rtlbt_parse_firmware(struct hci_dev *hdev, 122 + struct btrtl_device_info *btrtl_dev, 207 123 unsigned char **_buf) 208 124 { 209 125 const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; 210 126 struct rtl_epatch_header *epatch_info; 211 127 unsigned char *buf; 212 - int i, ret, len; 128 + 
int i, len; 213 129 size_t min_size; 214 - u8 opcode, length, data, rom_version = 0; 130 + u8 opcode, length, data; 215 131 int project_id = -1; 216 132 const unsigned char *fwptr, *chip_id_base; 217 133 const unsigned char *patch_length_base, *patch_offset_base; ··· 230 146 { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ 231 147 }; 232 148 233 - ret = rtl_read_rom_version(hdev, &rom_version); 234 - if (ret) 235 - return ret; 236 - 237 149 min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; 238 - if (fw->size < min_size) 150 + if (btrtl_dev->fw_len < min_size) 239 151 return -EINVAL; 240 152 241 - fwptr = fw->data + fw->size - sizeof(extension_sig); 153 + fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig); 242 154 if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { 243 - BT_ERR("%s: extension section signature mismatch", hdev->name); 155 + rtl_dev_err(hdev, "extension section signature mismatch\n"); 244 156 return -EINVAL; 245 157 } 246 158 ··· 246 166 * Once we have that, we double-check that that project_id is suitable 247 167 * for the hardware we are working with. 
248 168 */ 249 - while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { 169 + while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) { 250 170 opcode = *--fwptr; 251 171 length = *--fwptr; 252 172 data = *--fwptr; ··· 257 177 break; 258 178 259 179 if (length == 0) { 260 - BT_ERR("%s: found instruction with length 0", 261 - hdev->name); 180 + rtl_dev_err(hdev, "found instruction with length 0\n"); 262 181 return -EINVAL; 263 182 } 264 183 ··· 270 191 } 271 192 272 193 if (project_id < 0) { 273 - BT_ERR("%s: failed to find version instruction", hdev->name); 194 + rtl_dev_err(hdev, "failed to find version instruction\n"); 274 195 return -EINVAL; 275 196 } 276 197 ··· 281 202 } 282 203 283 204 if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { 284 - BT_ERR("%s: unknown project id %d", hdev->name, project_id); 205 + rtl_dev_err(hdev, "unknown project id %d\n", project_id); 285 206 return -EINVAL; 286 207 } 287 208 288 - if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) { 289 - BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, 290 - project_id_to_lmp_subver[i].lmp_subver, lmp_subver); 209 + if (btrtl_dev->ic_info->lmp_subver != 210 + project_id_to_lmp_subver[i].lmp_subver) { 211 + rtl_dev_err(hdev, "firmware is for %x but this is a %x\n", 212 + project_id_to_lmp_subver[i].lmp_subver, 213 + btrtl_dev->ic_info->lmp_subver); 291 214 return -EINVAL; 292 215 } 293 216 294 - epatch_info = (struct rtl_epatch_header *)fw->data; 217 + epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; 295 218 if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { 296 - BT_ERR("%s: bad EPATCH signature", hdev->name); 219 + rtl_dev_err(hdev, "bad EPATCH signature\n"); 297 220 return -EINVAL; 298 221 } 299 222 ··· 310 229 * Find the right patch for this chip. 
311 230 */ 312 231 min_size += 8 * num_patches; 313 - if (fw->size < min_size) 232 + if (btrtl_dev->fw_len < min_size) 314 233 return -EINVAL; 315 234 316 - chip_id_base = fw->data + sizeof(struct rtl_epatch_header); 235 + chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header); 317 236 patch_length_base = chip_id_base + (sizeof(u16) * num_patches); 318 237 patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); 319 238 for (i = 0; i < num_patches; i++) { 320 239 u16 chip_id = get_unaligned_le16(chip_id_base + 321 240 (i * sizeof(u16))); 322 - if (chip_id == rom_version + 1) { 241 + if (chip_id == btrtl_dev->rom_version + 1) { 323 242 patch_length = get_unaligned_le16(patch_length_base + 324 243 (i * sizeof(u16))); 325 244 patch_offset = get_unaligned_le32(patch_offset_base + ··· 329 248 } 330 249 331 250 if (!patch_offset) { 332 - BT_ERR("%s: didn't find patch for chip id %d", 333 - hdev->name, rom_version); 251 + rtl_dev_err(hdev, "didn't find patch for chip id %d", 252 + btrtl_dev->rom_version); 334 253 return -EINVAL; 335 254 } 336 255 337 256 BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); 338 257 min_size = patch_offset + patch_length; 339 - if (fw->size < min_size) 258 + if (btrtl_dev->fw_len < min_size) 340 259 return -EINVAL; 341 260 342 261 /* Copy the firmware into a new buffer and write the version at 343 262 * the end. 
344 263 */ 345 264 len = patch_length; 346 - buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); 265 + buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length, 266 + GFP_KERNEL); 347 267 if (!buf) 348 268 return -ENOMEM; 349 269 ··· 383 301 skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, 384 302 HCI_INIT_TIMEOUT); 385 303 if (IS_ERR(skb)) { 386 - BT_ERR("%s: download fw command failed (%ld)", 387 - hdev->name, PTR_ERR(skb)); 304 + rtl_dev_err(hdev, "download fw command failed (%ld)\n", 305 + PTR_ERR(skb)); 388 306 ret = -PTR_ERR(skb); 389 307 goto out; 390 308 } 391 309 392 310 if (skb->len != sizeof(struct rtl_download_response)) { 393 - BT_ERR("%s: download fw event length mismatch", 394 - hdev->name); 311 + rtl_dev_err(hdev, "download fw event length mismatch\n"); 395 312 kfree_skb(skb); 396 313 ret = -EIO; 397 314 goto out; ··· 405 324 return ret; 406 325 } 407 326 408 - static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) 327 + static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) 409 328 { 410 329 const struct firmware *fw; 411 330 int ret; 412 331 413 - bt_dev_info(hdev, "rtl: loading %s", name); 332 + rtl_dev_info(hdev, "rtl: loading %s\n", name); 414 333 ret = request_firmware(&fw, name, &hdev->dev); 415 334 if (ret < 0) 416 335 return ret; ··· 424 343 return ret; 425 344 } 426 345 427 - static int btrtl_setup_rtl8723a(struct hci_dev *hdev) 346 + static int btrtl_setup_rtl8723a(struct hci_dev *hdev, 347 + struct btrtl_device_info *btrtl_dev) 428 348 { 429 - const struct firmware *fw; 430 - int ret; 431 - 432 - bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin"); 433 - ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev); 434 - if (ret < 0) { 435 - BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); 436 - return ret; 437 - } 438 - 439 - if (fw->size < 8) { 440 - ret = -EINVAL; 441 - goto out; 442 - } 349 + if (btrtl_dev->fw_len < 8) 350 + return 
-EINVAL; 443 351 444 352 /* Check that the firmware doesn't have the epatch signature 445 353 * (which is only for RTL8723B and newer). 446 354 */ 447 - if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { 448 - BT_ERR("%s: unexpected EPATCH signature!", hdev->name); 449 - ret = -EINVAL; 450 - goto out; 451 - } 452 - 453 - ret = rtl_download_firmware(hdev, fw->data, fw->size); 454 - 455 - out: 456 - release_firmware(fw); 457 - return ret; 458 - } 459 - 460 - static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev, 461 - u16 lmp_subver) 462 - { 463 - unsigned char *fw_data = NULL; 464 - const struct firmware *fw; 465 - int ret; 466 - int cfg_sz; 467 - u8 *cfg_buff = NULL; 468 - u8 *tbuff; 469 - char *cfg_name = NULL; 470 - char *fw_name = NULL; 471 - int i; 472 - 473 - for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) { 474 - if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) && 475 - (ic_id_table[i].lmp_subver != lmp_subver)) 476 - continue; 477 - if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && 478 - (ic_id_table[i].hci_rev != hci_rev)) 479 - continue; 480 - 481 - break; 482 - } 483 - 484 - if (i >= ARRAY_SIZE(ic_id_table)) { 485 - BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x", 486 - hdev->name, lmp_subver, hci_rev); 355 + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) { 356 + rtl_dev_err(hdev, "unexpected EPATCH signature!\n"); 487 357 return -EINVAL; 488 358 } 489 359 490 - cfg_name = ic_id_table[i].cfg_name; 360 + return rtl_download_firmware(hdev, btrtl_dev->fw_data, 361 + btrtl_dev->fw_len); 362 + } 491 363 492 - if (cfg_name) { 493 - cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff); 494 - if (cfg_sz < 0) { 495 - cfg_sz = 0; 496 - if (ic_id_table[i].config_needed) 497 - BT_ERR("Necessary config file %s not found\n", 498 - cfg_name); 499 - } 500 - } else 501 - cfg_sz = 0; 364 + static int btrtl_setup_rtl8723b(struct hci_dev *hdev, 365 + struct btrtl_device_info *btrtl_dev) 366 + { 367 + unsigned char *fw_data = NULL; 
368 + int ret; 369 + u8 *tbuff; 502 370 503 - fw_name = ic_id_table[i].fw_name; 504 - bt_dev_info(hdev, "rtl: loading %s", fw_name); 505 - ret = request_firmware(&fw, fw_name, &hdev->dev); 506 - if (ret < 0) { 507 - BT_ERR("%s: Failed to load %s", hdev->name, fw_name); 508 - goto err_req_fw; 509 - } 510 - 511 - ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data); 371 + ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data); 512 372 if (ret < 0) 513 373 goto out; 514 374 515 - if (cfg_sz) { 516 - tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL); 375 + if (btrtl_dev->cfg_len > 0) { 376 + tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); 517 377 if (!tbuff) { 518 378 ret = -ENOMEM; 519 379 goto out; ··· 463 441 memcpy(tbuff, fw_data, ret); 464 442 kfree(fw_data); 465 443 466 - memcpy(tbuff + ret, cfg_buff, cfg_sz); 467 - ret += cfg_sz; 444 + memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); 445 + ret += btrtl_dev->cfg_len; 468 446 469 447 fw_data = tbuff; 470 448 } 471 449 472 - bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret); 450 + rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret); 473 451 474 452 ret = rtl_download_firmware(hdev, fw_data, ret); 475 453 476 454 out: 477 - release_firmware(fw); 478 455 kfree(fw_data); 479 - err_req_fw: 480 - if (cfg_sz) 481 - kfree(cfg_buff); 482 456 return ret; 483 457 } 484 458 ··· 485 467 skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, 486 468 HCI_INIT_TIMEOUT); 487 469 if (IS_ERR(skb)) { 488 - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", 489 - hdev->name, PTR_ERR(skb)); 470 + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n", 471 + PTR_ERR(skb)); 490 472 return skb; 491 473 } 492 474 493 475 if (skb->len != sizeof(struct hci_rp_read_local_version)) { 494 - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", 495 - hdev->name); 476 + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n"); 496 477 kfree_skb(skb); 497 478 
return ERR_PTR(-EIO); 498 479 } ··· 499 482 return skb; 500 483 } 501 484 502 - int btrtl_setup_realtek(struct hci_dev *hdev) 485 + void btrtl_free(struct btrtl_device_info *btrtl_dev) 503 486 { 487 + kfree(btrtl_dev->fw_data); 488 + kfree(btrtl_dev->cfg_data); 489 + kfree(btrtl_dev); 490 + } 491 + EXPORT_SYMBOL_GPL(btrtl_free); 492 + 493 + struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, 494 + const char *postfix) 495 + { 496 + struct btrtl_device_info *btrtl_dev; 504 497 struct sk_buff *skb; 505 498 struct hci_rp_read_local_version *resp; 499 + char cfg_name[40]; 506 500 u16 hci_rev, lmp_subver; 501 + u8 hci_ver; 502 + int ret; 503 + 504 + btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); 505 + if (!btrtl_dev) { 506 + ret = -ENOMEM; 507 + goto err_alloc; 508 + } 507 509 508 510 skb = btrtl_read_local_version(hdev); 509 - if (IS_ERR(skb)) 510 - return -PTR_ERR(skb); 511 + if (IS_ERR(skb)) { 512 + ret = PTR_ERR(skb); 513 + goto err_free; 514 + } 511 515 512 516 resp = (struct hci_rp_read_local_version *)skb->data; 513 - bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x " 514 - "lmp_ver=%02x lmp_subver=%04x", 515 - resp->hci_ver, resp->hci_rev, 516 - resp->lmp_ver, resp->lmp_subver); 517 + rtl_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n", 518 + resp->hci_ver, resp->hci_rev, 519 + resp->lmp_ver, resp->lmp_subver); 517 520 521 + hci_ver = resp->hci_ver; 518 522 hci_rev = le16_to_cpu(resp->hci_rev); 519 523 lmp_subver = le16_to_cpu(resp->lmp_subver); 520 524 kfree_skb(skb); 521 525 526 + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, 527 + hdev->bus); 528 + 529 + if (!btrtl_dev->ic_info) { 530 + rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", 531 + lmp_subver, hci_rev, hci_ver); 532 + ret = -EINVAL; 533 + goto err_free; 534 + } 535 + 536 + if (btrtl_dev->ic_info->has_rom_version) { 537 + ret = rtl_read_rom_version(hdev, 
&btrtl_dev->rom_version); 538 + if (ret) 539 + goto err_free; 540 + } 541 + 542 + btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name, 543 + &btrtl_dev->fw_data); 544 + if (btrtl_dev->fw_len < 0) { 545 + rtl_dev_err(hdev, "firmware file %s not found\n", 546 + btrtl_dev->ic_info->fw_name); 547 + ret = btrtl_dev->fw_len; 548 + goto err_free; 549 + } 550 + 551 + if (btrtl_dev->ic_info->cfg_name) { 552 + if (postfix) { 553 + snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin", 554 + btrtl_dev->ic_info->cfg_name, postfix); 555 + } else { 556 + snprintf(cfg_name, sizeof(cfg_name), "%s.bin", 557 + btrtl_dev->ic_info->cfg_name); 558 + } 559 + btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name, 560 + &btrtl_dev->cfg_data); 561 + if (btrtl_dev->ic_info->config_needed && 562 + btrtl_dev->cfg_len <= 0) { 563 + rtl_dev_err(hdev, "mandatory config file %s not found\n", 564 + btrtl_dev->ic_info->cfg_name); 565 + ret = btrtl_dev->cfg_len; 566 + goto err_free; 567 + } 568 + } 569 + 570 + return btrtl_dev; 571 + 572 + err_free: 573 + btrtl_free(btrtl_dev); 574 + err_alloc: 575 + return ERR_PTR(ret); 576 + } 577 + EXPORT_SYMBOL_GPL(btrtl_initialize); 578 + 579 + int btrtl_download_firmware(struct hci_dev *hdev, 580 + struct btrtl_device_info *btrtl_dev) 581 + { 522 582 /* Match a set of subver values that correspond to stock firmware, 523 583 * which is not compatible with standard btusb. 524 584 * If matched, upload an alternative firmware that does conform to 525 585 * standard btusb. Once that firmware is uploaded, the subver changes 526 586 * to a different value. 
527 587 */ 528 - switch (lmp_subver) { 588 + switch (btrtl_dev->ic_info->lmp_subver) { 529 589 case RTL_ROM_LMP_8723A: 530 590 case RTL_ROM_LMP_3499: 531 - return btrtl_setup_rtl8723a(hdev); 591 + return btrtl_setup_rtl8723a(hdev, btrtl_dev); 532 592 case RTL_ROM_LMP_8723B: 533 593 case RTL_ROM_LMP_8821A: 534 594 case RTL_ROM_LMP_8761A: 535 595 case RTL_ROM_LMP_8822B: 536 - return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver); 596 + return btrtl_setup_rtl8723b(hdev, btrtl_dev); 537 597 default: 538 - bt_dev_info(hdev, "rtl: assuming no firmware upload needed"); 598 + rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n"); 539 599 return 0; 540 600 } 541 601 } 602 + EXPORT_SYMBOL_GPL(btrtl_download_firmware); 603 + 604 + int btrtl_setup_realtek(struct hci_dev *hdev) 605 + { 606 + struct btrtl_device_info *btrtl_dev; 607 + int ret; 608 + 609 + btrtl_dev = btrtl_initialize(hdev, NULL); 610 + if (IS_ERR(btrtl_dev)) 611 + return PTR_ERR(btrtl_dev); 612 + 613 + ret = btrtl_download_firmware(hdev, btrtl_dev); 614 + 615 + btrtl_free(btrtl_dev); 616 + 617 + return ret; 618 + } 542 619 EXPORT_SYMBOL_GPL(btrtl_setup_realtek); 620 + 621 + static unsigned int btrtl_convert_baudrate(u32 device_baudrate) 622 + { 623 + switch (device_baudrate) { 624 + case 0x0252a00a: 625 + return 230400; 626 + 627 + case 0x05f75004: 628 + return 921600; 629 + 630 + case 0x00005004: 631 + return 1000000; 632 + 633 + case 0x04928002: 634 + case 0x01128002: 635 + return 1500000; 636 + 637 + case 0x00005002: 638 + return 2000000; 639 + 640 + case 0x0000b001: 641 + return 2500000; 642 + 643 + case 0x04928001: 644 + return 3000000; 645 + 646 + case 0x052a6001: 647 + return 3500000; 648 + 649 + case 0x00005001: 650 + return 4000000; 651 + 652 + case 0x0252c014: 653 + default: 654 + return 115200; 655 + } 656 + } 657 + 658 + int btrtl_get_uart_settings(struct hci_dev *hdev, 659 + struct btrtl_device_info *btrtl_dev, 660 + unsigned int *controller_baudrate, 661 + u32 *device_baudrate, bool 
*flow_control) 662 + { 663 + struct rtl_vendor_config *config; 664 + struct rtl_vendor_config_entry *entry; 665 + int i, total_data_len; 666 + bool found = false; 667 + 668 + total_data_len = btrtl_dev->cfg_len - sizeof(*config); 669 + if (total_data_len <= 0) { 670 + rtl_dev_warn(hdev, "no config loaded\n"); 671 + return -EINVAL; 672 + } 673 + 674 + config = (struct rtl_vendor_config *)btrtl_dev->cfg_data; 675 + if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) { 676 + rtl_dev_err(hdev, "invalid config magic\n"); 677 + return -EINVAL; 678 + } 679 + 680 + if (total_data_len < le16_to_cpu(config->total_len)) { 681 + rtl_dev_err(hdev, "config is too short\n"); 682 + return -EINVAL; 683 + } 684 + 685 + for (i = 0; i < total_data_len; ) { 686 + entry = ((void *)config->entry) + i; 687 + 688 + switch (le16_to_cpu(entry->offset)) { 689 + case 0xc: 690 + if (entry->len < sizeof(*device_baudrate)) { 691 + rtl_dev_err(hdev, "invalid UART config entry\n"); 692 + return -EINVAL; 693 + } 694 + 695 + *device_baudrate = get_unaligned_le32(entry->data); 696 + *controller_baudrate = btrtl_convert_baudrate( 697 + *device_baudrate); 698 + 699 + if (entry->len >= 13) 700 + *flow_control = !!(entry->data[12] & BIT(2)); 701 + else 702 + *flow_control = false; 703 + 704 + found = true; 705 + break; 706 + 707 + default: 708 + rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n", 709 + le16_to_cpu(entry->offset), entry->len); 710 + break; 711 + }; 712 + 713 + i += sizeof(*entry) + entry->len; 714 + } 715 + 716 + if (!found) { 717 + rtl_dev_err(hdev, "no UART config entry found\n"); 718 + return -ENOENT; 719 + } 720 + 721 + rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate); 722 + rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate); 723 + rtl_dev_dbg(hdev, "flow control %d\n", *flow_control); 724 + 725 + return 0; 726 + } 727 + EXPORT_SYMBOL_GPL(btrtl_get_uart_settings); 543 728 544 729 MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>"); 545 730 
MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); 546 731 MODULE_VERSION(VERSION); 547 732 MODULE_LICENSE("GPL"); 733 + MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin"); 734 + MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin"); 735 + MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin"); 736 + MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin"); 737 + MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin"); 738 + MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin"); 739 + MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin"); 740 + MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin"); 741 + MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin"); 742 + MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin"); 743 + MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin"); 744 + MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin"); 745 + MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
+53
drivers/bluetooth/btrtl.h
··· 17 17 18 18 #define RTL_FRAG_LEN 252 19 19 20 + #define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__) 21 + #define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__) 22 + #define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__) 23 + #define rtl_dev_dbg(dev, fmt, ...) bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__) 24 + 25 + struct btrtl_device_info; 26 + 20 27 struct rtl_download_cmd { 21 28 __u8 index; 22 29 __u8 data[RTL_FRAG_LEN]; ··· 45 38 __le16 num_patches; 46 39 } __packed; 47 40 41 + struct rtl_vendor_config_entry { 42 + __le16 offset; 43 + __u8 len; 44 + __u8 data[0]; 45 + } __packed; 46 + 47 + struct rtl_vendor_config { 48 + __le32 signature; 49 + __le16 total_len; 50 + struct rtl_vendor_config_entry entry[0]; 51 + } __packed; 52 + 48 53 #if IS_ENABLED(CONFIG_BT_RTL) 49 54 55 + struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, 56 + const char *postfix); 57 + void btrtl_free(struct btrtl_device_info *btrtl_dev); 58 + int btrtl_download_firmware(struct hci_dev *hdev, 59 + struct btrtl_device_info *btrtl_dev); 50 60 int btrtl_setup_realtek(struct hci_dev *hdev); 61 + int btrtl_get_uart_settings(struct hci_dev *hdev, 62 + struct btrtl_device_info *btrtl_dev, 63 + unsigned int *controller_baudrate, 64 + u32 *device_baudrate, bool *flow_control); 51 65 52 66 #else 67 + 68 + static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, 69 + const char *postfix) 70 + { 71 + return ERR_PTR(-EOPNOTSUPP); 72 + } 73 + 74 + static inline void btrtl_free(struct btrtl_device_info *btrtl_dev) 75 + { 76 + } 77 + 78 + static inline int btrtl_download_firmware(struct hci_dev *hdev, 79 + struct btrtl_device_info *btrtl_dev) 80 + { 81 + return -EOPNOTSUPP; 82 + } 53 83 54 84 static inline int btrtl_setup_realtek(struct hci_dev *hdev) 55 85 { 56 86 return -EOPNOTSUPP; 87 + } 88 + 89 + static inline int btrtl_get_uart_settings(struct hci_dev *hdev, 90 + struct 
btrtl_device_info *btrtl_dev, 91 + unsigned int *controller_baudrate, 92 + u32 *device_baudrate, 93 + bool *flow_control) 94 + { 95 + return -ENOENT; 57 96 } 58 97 59 98 #endif
+67 -49
drivers/bluetooth/btusb.c
··· 374 374 { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, 375 375 376 376 /* Additional Realtek 8723DE Bluetooth devices */ 377 + { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, 377 378 { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, 378 379 379 380 /* Additional Realtek 8821AE Bluetooth devices */ ··· 510 509 static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) 511 510 { 512 511 struct sk_buff *skb; 512 + unsigned long flags; 513 513 int err = 0; 514 514 515 - spin_lock(&data->rxlock); 515 + spin_lock_irqsave(&data->rxlock, flags); 516 516 skb = data->evt_skb; 517 517 518 518 while (count) { ··· 558 556 } 559 557 560 558 data->evt_skb = skb; 561 - spin_unlock(&data->rxlock); 559 + spin_unlock_irqrestore(&data->rxlock, flags); 562 560 563 561 return err; 564 562 } ··· 566 564 static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) 567 565 { 568 566 struct sk_buff *skb; 567 + unsigned long flags; 569 568 int err = 0; 570 569 571 - spin_lock(&data->rxlock); 570 + spin_lock_irqsave(&data->rxlock, flags); 572 571 skb = data->acl_skb; 573 572 574 573 while (count) { ··· 616 613 } 617 614 618 615 data->acl_skb = skb; 619 - spin_unlock(&data->rxlock); 616 + spin_unlock_irqrestore(&data->rxlock, flags); 620 617 621 618 return err; 622 619 } ··· 624 621 static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) 625 622 { 626 623 struct sk_buff *skb; 624 + unsigned long flags; 627 625 int err = 0; 628 626 629 - spin_lock(&data->rxlock); 627 + spin_lock_irqsave(&data->rxlock, flags); 630 628 skb = data->sco_skb; 631 629 632 630 while (count) { ··· 672 668 } 673 669 674 670 data->sco_skb = skb; 675 - spin_unlock(&data->rxlock); 671 + spin_unlock_irqrestore(&data->rxlock, flags); 676 672 677 673 return err; 678 674 } ··· 1070 1066 struct sk_buff *skb = urb->context; 1071 1067 struct hci_dev *hdev = (struct hci_dev *)skb->dev; 1072 1068 struct btusb_data *data = 
hci_get_drvdata(hdev); 1069 + unsigned long flags; 1073 1070 1074 1071 BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, 1075 1072 urb->actual_length); ··· 1084 1079 hdev->stat.err_tx++; 1085 1080 1086 1081 done: 1087 - spin_lock(&data->txlock); 1082 + spin_lock_irqsave(&data->txlock, flags); 1088 1083 data->tx_in_flight--; 1089 - spin_unlock(&data->txlock); 1084 + spin_unlock_irqrestore(&data->txlock, flags); 1090 1085 1091 1086 kfree(urb->setup_packet); 1092 1087 ··· 1598 1593 ret = request_firmware(&fw, fwname, &hdev->dev); 1599 1594 if (ret < 0) { 1600 1595 if (ret == -EINVAL) { 1601 - BT_ERR("%s Intel firmware file request failed (%d)", 1602 - hdev->name, ret); 1596 + bt_dev_err(hdev, "Intel firmware file request failed (%d)", 1597 + ret); 1603 1598 return NULL; 1604 1599 } 1605 1600 1606 - BT_ERR("%s failed to open Intel firmware file: %s(%d)", 1607 - hdev->name, fwname, ret); 1601 + bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)", 1602 + fwname, ret); 1608 1603 1609 1604 /* If the correct firmware patch file is not found, use the 1610 1605 * default firmware patch file instead ··· 1612 1607 snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq", 1613 1608 ver->hw_platform, ver->hw_variant); 1614 1609 if (request_firmware(&fw, fwname, &hdev->dev) < 0) { 1615 - BT_ERR("%s failed to open default Intel fw file: %s", 1616 - hdev->name, fwname); 1610 + bt_dev_err(hdev, "failed to open default fw file: %s", 1611 + fwname); 1617 1612 return NULL; 1618 1613 } 1619 1614 } ··· 1642 1637 * process. 1643 1638 */ 1644 1639 if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) { 1645 - BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name); 1640 + bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read"); 1646 1641 return -EINVAL; 1647 1642 } 1648 1643 (*fw_ptr)++; ··· 1656 1651 * of command parameter. If not, the firmware file is corrupted. 
1657 1652 */ 1658 1653 if (remain < cmd->plen) { 1659 - BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name); 1654 + bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len"); 1660 1655 return -EFAULT; 1661 1656 } 1662 1657 ··· 1689 1684 remain -= sizeof(*evt); 1690 1685 1691 1686 if (remain < evt->plen) { 1692 - BT_ERR("%s Intel fw corrupted: invalid evt len", 1693 - hdev->name); 1687 + bt_dev_err(hdev, "Intel fw corrupted: invalid evt len"); 1694 1688 return -EFAULT; 1695 1689 } 1696 1690 ··· 1703 1699 * file is corrupted. 1704 1700 */ 1705 1701 if (!evt || !evt_param || remain < 0) { 1706 - BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name); 1702 + bt_dev_err(hdev, "Intel fw corrupted: invalid evt read"); 1707 1703 return -EFAULT; 1708 1704 } 1709 1705 1710 1706 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen, 1711 1707 cmd_param, evt->evt, HCI_INIT_TIMEOUT); 1712 1708 if (IS_ERR(skb)) { 1713 - BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", 1714 - hdev->name, cmd->opcode, PTR_ERR(skb)); 1709 + bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)", 1710 + cmd->opcode, PTR_ERR(skb)); 1715 1711 return PTR_ERR(skb); 1716 1712 } 1717 1713 ··· 1720 1716 * the contents of the event. 
1721 1717 */ 1722 1718 if (skb->len != evt->plen) { 1723 - BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name, 1724 - le16_to_cpu(cmd->opcode)); 1719 + bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)", 1720 + le16_to_cpu(cmd->opcode)); 1725 1721 kfree_skb(skb); 1726 1722 return -EFAULT; 1727 1723 } 1728 1724 1729 1725 if (memcmp(skb->data, evt_param, evt->plen)) { 1730 - BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)", 1731 - hdev->name, le16_to_cpu(cmd->opcode)); 1726 + bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)", 1727 + le16_to_cpu(cmd->opcode)); 1732 1728 kfree_skb(skb); 1733 1729 return -EFAULT; 1734 1730 } ··· 1757 1753 */ 1758 1754 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); 1759 1755 if (IS_ERR(skb)) { 1760 - BT_ERR("%s sending initial HCI reset command failed (%ld)", 1761 - hdev->name, PTR_ERR(skb)); 1756 + bt_dev_err(hdev, "sending initial HCI reset command failed (%ld)", 1757 + PTR_ERR(skb)); 1762 1758 return PTR_ERR(skb); 1763 1759 } 1764 1760 kfree_skb(skb); ··· 1894 1890 struct hci_event_hdr *hdr; 1895 1891 struct hci_ev_cmd_complete *evt; 1896 1892 1897 - skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); 1893 + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); 1898 1894 if (!skb) 1899 1895 return -ENOMEM; 1900 1896 ··· 2088 2084 * for now only accept this single value. 
2089 2085 */ 2090 2086 if (ver.hw_platform != 0x37) { 2091 - BT_ERR("%s: Unsupported Intel hardware platform (%u)", 2092 - hdev->name, ver.hw_platform); 2087 + bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", 2088 + ver.hw_platform); 2093 2089 return -EINVAL; 2094 2090 } 2095 2091 ··· 2108 2104 case 0x14: /* QnJ, IcP */ 2109 2105 break; 2110 2106 default: 2111 - BT_ERR("%s: Unsupported Intel hardware variant (%u)", 2112 - hdev->name, ver.hw_variant); 2107 + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", 2108 + ver.hw_variant); 2113 2109 return -EINVAL; 2114 2110 } 2115 2111 ··· 2138 2134 * choice is to return an error and abort the device initialization. 2139 2135 */ 2140 2136 if (ver.fw_variant != 0x06) { 2141 - BT_ERR("%s: Unsupported Intel firmware variant (%u)", 2142 - hdev->name, ver.fw_variant); 2137 + bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", 2138 + ver.fw_variant); 2143 2139 return -ENODEV; 2144 2140 } 2145 2141 ··· 2155 2151 * that this bootloader does not send them, then abort the setup. 
2156 2152 */ 2157 2153 if (params.limited_cce != 0x00) { 2158 - BT_ERR("%s: Unsupported Intel firmware loading method (%u)", 2159 - hdev->name, params.limited_cce); 2154 + bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", 2155 + params.limited_cce); 2160 2156 return -EINVAL; 2161 2157 } 2162 2158 ··· 2206 2202 le16_to_cpu(ver.fw_revision)); 2207 2203 break; 2208 2204 default: 2209 - BT_ERR("%s: Unsupported Intel firmware naming", hdev->name); 2205 + bt_dev_err(hdev, "Unsupported Intel firmware naming"); 2210 2206 return -EINVAL; 2211 2207 } 2212 2208 2213 2209 err = request_firmware(&fw, fwname, &hdev->dev); 2214 2210 if (err < 0) { 2215 - BT_ERR("%s: Failed to load Intel firmware file (%d)", 2216 - hdev->name, err); 2211 + bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err); 2217 2212 return err; 2218 2213 } 2219 2214 ··· 2238 2235 le16_to_cpu(ver.fw_revision)); 2239 2236 break; 2240 2237 default: 2241 - BT_ERR("%s: Unsupported Intel firmware naming", hdev->name); 2238 + bt_dev_err(hdev, "Unsupported Intel firmware naming"); 2242 2239 return -EINVAL; 2243 2240 } 2244 2241 2245 2242 if (fw->size < 644) { 2246 - BT_ERR("%s: Invalid size of firmware file (%zu)", 2247 - hdev->name, fw->size); 2243 + bt_dev_err(hdev, "Invalid size of firmware file (%zu)", 2244 + fw->size); 2248 2245 err = -EBADF; 2249 2246 goto done; 2250 2247 } ··· 2275 2272 TASK_INTERRUPTIBLE, 2276 2273 msecs_to_jiffies(5000)); 2277 2274 if (err == -EINTR) { 2278 - BT_ERR("%s: Firmware loading interrupted", hdev->name); 2275 + bt_dev_err(hdev, "Firmware loading interrupted"); 2279 2276 goto done; 2280 2277 } 2281 2278 2282 2279 if (err) { 2283 - BT_ERR("%s: Firmware loading timeout", hdev->name); 2280 + bt_dev_err(hdev, "Firmware loading timeout"); 2284 2281 err = -ETIMEDOUT; 2285 2282 goto done; 2286 2283 } 2287 2284 2288 2285 if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { 2289 - BT_ERR("%s: Firmware loading failed", hdev->name); 2286 + bt_dev_err(hdev, "Firmware 
loading failed"); 2290 2287 err = -ENOEXEC; 2291 2288 goto done; 2292 2289 } ··· 2325 2322 msecs_to_jiffies(1000)); 2326 2323 2327 2324 if (err == -EINTR) { 2328 - BT_ERR("%s: Device boot interrupted", hdev->name); 2325 + bt_dev_err(hdev, "Device boot interrupted"); 2329 2326 return -EINTR; 2330 2327 } 2331 2328 2332 2329 if (err) { 2333 - BT_ERR("%s: Device boot timeout", hdev->name); 2330 + bt_dev_err(hdev, "Device boot timeout"); 2334 2331 return -ETIMEDOUT; 2335 2332 } 2336 2333 ··· 2367 2364 struct sk_buff *skb; 2368 2365 long ret; 2369 2366 2367 + /* In the shutdown sequence where Bluetooth is turned off followed 2368 + * by WiFi being turned off, turning WiFi back on causes issue with 2369 + * the RF calibration. 2370 + * 2371 + * To ensure that any RF activity has been stopped, issue HCI Reset 2372 + * command to clear all ongoing activity including advertising, 2373 + * scanning etc. 2374 + */ 2375 + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); 2376 + if (IS_ERR(skb)) { 2377 + ret = PTR_ERR(skb); 2378 + bt_dev_err(hdev, "HCI reset during shutdown failed"); 2379 + return ret; 2380 + } 2381 + kfree_skb(skb); 2382 + 2370 2383 /* Some platforms have an issue with BT LED when the interface is 2371 2384 * down or BT radio is turned off, which takes 5 seconds to BT LED 2372 2385 * goes off. This command turns off the BT LED immediately. ··· 2390 2371 skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); 2391 2372 if (IS_ERR(skb)) { 2392 2373 ret = PTR_ERR(skb); 2393 - BT_ERR("%s: turning off Intel device LED failed (%ld)", 2394 - hdev->name, ret); 2374 + bt_dev_err(hdev, "turning off Intel device LED failed"); 2395 2375 return ret; 2396 2376 } 2397 2377 kfree_skb(skb);
+197 -5
drivers/bluetooth/hci_h5.c
··· 21 21 * 22 22 */ 23 23 24 - #include <linux/kernel.h> 24 + #include <linux/acpi.h> 25 25 #include <linux/errno.h> 26 + #include <linux/gpio/consumer.h> 27 + #include <linux/kernel.h> 28 + #include <linux/mod_devicetable.h> 29 + #include <linux/serdev.h> 26 30 #include <linux/skbuff.h> 27 31 28 32 #include <net/bluetooth/bluetooth.h> 29 33 #include <net/bluetooth/hci_core.h> 30 34 35 + #include "btrtl.h" 31 36 #include "hci_uart.h" 32 37 33 38 #define HCI_3WIRE_ACK_PKT 0 ··· 70 65 }; 71 66 72 67 struct h5 { 68 + /* Must be the first member, hci_serdev.c expects this. */ 69 + struct hci_uart serdev_hu; 70 + 73 71 struct sk_buff_head unack; /* Unack'ed packets queue */ 74 72 struct sk_buff_head rel; /* Reliable packets queue */ 75 73 struct sk_buff_head unrel; /* Unreliable packets queue */ ··· 103 95 H5_SLEEPING, 104 96 H5_WAKING_UP, 105 97 } sleep; 98 + 99 + const struct h5_vnd *vnd; 100 + const char *id; 101 + 102 + struct gpio_desc *enable_gpio; 103 + struct gpio_desc *device_wake_gpio; 104 + }; 105 + 106 + struct h5_vnd { 107 + int (*setup)(struct h5 *h5); 108 + void (*open)(struct h5 *h5); 109 + void (*close)(struct h5 *h5); 110 + const struct acpi_gpio_mapping *acpi_gpio_map; 106 111 }; 107 112 108 113 static void h5_reset_rx(struct h5 *h5); ··· 214 193 215 194 BT_DBG("hu %p", hu); 216 195 217 - h5 = kzalloc(sizeof(*h5), GFP_KERNEL); 218 - if (!h5) 219 - return -ENOMEM; 196 + if (hu->serdev) { 197 + h5 = serdev_device_get_drvdata(hu->serdev); 198 + } else { 199 + h5 = kzalloc(sizeof(*h5), GFP_KERNEL); 200 + if (!h5) 201 + return -ENOMEM; 202 + } 220 203 221 204 hu->priv = h5; 222 205 h5->hu = hu; ··· 234 209 timer_setup(&h5->timer, h5_timed_event, 0); 235 210 236 211 h5->tx_win = H5_TX_WIN_MAX; 212 + 213 + if (h5->vnd && h5->vnd->open) 214 + h5->vnd->open(h5); 237 215 238 216 set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags); 239 217 ··· 257 229 skb_queue_purge(&h5->rel); 258 230 skb_queue_purge(&h5->unrel); 259 231 260 - kfree(h5); 232 + if (h5->vnd && 
h5->vnd->close) 233 + h5->vnd->close(h5); 234 + 235 + if (!hu->serdev) 236 + kfree(h5); 237 + 238 + return 0; 239 + } 240 + 241 + static int h5_setup(struct hci_uart *hu) 242 + { 243 + struct h5 *h5 = hu->priv; 244 + 245 + if (h5->vnd && h5->vnd->setup) 246 + return h5->vnd->setup(h5); 261 247 262 248 return 0; 263 249 } ··· 786 744 .name = "Three-wire (H5)", 787 745 .open = h5_open, 788 746 .close = h5_close, 747 + .setup = h5_setup, 789 748 .recv = h5_recv, 790 749 .enqueue = h5_enqueue, 791 750 .dequeue = h5_dequeue, 792 751 .flush = h5_flush, 793 752 }; 794 753 754 + static int h5_serdev_probe(struct serdev_device *serdev) 755 + { 756 + const struct acpi_device_id *match; 757 + struct device *dev = &serdev->dev; 758 + struct h5 *h5; 759 + 760 + h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); 761 + if (!h5) 762 + return -ENOMEM; 763 + 764 + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); 765 + 766 + h5->hu = &h5->serdev_hu; 767 + h5->serdev_hu.serdev = serdev; 768 + serdev_device_set_drvdata(serdev, h5); 769 + 770 + if (has_acpi_companion(dev)) { 771 + match = acpi_match_device(dev->driver->acpi_match_table, dev); 772 + if (!match) 773 + return -ENODEV; 774 + 775 + h5->vnd = (const struct h5_vnd *)match->driver_data; 776 + h5->id = (char *)match->id; 777 + 778 + if (h5->vnd->acpi_gpio_map) 779 + devm_acpi_dev_add_driver_gpios(dev, 780 + h5->vnd->acpi_gpio_map); 781 + } 782 + 783 + h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); 784 + if (IS_ERR(h5->enable_gpio)) 785 + return PTR_ERR(h5->enable_gpio); 786 + 787 + h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake", 788 + GPIOD_OUT_LOW); 789 + if (IS_ERR(h5->device_wake_gpio)) 790 + return PTR_ERR(h5->device_wake_gpio); 791 + 792 + return hci_uart_register_device(&h5->serdev_hu, &h5p); 793 + } 794 + 795 + static void h5_serdev_remove(struct serdev_device *serdev) 796 + { 797 + struct h5 *h5 = serdev_device_get_drvdata(serdev); 798 + 799 + 
hci_uart_unregister_device(&h5->serdev_hu); 800 + } 801 + 802 + static int h5_btrtl_setup(struct h5 *h5) 803 + { 804 + struct btrtl_device_info *btrtl_dev; 805 + struct sk_buff *skb; 806 + __le32 baudrate_data; 807 + u32 device_baudrate; 808 + unsigned int controller_baudrate; 809 + bool flow_control; 810 + int err; 811 + 812 + btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id); 813 + if (IS_ERR(btrtl_dev)) 814 + return PTR_ERR(btrtl_dev); 815 + 816 + err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev, 817 + &controller_baudrate, &device_baudrate, 818 + &flow_control); 819 + if (err) 820 + goto out_free; 821 + 822 + baudrate_data = cpu_to_le32(device_baudrate); 823 + skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data), 824 + &baudrate_data, HCI_INIT_TIMEOUT); 825 + if (IS_ERR(skb)) { 826 + rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n"); 827 + err = PTR_ERR(skb); 828 + goto out_free; 829 + } else { 830 + kfree_skb(skb); 831 + } 832 + /* Give the device some time to set up the new baudrate. 
*/ 833 + usleep_range(10000, 20000); 834 + 835 + serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate); 836 + serdev_device_set_flow_control(h5->hu->serdev, flow_control); 837 + 838 + err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev); 839 + /* Give the device some time before the hci-core sends it a reset */ 840 + usleep_range(10000, 20000); 841 + 842 + out_free: 843 + btrtl_free(btrtl_dev); 844 + 845 + return err; 846 + } 847 + 848 + static void h5_btrtl_open(struct h5 *h5) 849 + { 850 + /* Devices always start with these fixed parameters */ 851 + serdev_device_set_flow_control(h5->hu->serdev, false); 852 + serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); 853 + serdev_device_set_baudrate(h5->hu->serdev, 115200); 854 + 855 + /* The controller needs up to 500ms to wakeup */ 856 + gpiod_set_value_cansleep(h5->enable_gpio, 1); 857 + gpiod_set_value_cansleep(h5->device_wake_gpio, 1); 858 + msleep(500); 859 + } 860 + 861 + static void h5_btrtl_close(struct h5 *h5) 862 + { 863 + gpiod_set_value_cansleep(h5->device_wake_gpio, 0); 864 + gpiod_set_value_cansleep(h5->enable_gpio, 0); 865 + } 866 + 867 + static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false }; 868 + static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false }; 869 + static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false }; 870 + static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = { 871 + { "device-wake-gpios", &btrtl_device_wake_gpios, 1 }, 872 + { "enable-gpios", &btrtl_enable_gpios, 1 }, 873 + { "host-wake-gpios", &btrtl_host_wake_gpios, 1 }, 874 + {}, 875 + }; 876 + 877 + static struct h5_vnd rtl_vnd = { 878 + .setup = h5_btrtl_setup, 879 + .open = h5_btrtl_open, 880 + .close = h5_btrtl_close, 881 + .acpi_gpio_map = acpi_btrtl_gpios, 882 + }; 883 + 884 + #ifdef CONFIG_ACPI 885 + static const struct acpi_device_id h5_acpi_match[] = { 886 + { "OBDA8723", (kernel_ulong_t)&rtl_vnd }, 887 + { }, 888 + }; 889 + 
MODULE_DEVICE_TABLE(acpi, h5_acpi_match); 890 + #endif 891 + 892 + static struct serdev_device_driver h5_serdev_driver = { 893 + .probe = h5_serdev_probe, 894 + .remove = h5_serdev_remove, 895 + .driver = { 896 + .name = "hci_uart_h5", 897 + .acpi_match_table = ACPI_PTR(h5_acpi_match), 898 + }, 899 + }; 900 + 795 901 int __init h5_init(void) 796 902 { 903 + serdev_device_driver_register(&h5_serdev_driver); 797 904 return hci_uart_register_proto(&h5p); 798 905 } 799 906 800 907 int __exit h5_deinit(void) 801 908 { 909 + serdev_device_driver_unregister(&h5_serdev_driver); 802 910 return hci_uart_unregister_proto(&h5p); 803 911 }
+1 -1
drivers/bluetooth/hci_intel.c
··· 458 458 struct hci_event_hdr *hdr; 459 459 struct hci_ev_cmd_complete *evt; 460 460 461 - skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); 461 + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); 462 462 if (!skb) 463 463 return -ENOMEM; 464 464
+438 -68
drivers/bluetooth/hci_qca.c
··· 5 5 * protocol extension to H4. 6 6 * 7 7 * Copyright (C) 2007 Texas Instruments, Inc. 8 - * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved. 8 + * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved. 9 9 * 10 10 * Acknowledgements: 11 11 * This file is based on hci_ll.c, which was... ··· 31 31 #include <linux/kernel.h> 32 32 #include <linux/clk.h> 33 33 #include <linux/debugfs.h> 34 + #include <linux/delay.h> 35 + #include <linux/device.h> 34 36 #include <linux/gpio/consumer.h> 35 37 #include <linux/mod_devicetable.h> 36 38 #include <linux/module.h> 39 + #include <linux/of_device.h> 40 + #include <linux/platform_device.h> 41 + #include <linux/regulator/consumer.h> 37 42 #include <linux/serdev.h> 38 43 39 44 #include <net/bluetooth/bluetooth.h> ··· 124 119 u64 votes_off; 125 120 }; 126 121 122 + enum qca_speed_type { 123 + QCA_INIT_SPEED = 1, 124 + QCA_OPER_SPEED 125 + }; 126 + 127 + /* 128 + * Voltage regulator information required for configuring the 129 + * QCA Bluetooth chipset 130 + */ 131 + struct qca_vreg { 132 + const char *name; 133 + unsigned int min_uV; 134 + unsigned int max_uV; 135 + unsigned int load_uA; 136 + }; 137 + 138 + struct qca_vreg_data { 139 + enum qca_btsoc_type soc_type; 140 + struct qca_vreg *vregs; 141 + size_t num_vregs; 142 + }; 143 + 144 + /* 145 + * Platform data for the QCA Bluetooth power driver. 
146 + */ 147 + struct qca_power { 148 + struct device *dev; 149 + const struct qca_vreg_data *vreg_data; 150 + struct regulator_bulk_data *vreg_bulk; 151 + bool vregs_on; 152 + }; 153 + 127 154 struct qca_serdev { 128 155 struct hci_uart serdev_hu; 129 156 struct gpio_desc *bt_en; 130 157 struct clk *susclk; 158 + enum qca_btsoc_type btsoc_type; 159 + struct qca_power *bt_power; 160 + u32 init_speed; 161 + u32 oper_speed; 131 162 }; 163 + 164 + static int qca_power_setup(struct hci_uart *hu, bool on); 165 + static void qca_power_shutdown(struct hci_dev *hdev); 132 166 133 167 static void __serial_clock_on(struct tty_struct *tty) 134 168 { ··· 446 402 { 447 403 struct qca_serdev *qcadev; 448 404 struct qca_data *qca; 405 + int ret; 449 406 450 407 BT_DBG("hu %p qca_open", hu); 451 408 452 - qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC); 409 + qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); 453 410 if (!qca) 454 411 return -ENOMEM; 455 412 ··· 498 453 499 454 hu->priv = qca; 500 455 456 + if (hu->serdev) { 457 + serdev_device_open(hu->serdev); 458 + 459 + qcadev = serdev_device_get_drvdata(hu->serdev); 460 + if (qcadev->btsoc_type != QCA_WCN3990) { 461 + gpiod_set_value_cansleep(qcadev->bt_en, 1); 462 + } else { 463 + hu->init_speed = qcadev->init_speed; 464 + hu->oper_speed = qcadev->oper_speed; 465 + ret = qca_power_setup(hu, true); 466 + if (ret) { 467 + destroy_workqueue(qca->workqueue); 468 + kfree_skb(qca->rx_skb); 469 + hu->priv = NULL; 470 + kfree(qca); 471 + return ret; 472 + } 473 + } 474 + } 475 + 501 476 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); 502 477 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; 503 478 504 479 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); 505 480 qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; 506 - 507 - if (hu->serdev) { 508 - serdev_device_open(hu->serdev); 509 - 510 - qcadev = serdev_device_get_drvdata(hu->serdev); 511 - gpiod_set_value_cansleep(qcadev->bt_en, 1); 512 - } 513 
481 514 482 BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", 515 483 qca->tx_idle_delay, qca->wake_retrans); ··· 607 549 qca->hu = NULL; 608 550 609 551 if (hu->serdev) { 610 - serdev_device_close(hu->serdev); 611 - 612 552 qcadev = serdev_device_get_drvdata(hu->serdev); 613 - gpiod_set_value_cansleep(qcadev->bt_en, 0); 553 + if (qcadev->btsoc_type == QCA_WCN3990) 554 + qca_power_shutdown(hu->hdev); 555 + else 556 + gpiod_set_value_cansleep(qcadev->bt_en, 0); 557 + 558 + serdev_device_close(hu->serdev); 614 559 } 615 560 616 561 kfree_skb(qca->rx_skb); ··· 933 872 return QCA_BAUDRATE_2000000; 934 873 case 3000000: 935 874 return QCA_BAUDRATE_3000000; 875 + case 3200000: 876 + return QCA_BAUDRATE_3200000; 936 877 case 3500000: 937 878 return QCA_BAUDRATE_3500000; 938 879 default: ··· 947 884 struct hci_uart *hu = hci_get_drvdata(hdev); 948 885 struct qca_data *qca = hu->priv; 949 886 struct sk_buff *skb; 887 + struct qca_serdev *qcadev; 950 888 u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; 951 889 952 - if (baudrate > QCA_BAUDRATE_3000000) 890 + if (baudrate > QCA_BAUDRATE_3200000) 953 891 return -EINVAL; 954 892 955 893 cmd[4] = baudrate; 956 894 957 - skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC); 895 + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); 958 896 if (!skb) { 959 897 bt_dev_err(hdev, "Failed to allocate baudrate packet"); 960 898 return -ENOMEM; 961 899 } 900 + 901 + /* Disabling hardware flow control is mandatory while 902 + * sending change baudrate request to wcn3990 SoC. 903 + */ 904 + qcadev = serdev_device_get_drvdata(hu->serdev); 905 + if (qcadev->btsoc_type == QCA_WCN3990) 906 + hci_uart_set_flow_control(hu, true); 962 907 963 908 /* Assign commands to change baudrate and packet type. 
*/ 964 909 skb_put_data(skb, cmd, sizeof(cmd)); ··· 983 912 schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); 984 913 set_current_state(TASK_RUNNING); 985 914 915 + if (qcadev->btsoc_type == QCA_WCN3990) 916 + hci_uart_set_flow_control(hu, false); 917 + 986 918 return 0; 987 919 } 988 920 ··· 997 923 hci_uart_set_baudrate(hu, speed); 998 924 } 999 925 926 + static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd) 927 + { 928 + struct hci_uart *hu = hci_get_drvdata(hdev); 929 + struct qca_data *qca = hu->priv; 930 + struct sk_buff *skb; 931 + 932 + /* These power pulses are single byte command which are sent 933 + * at required baudrate to wcn3990. On wcn3990, we have an external 934 + * circuit at Tx pin which decodes the pulse sent at specific baudrate. 935 + * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT 936 + * and also we use the same power inputs to turn on and off for 937 + * Wi-Fi/BT. Powering up the power sources will not enable BT, until 938 + * we send a power on pulse at 115200 bps. This algorithm will help to 939 + * save power. Disabling hardware flow control is mandatory while 940 + * sending power pulses to SoC. 
941 + */ 942 + bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd); 943 + 944 + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); 945 + if (!skb) 946 + return -ENOMEM; 947 + 948 + hci_uart_set_flow_control(hu, true); 949 + 950 + skb_put_u8(skb, cmd); 951 + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; 952 + 953 + skb_queue_tail(&qca->txq, skb); 954 + hci_uart_tx_wakeup(hu); 955 + 956 + /* Wait for 100 uS for SoC to settle down */ 957 + usleep_range(100, 200); 958 + hci_uart_set_flow_control(hu, false); 959 + 960 + return 0; 961 + } 962 + 963 + static unsigned int qca_get_speed(struct hci_uart *hu, 964 + enum qca_speed_type speed_type) 965 + { 966 + unsigned int speed = 0; 967 + 968 + if (speed_type == QCA_INIT_SPEED) { 969 + if (hu->init_speed) 970 + speed = hu->init_speed; 971 + else if (hu->proto->init_speed) 972 + speed = hu->proto->init_speed; 973 + } else { 974 + if (hu->oper_speed) 975 + speed = hu->oper_speed; 976 + else if (hu->proto->oper_speed) 977 + speed = hu->proto->oper_speed; 978 + } 979 + 980 + return speed; 981 + } 982 + 983 + static int qca_check_speeds(struct hci_uart *hu) 984 + { 985 + struct qca_serdev *qcadev; 986 + 987 + qcadev = serdev_device_get_drvdata(hu->serdev); 988 + if (qcadev->btsoc_type == QCA_WCN3990) { 989 + if (!qca_get_speed(hu, QCA_INIT_SPEED) && 990 + !qca_get_speed(hu, QCA_OPER_SPEED)) 991 + return -EINVAL; 992 + } else { 993 + if (!qca_get_speed(hu, QCA_INIT_SPEED) || 994 + !qca_get_speed(hu, QCA_OPER_SPEED)) 995 + return -EINVAL; 996 + } 997 + 998 + return 0; 999 + } 1000 + 1001 + static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) 1002 + { 1003 + unsigned int speed, qca_baudrate; 1004 + int ret; 1005 + 1006 + if (speed_type == QCA_INIT_SPEED) { 1007 + speed = qca_get_speed(hu, QCA_INIT_SPEED); 1008 + if (speed) 1009 + host_set_baudrate(hu, speed); 1010 + } else { 1011 + speed = qca_get_speed(hu, QCA_OPER_SPEED); 1012 + if (!speed) 1013 + return 0; 1014 + 1015 + qca_baudrate = 
qca_get_baudrate_value(speed); 1016 + bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed); 1017 + ret = qca_set_baudrate(hu->hdev, qca_baudrate); 1018 + if (ret) 1019 + return ret; 1020 + 1021 + host_set_baudrate(hu, speed); 1022 + } 1023 + 1024 + return 0; 1025 + } 1026 + 1027 + static int qca_wcn3990_init(struct hci_uart *hu) 1028 + { 1029 + struct hci_dev *hdev = hu->hdev; 1030 + int ret; 1031 + 1032 + /* Forcefully enable wcn3990 to enter in to boot mode. */ 1033 + host_set_baudrate(hu, 2400); 1034 + ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); 1035 + if (ret) 1036 + return ret; 1037 + 1038 + qca_set_speed(hu, QCA_INIT_SPEED); 1039 + ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE); 1040 + if (ret) 1041 + return ret; 1042 + 1043 + /* Wait for 100 ms for SoC to boot */ 1044 + msleep(100); 1045 + 1046 + /* Now the device is in ready state to communicate with host. 1047 + * To sync host with device we need to reopen port. 1048 + * Without this, we will have RTS and CTS synchronization 1049 + * issues. 
1050 + */ 1051 + serdev_device_close(hu->serdev); 1052 + ret = serdev_device_open(hu->serdev); 1053 + if (ret) { 1054 + bt_dev_err(hu->hdev, "failed to open port"); 1055 + return ret; 1056 + } 1057 + 1058 + hci_uart_set_flow_control(hu, false); 1059 + 1060 + return 0; 1061 + } 1062 + 1000 1063 static int qca_setup(struct hci_uart *hu) 1001 1064 { 1002 1065 struct hci_dev *hdev = hu->hdev; 1003 1066 struct qca_data *qca = hu->priv; 1004 1067 unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; 1068 + struct qca_serdev *qcadev; 1005 1069 int ret; 1070 + int soc_ver = 0; 1006 1071 1007 - bt_dev_info(hdev, "ROME setup"); 1072 + qcadev = serdev_device_get_drvdata(hu->serdev); 1073 + 1074 + ret = qca_check_speeds(hu); 1075 + if (ret) 1076 + return ret; 1008 1077 1009 1078 /* Patch downloading has to be done without IBS mode */ 1010 1079 clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); 1011 1080 1012 - /* Setup initial baudrate */ 1013 - speed = 0; 1014 - if (hu->init_speed) 1015 - speed = hu->init_speed; 1016 - else if (hu->proto->init_speed) 1017 - speed = hu->proto->init_speed; 1018 - 1019 - if (speed) 1020 - host_set_baudrate(hu, speed); 1021 - 1022 - /* Setup user speed if needed */ 1023 - speed = 0; 1024 - if (hu->oper_speed) 1025 - speed = hu->oper_speed; 1026 - else if (hu->proto->oper_speed) 1027 - speed = hu->proto->oper_speed; 1028 - 1029 - if (speed) { 1030 - qca_baudrate = qca_get_baudrate_value(speed); 1031 - 1032 - bt_dev_info(hdev, "Set UART speed to %d", speed); 1033 - ret = qca_set_baudrate(hdev, qca_baudrate); 1034 - if (ret) { 1035 - bt_dev_err(hdev, "Failed to change the baud rate (%d)", 1036 - ret); 1081 + if (qcadev->btsoc_type == QCA_WCN3990) { 1082 + bt_dev_info(hdev, "setting up wcn3990"); 1083 + ret = qca_wcn3990_init(hu); 1084 + if (ret) 1037 1085 return ret; 1038 - } 1039 - host_set_baudrate(hu, speed); 1086 + 1087 + ret = qca_read_soc_version(hdev, &soc_ver); 1088 + if (ret) 1089 + return ret; 1090 + } else { 1091 + bt_dev_info(hdev, 
"ROME setup"); 1092 + qca_set_speed(hu, QCA_INIT_SPEED); 1040 1093 } 1041 1094 1095 + /* Setup user speed if needed */ 1096 + speed = qca_get_speed(hu, QCA_OPER_SPEED); 1097 + if (speed) { 1098 + ret = qca_set_speed(hu, QCA_OPER_SPEED); 1099 + if (ret) 1100 + return ret; 1101 + 1102 + qca_baudrate = qca_get_baudrate_value(speed); 1103 + } 1104 + 1105 + if (qcadev->btsoc_type != QCA_WCN3990) { 1106 + /* Get QCA version information */ 1107 + ret = qca_read_soc_version(hdev, &soc_ver); 1108 + if (ret) 1109 + return ret; 1110 + } 1111 + 1112 + bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); 1042 1113 /* Setup patch / NVM configurations */ 1043 - ret = qca_uart_setup_rome(hdev, qca_baudrate); 1114 + ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver); 1044 1115 if (!ret) { 1045 1116 set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); 1046 1117 qca_debugfs_init(hdev); ··· 1221 1002 .dequeue = qca_dequeue, 1222 1003 }; 1223 1004 1005 + static const struct qca_vreg_data qca_soc_data = { 1006 + .soc_type = QCA_WCN3990, 1007 + .vregs = (struct qca_vreg []) { 1008 + { "vddio", 1800000, 1900000, 15000 }, 1009 + { "vddxo", 1800000, 1900000, 80000 }, 1010 + { "vddrf", 1300000, 1350000, 300000 }, 1011 + { "vddch0", 3300000, 3400000, 450000 }, 1012 + }, 1013 + .num_vregs = 4, 1014 + }; 1015 + 1016 + static void qca_power_shutdown(struct hci_dev *hdev) 1017 + { 1018 + struct hci_uart *hu = hci_get_drvdata(hdev); 1019 + 1020 + host_set_baudrate(hu, 2400); 1021 + qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); 1022 + qca_power_setup(hu, false); 1023 + } 1024 + 1025 + static int qca_enable_regulator(struct qca_vreg vregs, 1026 + struct regulator *regulator) 1027 + { 1028 + int ret; 1029 + 1030 + ret = regulator_set_voltage(regulator, vregs.min_uV, 1031 + vregs.max_uV); 1032 + if (ret) 1033 + return ret; 1034 + 1035 + if (vregs.load_uA) 1036 + ret = regulator_set_load(regulator, 1037 + vregs.load_uA); 1038 + 1039 + if (ret) 1040 + return ret; 1041 
+ 1042 + return regulator_enable(regulator); 1043 + 1044 + } 1045 + 1046 + static void qca_disable_regulator(struct qca_vreg vregs, 1047 + struct regulator *regulator) 1048 + { 1049 + regulator_disable(regulator); 1050 + regulator_set_voltage(regulator, 0, vregs.max_uV); 1051 + if (vregs.load_uA) 1052 + regulator_set_load(regulator, 0); 1053 + 1054 + } 1055 + 1056 + static int qca_power_setup(struct hci_uart *hu, bool on) 1057 + { 1058 + struct qca_vreg *vregs; 1059 + struct regulator_bulk_data *vreg_bulk; 1060 + struct qca_serdev *qcadev; 1061 + int i, num_vregs, ret = 0; 1062 + 1063 + qcadev = serdev_device_get_drvdata(hu->serdev); 1064 + if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data || 1065 + !qcadev->bt_power->vreg_bulk) 1066 + return -EINVAL; 1067 + 1068 + vregs = qcadev->bt_power->vreg_data->vregs; 1069 + vreg_bulk = qcadev->bt_power->vreg_bulk; 1070 + num_vregs = qcadev->bt_power->vreg_data->num_vregs; 1071 + BT_DBG("on: %d", on); 1072 + if (on && !qcadev->bt_power->vregs_on) { 1073 + for (i = 0; i < num_vregs; i++) { 1074 + ret = qca_enable_regulator(vregs[i], 1075 + vreg_bulk[i].consumer); 1076 + if (ret) 1077 + break; 1078 + } 1079 + 1080 + if (ret) { 1081 + BT_ERR("failed to enable regulator:%s", vregs[i].name); 1082 + /* turn off regulators which are enabled */ 1083 + for (i = i - 1; i >= 0; i--) 1084 + qca_disable_regulator(vregs[i], 1085 + vreg_bulk[i].consumer); 1086 + } else { 1087 + qcadev->bt_power->vregs_on = true; 1088 + } 1089 + } else if (!on && qcadev->bt_power->vregs_on) { 1090 + /* turn off regulator in reverse order */ 1091 + i = qcadev->bt_power->vreg_data->num_vregs - 1; 1092 + for ( ; i >= 0; i--) 1093 + qca_disable_regulator(vregs[i], vreg_bulk[i].consumer); 1094 + 1095 + qcadev->bt_power->vregs_on = false; 1096 + } 1097 + 1098 + return ret; 1099 + } 1100 + 1101 + static int qca_init_regulators(struct qca_power *qca, 1102 + const struct qca_vreg *vregs, size_t num_vregs) 1103 + { 1104 + int i; 1105 + 1106 + 
qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs * 1107 + sizeof(struct regulator_bulk_data), 1108 + GFP_KERNEL); 1109 + if (!qca->vreg_bulk) 1110 + return -ENOMEM; 1111 + 1112 + for (i = 0; i < num_vregs; i++) 1113 + qca->vreg_bulk[i].supply = vregs[i].name; 1114 + 1115 + return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk); 1116 + } 1117 + 1224 1118 static int qca_serdev_probe(struct serdev_device *serdev) 1225 1119 { 1226 1120 struct qca_serdev *qcadev; 1121 + const struct qca_vreg_data *data; 1227 1122 int err; 1228 1123 1229 1124 qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); ··· 1345 1012 return -ENOMEM; 1346 1013 1347 1014 qcadev->serdev_hu.serdev = serdev; 1015 + data = of_device_get_match_data(&serdev->dev); 1348 1016 serdev_device_set_drvdata(serdev, qcadev); 1017 + if (data && data->soc_type == QCA_WCN3990) { 1018 + qcadev->btsoc_type = QCA_WCN3990; 1019 + qcadev->bt_power = devm_kzalloc(&serdev->dev, 1020 + sizeof(struct qca_power), 1021 + GFP_KERNEL); 1022 + if (!qcadev->bt_power) 1023 + return -ENOMEM; 1349 1024 1350 - qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable", 1351 - GPIOD_OUT_LOW); 1352 - if (IS_ERR(qcadev->bt_en)) { 1353 - dev_err(&serdev->dev, "failed to acquire enable gpio\n"); 1354 - return PTR_ERR(qcadev->bt_en); 1025 + qcadev->bt_power->dev = &serdev->dev; 1026 + qcadev->bt_power->vreg_data = data; 1027 + err = qca_init_regulators(qcadev->bt_power, data->vregs, 1028 + data->num_vregs); 1029 + if (err) { 1030 + BT_ERR("Failed to init regulators:%d", err); 1031 + goto out; 1032 + } 1033 + 1034 + qcadev->bt_power->vregs_on = false; 1035 + 1036 + device_property_read_u32(&serdev->dev, "max-speed", 1037 + &qcadev->oper_speed); 1038 + if (!qcadev->oper_speed) 1039 + BT_DBG("UART will pick default operating speed"); 1040 + 1041 + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); 1042 + if (err) { 1043 + BT_ERR("wcn3990 serdev registration failed"); 1044 + goto out; 1045 + } 1046 + } else { 
1047 + qcadev->btsoc_type = QCA_ROME; 1048 + qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable", 1049 + GPIOD_OUT_LOW); 1050 + if (IS_ERR(qcadev->bt_en)) { 1051 + dev_err(&serdev->dev, "failed to acquire enable gpio\n"); 1052 + return PTR_ERR(qcadev->bt_en); 1053 + } 1054 + 1055 + qcadev->susclk = devm_clk_get(&serdev->dev, NULL); 1056 + if (IS_ERR(qcadev->susclk)) { 1057 + dev_err(&serdev->dev, "failed to acquire clk\n"); 1058 + return PTR_ERR(qcadev->susclk); 1059 + } 1060 + 1061 + err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); 1062 + if (err) 1063 + return err; 1064 + 1065 + err = clk_prepare_enable(qcadev->susclk); 1066 + if (err) 1067 + return err; 1068 + 1069 + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); 1070 + if (err) 1071 + clk_disable_unprepare(qcadev->susclk); 1355 1072 } 1356 1073 1357 - qcadev->susclk = devm_clk_get(&serdev->dev, NULL); 1358 - if (IS_ERR(qcadev->susclk)) { 1359 - dev_err(&serdev->dev, "failed to acquire clk\n"); 1360 - return PTR_ERR(qcadev->susclk); 1361 - } 1074 + out: return err; 1362 1075 1363 - err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); 1364 - if (err) 1365 - return err; 1366 - 1367 - err = clk_prepare_enable(qcadev->susclk); 1368 - if (err) 1369 - return err; 1370 - 1371 - err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); 1372 - if (err) 1373 - clk_disable_unprepare(qcadev->susclk); 1374 - 1375 - return err; 1376 1076 } 1377 1077 1378 1078 static void qca_serdev_remove(struct serdev_device *serdev) 1379 1079 { 1380 1080 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); 1381 1081 1382 - hci_uart_unregister_device(&qcadev->serdev_hu); 1082 + if (qcadev->btsoc_type == QCA_WCN3990) 1083 + qca_power_shutdown(qcadev->serdev_hu.hdev); 1084 + else 1085 + clk_disable_unprepare(qcadev->susclk); 1383 1086 1384 - clk_disable_unprepare(qcadev->susclk); 1087 + hci_uart_unregister_device(&qcadev->serdev_hu); 1385 1088 } 1386 1089 1387 1090 static const struct of_device_id 
qca_bluetooth_of_match[] = { 1388 1091 { .compatible = "qcom,qca6174-bt" }, 1092 + { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data}, 1389 1093 { /* sentinel */ } 1390 1094 }; 1391 1095 MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
-38
include/linux/platform_data/bt-nokia-h4p.h
··· 1 - /* 2 - * This file is part of Nokia H4P bluetooth driver 3 - * 4 - * Copyright (C) 2010 Nokia Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * version 2 as published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope that it will be useful, but 11 - * WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 - * General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 18 - * 02110-1301 USA 19 - * 20 - */ 21 - 22 - 23 - /** 24 - * struct hci_h4p_platform data - hci_h4p Platform data structure 25 - */ 26 - struct hci_h4p_platform_data { 27 - int chip_type; 28 - int bt_sysclk; 29 - unsigned int bt_wakeup_gpio; 30 - unsigned int host_wakeup_gpio; 31 - unsigned int reset_gpio; 32 - int reset_gpio_shared; 33 - unsigned int uart_irq; 34 - phys_addr_t uart_base; 35 - const char *uart_iclk; 36 - const char *uart_fclk; 37 - void (*set_pm_limits)(struct device *dev, bool set); 38 - };
+219
include/net/bluetooth/hci.h
··· 183 183 * during the hdev->setup vendor callback. 184 184 */ 185 185 HCI_QUIRK_NON_PERSISTENT_DIAG, 186 + 187 + /* When this quirk is set, setup() would be run after every 188 + * open() and not just after the first open(). 189 + * 190 + * This quirk can be set before hci_register_dev is called or 191 + * during the hdev->setup vendor callback. 192 + * 193 + */ 194 + HCI_QUIRK_NON_PERSISTENT_SETUP, 186 195 }; 187 196 188 197 /* HCI device flags */ ··· 300 291 #define HCI_DH3 0x0800 301 292 #define HCI_DH5 0x8000 302 293 294 + /* HCI packet types inverted masks */ 295 + #define HCI_2DH1 0x0002 296 + #define HCI_3DH1 0x0004 297 + #define HCI_2DH3 0x0100 298 + #define HCI_3DH3 0x0200 299 + #define HCI_2DH5 0x1000 300 + #define HCI_3DH5 0x2000 301 + 303 302 #define HCI_HV1 0x0020 304 303 #define HCI_HV2 0x0040 305 304 #define HCI_HV3 0x0080 ··· 371 354 #define LMP_PCONTROL 0x04 372 355 #define LMP_TRANSPARENT 0x08 373 356 357 + #define LMP_EDR_2M 0x02 358 + #define LMP_EDR_3M 0x04 374 359 #define LMP_RSSI_INQ 0x40 375 360 #define LMP_ESCO 0x80 376 361 ··· 380 361 #define LMP_EV5 0x02 381 362 #define LMP_NO_BREDR 0x20 382 363 #define LMP_LE 0x40 364 + #define LMP_EDR_3SLOT 0x80 383 365 366 + #define LMP_EDR_5SLOT 0x01 384 367 #define LMP_SNIFF_SUBR 0x02 385 368 #define LMP_PAUSE_ENC 0x04 386 369 #define LMP_EDR_ESCO_2M 0x20 ··· 419 398 #define HCI_LE_SLAVE_FEATURES 0x08 420 399 #define HCI_LE_PING 0x10 421 400 #define HCI_LE_DATA_LEN_EXT 0x20 401 + #define HCI_LE_PHY_2M 0x01 402 + #define HCI_LE_PHY_CODED 0x08 403 + #define HCI_LE_EXT_ADV 0x10 422 404 #define HCI_LE_EXT_SCAN_POLICY 0x80 405 + #define HCI_LE_PHY_2M 0x01 406 + #define HCI_LE_PHY_CODED 0x08 423 407 #define HCI_LE_CHAN_SEL_ALG2 0x40 424 408 425 409 /* Connection modes */ ··· 1516 1490 __le16 tx_time; 1517 1491 } __packed; 1518 1492 1493 + #define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029 1494 + 1495 + #define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a 1496 + struct hci_rp_le_read_resolv_list_size { 1497 + __u8 
status; 1498 + __u8 size; 1499 + } __packed; 1500 + 1519 1501 #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f 1520 1502 struct hci_rp_le_read_max_data_len { 1521 1503 __u8 status; ··· 1538 1504 __u8 all_phys; 1539 1505 __u8 tx_phys; 1540 1506 __u8 rx_phys; 1507 + } __packed; 1508 + 1509 + #define HCI_LE_SET_PHY_1M 0x01 1510 + #define HCI_LE_SET_PHY_2M 0x02 1511 + #define HCI_LE_SET_PHY_CODED 0x04 1512 + 1513 + #define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041 1514 + struct hci_cp_le_set_ext_scan_params { 1515 + __u8 own_addr_type; 1516 + __u8 filter_policy; 1517 + __u8 scanning_phys; 1518 + __u8 data[0]; 1519 + } __packed; 1520 + 1521 + #define LE_SCAN_PHY_1M 0x01 1522 + #define LE_SCAN_PHY_2M 0x02 1523 + #define LE_SCAN_PHY_CODED 0x04 1524 + 1525 + struct hci_cp_le_scan_phy_params { 1526 + __u8 type; 1527 + __le16 interval; 1528 + __le16 window; 1529 + } __packed; 1530 + 1531 + #define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042 1532 + struct hci_cp_le_set_ext_scan_enable { 1533 + __u8 enable; 1534 + __u8 filter_dup; 1535 + __le16 duration; 1536 + __le16 period; 1537 + } __packed; 1538 + 1539 + #define HCI_OP_LE_EXT_CREATE_CONN 0x2043 1540 + struct hci_cp_le_ext_create_conn { 1541 + __u8 filter_policy; 1542 + __u8 own_addr_type; 1543 + __u8 peer_addr_type; 1544 + bdaddr_t peer_addr; 1545 + __u8 phys; 1546 + __u8 data[0]; 1547 + } __packed; 1548 + 1549 + struct hci_cp_le_ext_conn_param { 1550 + __le16 scan_interval; 1551 + __le16 scan_window; 1552 + __le16 conn_interval_min; 1553 + __le16 conn_interval_max; 1554 + __le16 conn_latency; 1555 + __le16 supervision_timeout; 1556 + __le16 min_ce_len; 1557 + __le16 max_ce_len; 1558 + } __packed; 1559 + 1560 + #define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b 1561 + struct hci_rp_le_read_num_supported_adv_sets { 1562 + __u8 status; 1563 + __u8 num_of_sets; 1564 + } __packed; 1565 + 1566 + #define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036 1567 + struct hci_cp_le_set_ext_adv_params { 1568 + __u8 handle; 1569 + __le16 evt_properties; 1570 + __u8 
min_interval[3]; 1571 + __u8 max_interval[3]; 1572 + __u8 channel_map; 1573 + __u8 own_addr_type; 1574 + __u8 peer_addr_type; 1575 + bdaddr_t peer_addr; 1576 + __u8 filter_policy; 1577 + __u8 tx_power; 1578 + __u8 primary_phy; 1579 + __u8 secondary_max_skip; 1580 + __u8 secondary_phy; 1581 + __u8 sid; 1582 + __u8 notif_enable; 1583 + } __packed; 1584 + 1585 + #define HCI_ADV_PHY_1M 0X01 1586 + #define HCI_ADV_PHY_2M 0x02 1587 + #define HCI_ADV_PHY_CODED 0x03 1588 + 1589 + struct hci_rp_le_set_ext_adv_params { 1590 + __u8 status; 1591 + __u8 tx_power; 1592 + } __packed; 1593 + 1594 + #define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039 1595 + struct hci_cp_le_set_ext_adv_enable { 1596 + __u8 enable; 1597 + __u8 num_of_sets; 1598 + __u8 data[0]; 1599 + } __packed; 1600 + 1601 + struct hci_cp_ext_adv_set { 1602 + __u8 handle; 1603 + __le16 duration; 1604 + __u8 max_events; 1605 + } __packed; 1606 + 1607 + #define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037 1608 + struct hci_cp_le_set_ext_adv_data { 1609 + __u8 handle; 1610 + __u8 operation; 1611 + __u8 frag_pref; 1612 + __u8 length; 1613 + __u8 data[HCI_MAX_AD_LENGTH]; 1614 + } __packed; 1615 + 1616 + #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038 1617 + struct hci_cp_le_set_ext_scan_rsp_data { 1618 + __u8 handle; 1619 + __u8 operation; 1620 + __u8 frag_pref; 1621 + __u8 length; 1622 + __u8 data[HCI_MAX_AD_LENGTH]; 1623 + } __packed; 1624 + 1625 + #define LE_SET_ADV_DATA_OP_COMPLETE 0x03 1626 + 1627 + #define LE_SET_ADV_DATA_NO_FRAG 0x01 1628 + 1629 + #define HCI_OP_LE_CLEAR_ADV_SETS 0x203d 1630 + 1631 + #define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035 1632 + struct hci_cp_le_set_adv_set_rand_addr { 1633 + __u8 handle; 1634 + bdaddr_t bdaddr; 1541 1635 } __packed; 1542 1636 1543 1637 /* ---- HCI Events ---- */ ··· 2055 1893 #define LE_ADV_SCAN_IND 0x02 2056 1894 #define LE_ADV_NONCONN_IND 0x03 2057 1895 #define LE_ADV_SCAN_RSP 0x04 1896 + #define LE_ADV_INVALID 0x05 1897 + 1898 + /* Legacy event types in extended adv report */ 1899 + 
#define LE_LEGACY_ADV_IND 0x0013 1900 + #define LE_LEGACY_ADV_DIRECT_IND 0x0015 1901 + #define LE_LEGACY_ADV_SCAN_IND 0x0012 1902 + #define LE_LEGACY_NONCONN_IND 0x0010 1903 + #define LE_LEGACY_SCAN_RSP_ADV 0x001b 1904 + #define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a 1905 + 1906 + /* Extended Advertising event types */ 1907 + #define LE_EXT_ADV_NON_CONN_IND 0x0000 1908 + #define LE_EXT_ADV_CONN_IND 0x0001 1909 + #define LE_EXT_ADV_SCAN_IND 0x0002 1910 + #define LE_EXT_ADV_DIRECT_IND 0x0004 1911 + #define LE_EXT_ADV_SCAN_RSP 0x0008 1912 + #define LE_EXT_ADV_LEGACY_PDU 0x0010 2058 1913 2059 1914 #define ADDR_LE_DEV_PUBLIC 0x00 2060 1915 #define ADDR_LE_DEV_RANDOM 0x01 ··· 2134 1955 __u8 direct_addr_type; 2135 1956 bdaddr_t direct_addr; 2136 1957 __s8 rssi; 1958 + } __packed; 1959 + 1960 + #define HCI_EV_LE_EXT_ADV_REPORT 0x0d 1961 + struct hci_ev_le_ext_adv_report { 1962 + __le16 evt_type; 1963 + __u8 bdaddr_type; 1964 + bdaddr_t bdaddr; 1965 + __u8 primary_phy; 1966 + __u8 secondary_phy; 1967 + __u8 sid; 1968 + __u8 tx_power; 1969 + __s8 rssi; 1970 + __le16 interval; 1971 + __u8 direct_addr_type; 1972 + bdaddr_t direct_addr; 1973 + __u8 length; 1974 + __u8 data[0]; 1975 + } __packed; 1976 + 1977 + #define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a 1978 + struct hci_ev_le_enh_conn_complete { 1979 + __u8 status; 1980 + __le16 handle; 1981 + __u8 role; 1982 + __u8 bdaddr_type; 1983 + bdaddr_t bdaddr; 1984 + bdaddr_t local_rpa; 1985 + bdaddr_t peer_rpa; 1986 + __le16 interval; 1987 + __le16 latency; 1988 + __le16 supervision_timeout; 1989 + __u8 clk_accurancy; 1990 + } __packed; 1991 + 1992 + #define HCI_EV_LE_EXT_ADV_SET_TERM 0x12 1993 + struct hci_evt_le_ext_adv_set_term { 1994 + __u8 status; 1995 + __u8 handle; 1996 + __le16 conn_handle; 1997 + __u8 num_evts; 2137 1998 } __packed; 2138 1999 2139 2000 /* Internal events generated by Bluetooth stack */
+34
include/net/bluetooth/hci_core.h
··· 171 171 __u8 adv_data[HCI_MAX_AD_LENGTH]; 172 172 __u16 scan_rsp_len; 173 173 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; 174 + __s8 tx_power; 175 + bdaddr_t random_addr; 176 + bool rpa_expired; 177 + struct delayed_work rpa_expired_cb; 174 178 }; 175 179 176 180 #define HCI_MAX_ADV_INSTANCES 5 ··· 225 221 __u8 features[HCI_MAX_PAGES][8]; 226 222 __u8 le_features[8]; 227 223 __u8 le_white_list_size; 224 + __u8 le_resolv_list_size; 225 + __u8 le_num_of_adv_sets; 228 226 __u8 le_states[8]; 229 227 __u8 commands[64]; 230 228 __u8 hci_ver; ··· 320 314 unsigned long sco_last_tx; 321 315 unsigned long le_last_tx; 322 316 317 + __u8 le_tx_def_phys; 318 + __u8 le_rx_def_phys; 319 + 323 320 struct workqueue_struct *workqueue; 324 321 struct workqueue_struct *req_workqueue; 325 322 ··· 376 367 struct list_head identity_resolving_keys; 377 368 struct list_head remote_oob_data; 378 369 struct list_head le_white_list; 370 + struct list_head le_resolv_list; 379 371 struct list_head le_conn_params; 380 372 struct list_head pend_le_conns; 381 373 struct list_head pend_le_reports; ··· 1116 1106 u16 scan_rsp_len, u8 *scan_rsp_data, 1117 1107 u16 timeout, u16 duration); 1118 1108 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); 1109 + void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); 1119 1110 1120 1111 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1121 1112 ··· 1147 1136 #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) 1148 1137 #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) 1149 1138 #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) 1139 + #define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M) 1140 + #define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M) 1141 + #define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT) 1142 + #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & 
LMP_EDR_5SLOT) 1150 1143 1151 1144 /* ----- Extended LMP capabilities ----- */ 1152 1145 #define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) ··· 1170 1155 !hci_dev_test_flag(dev, HCI_AUTO_OFF)) 1171 1156 #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ 1172 1157 hci_dev_test_flag(dev, HCI_SC_ENABLED)) 1158 + 1159 + #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ 1160 + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) 1161 + 1162 + #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ 1163 + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) 1164 + 1165 + #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ 1166 + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) 1167 + 1168 + /* Use ext scanning if set ext scan param and ext scan enable is supported */ 1169 + #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ 1170 + ((dev)->commands[37] & 0x40)) 1171 + /* Use ext create connection if command is supported */ 1172 + #define use_ext_conn(dev) ((dev)->commands[37] & 0x80) 1173 + 1174 + /* Extended advertising support */ 1175 + #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) 1173 1176 1174 1177 /* ----- HCI protocols ----- */ 1175 1178 #define HCI_PROTO_DEFER 0x01 ··· 1562 1529 u8 instance); 1563 1530 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, 1564 1531 u8 instance); 1532 + int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); 1565 1533 1566 1534 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, 1567 1535 u16 to_multiplier);
+55
include/net/bluetooth/mgmt.h
··· 101 101 #define MGMT_SETTING_PRIVACY 0x00002000 102 102 #define MGMT_SETTING_CONFIGURATION 0x00004000 103 103 #define MGMT_SETTING_STATIC_ADDRESS 0x00008000 104 + #define MGMT_SETTING_PHY_CONFIGURATION 0x00010000 104 105 105 106 #define MGMT_OP_READ_INFO 0x0004 106 107 #define MGMT_READ_INFO_SIZE 0 ··· 562 561 #define MGMT_ADV_FLAG_TX_POWER BIT(4) 563 562 #define MGMT_ADV_FLAG_APPEARANCE BIT(5) 564 563 #define MGMT_ADV_FLAG_LOCAL_NAME BIT(6) 564 + #define MGMT_ADV_FLAG_SEC_1M BIT(7) 565 + #define MGMT_ADV_FLAG_SEC_2M BIT(8) 566 + #define MGMT_ADV_FLAG_SEC_CODED BIT(9) 567 + 568 + #define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \ 569 + MGMT_ADV_FLAG_SEC_CODED) 565 570 566 571 #define MGMT_OP_REMOVE_ADVERTISING 0x003F 567 572 struct mgmt_cp_remove_advertising { ··· 610 603 __le16 appearance; 611 604 } __packed; 612 605 #define MGMT_SET_APPEARANCE_SIZE 2 606 + 607 + #define MGMT_OP_GET_PHY_CONFIGURATION 0x0044 608 + struct mgmt_rp_get_phy_confguration { 609 + __le32 supported_phys; 610 + __le32 configurable_phys; 611 + __le32 selected_phys; 612 + } __packed; 613 + #define MGMT_GET_PHY_CONFIGURATION_SIZE 0 614 + 615 + #define MGMT_PHY_BR_1M_1SLOT 0x00000001 616 + #define MGMT_PHY_BR_1M_3SLOT 0x00000002 617 + #define MGMT_PHY_BR_1M_5SLOT 0x00000004 618 + #define MGMT_PHY_EDR_2M_1SLOT 0x00000008 619 + #define MGMT_PHY_EDR_2M_3SLOT 0x00000010 620 + #define MGMT_PHY_EDR_2M_5SLOT 0x00000020 621 + #define MGMT_PHY_EDR_3M_1SLOT 0x00000040 622 + #define MGMT_PHY_EDR_3M_3SLOT 0x00000080 623 + #define MGMT_PHY_EDR_3M_5SLOT 0x00000100 624 + #define MGMT_PHY_LE_1M_TX 0x00000200 625 + #define MGMT_PHY_LE_1M_RX 0x00000400 626 + #define MGMT_PHY_LE_2M_TX 0x00000800 627 + #define MGMT_PHY_LE_2M_RX 0x00001000 628 + #define MGMT_PHY_LE_CODED_TX 0x00002000 629 + #define MGMT_PHY_LE_CODED_RX 0x00004000 630 + 631 + #define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \ 632 + MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \ 633 + 
MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \ 634 + MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \ 635 + MGMT_PHY_EDR_3M_5SLOT) 636 + #define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \ 637 + MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \ 638 + MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX) 639 + #define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \ 640 + MGMT_PHY_LE_CODED_TX) 641 + #define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \ 642 + MGMT_PHY_LE_CODED_RX) 643 + 644 + #define MGMT_OP_SET_PHY_CONFIGURATION 0x0045 645 + struct mgmt_cp_set_phy_confguration { 646 + __le32 selected_phys; 647 + } __packed; 648 + #define MGMT_SET_PHY_CONFIGURATION_SIZE 4 613 649 614 650 #define MGMT_EV_CMD_COMPLETE 0x0001 615 651 struct mgmt_ev_cmd_complete { ··· 873 823 struct mgmt_ev_ext_info_changed { 874 824 __le16 eir_len; 875 825 __u8 eir[0]; 826 + } __packed; 827 + 828 + #define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026 829 + struct mgmt_ev_phy_configuration_changed { 830 + __le32 selected_phys; 876 831 } __packed;
+1
net/6lowpan/iphc.c
··· 770 770 hdr.hop_limit, &hdr.daddr); 771 771 772 772 skb_push(skb, sizeof(hdr)); 773 + skb_reset_mac_header(skb); 773 774 skb_reset_network_header(skb); 774 775 skb_copy_to_linear_data(skb, &hdr, sizeof(hdr)); 775 776
+1 -1
net/bluetooth/af_bluetooth.c
··· 159 159 BT_DBG("parent %p, sk %p", parent, sk); 160 160 161 161 sock_hold(sk); 162 - lock_sock(sk); 162 + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 163 163 list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); 164 164 bt_sk(sk)->parent = parent; 165 165 release_sock(sk);
+150 -39
net/bluetooth/hci_conn.c
··· 748 748 return hci_dev_test_flag(hdev, HCI_PRIVACY); 749 749 } 750 750 751 + static void set_ext_conn_params(struct hci_conn *conn, 752 + struct hci_cp_le_ext_conn_param *p) 753 + { 754 + struct hci_dev *hdev = conn->hdev; 755 + 756 + memset(p, 0, sizeof(*p)); 757 + 758 + /* Set window to be the same value as the interval to 759 + * enable continuous scanning. 760 + */ 761 + p->scan_interval = cpu_to_le16(hdev->le_scan_interval); 762 + p->scan_window = p->scan_interval; 763 + p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); 764 + p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); 765 + p->conn_latency = cpu_to_le16(conn->le_conn_latency); 766 + p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); 767 + p->min_ce_len = cpu_to_le16(0x0000); 768 + p->max_ce_len = cpu_to_le16(0x0000); 769 + } 770 + 751 771 static void hci_req_add_le_create_conn(struct hci_request *req, 752 772 struct hci_conn *conn, 753 773 bdaddr_t *direct_rpa) 754 774 { 755 - struct hci_cp_le_create_conn cp; 756 775 struct hci_dev *hdev = conn->hdev; 757 776 u8 own_addr_type; 758 777 ··· 794 775 return; 795 776 } 796 777 797 - memset(&cp, 0, sizeof(cp)); 778 + if (use_ext_conn(hdev)) { 779 + struct hci_cp_le_ext_create_conn *cp; 780 + struct hci_cp_le_ext_conn_param *p; 781 + u8 data[sizeof(*cp) + sizeof(*p) * 3]; 782 + u32 plen; 798 783 799 - /* Set window to be the same value as the interval to enable 800 - * continuous scanning. 
801 - */ 802 - cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); 803 - cp.scan_window = cp.scan_interval; 784 + cp = (void *) data; 785 + p = (void *) cp->data; 804 786 805 - bacpy(&cp.peer_addr, &conn->dst); 806 - cp.peer_addr_type = conn->dst_type; 807 - cp.own_address_type = own_addr_type; 808 - cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); 809 - cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); 810 - cp.conn_latency = cpu_to_le16(conn->le_conn_latency); 811 - cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); 812 - cp.min_ce_len = cpu_to_le16(0x0000); 813 - cp.max_ce_len = cpu_to_le16(0x0000); 787 + memset(cp, 0, sizeof(*cp)); 814 788 815 - hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 789 + bacpy(&cp->peer_addr, &conn->dst); 790 + cp->peer_addr_type = conn->dst_type; 791 + cp->own_addr_type = own_addr_type; 792 + 793 + plen = sizeof(*cp); 794 + 795 + if (scan_1m(hdev)) { 796 + cp->phys |= LE_SCAN_PHY_1M; 797 + set_ext_conn_params(conn, p); 798 + 799 + p++; 800 + plen += sizeof(*p); 801 + } 802 + 803 + if (scan_2m(hdev)) { 804 + cp->phys |= LE_SCAN_PHY_2M; 805 + set_ext_conn_params(conn, p); 806 + 807 + p++; 808 + plen += sizeof(*p); 809 + } 810 + 811 + if (scan_coded(hdev)) { 812 + cp->phys |= LE_SCAN_PHY_CODED; 813 + set_ext_conn_params(conn, p); 814 + 815 + plen += sizeof(*p); 816 + } 817 + 818 + hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data); 819 + 820 + } else { 821 + struct hci_cp_le_create_conn cp; 822 + 823 + memset(&cp, 0, sizeof(cp)); 824 + 825 + /* Set window to be the same value as the interval to enable 826 + * continuous scanning. 
827 + */ 828 + cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); 829 + cp.scan_window = cp.scan_interval; 830 + 831 + bacpy(&cp.peer_addr, &conn->dst); 832 + cp.peer_addr_type = conn->dst_type; 833 + cp.own_address_type = own_addr_type; 834 + cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); 835 + cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); 836 + cp.conn_latency = cpu_to_le16(conn->le_conn_latency); 837 + cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); 838 + cp.min_ce_len = cpu_to_le16(0x0000); 839 + cp.max_ce_len = cpu_to_le16(0x0000); 840 + 841 + hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 842 + } 816 843 817 844 conn->state = BT_CONNECT; 818 845 clear_bit(HCI_CONN_SCANNING, &conn->flags); ··· 868 803 struct hci_conn *conn) 869 804 { 870 805 struct hci_dev *hdev = req->hdev; 871 - struct hci_cp_le_set_adv_param cp; 872 806 u8 own_addr_type; 873 807 u8 enable; 874 808 875 - /* Clear the HCI_LE_ADV bit temporarily so that the 876 - * hci_update_random_address knows that it's safe to go ahead 877 - * and write a new random address. The flag will be set back on 878 - * as soon as the SET_ADV_ENABLE HCI command completes. 879 - */ 880 - hci_dev_clear_flag(hdev, HCI_LE_ADV); 809 + if (ext_adv_capable(hdev)) { 810 + struct hci_cp_le_set_ext_adv_params cp; 811 + bdaddr_t random_addr; 881 812 882 - /* Set require_privacy to false so that the remote device has a 883 - * chance of identifying us. 884 - */ 885 - if (hci_update_random_address(req, false, conn_use_rpa(conn), 886 - &own_addr_type) < 0) 887 - return; 813 + /* Set require_privacy to false so that the remote device has a 814 + * chance of identifying us. 
815 + */ 816 + if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, 817 + &own_addr_type, &random_addr) < 0) 818 + return; 888 819 889 - memset(&cp, 0, sizeof(cp)); 890 - cp.type = LE_ADV_DIRECT_IND; 891 - cp.own_address_type = own_addr_type; 892 - cp.direct_addr_type = conn->dst_type; 893 - bacpy(&cp.direct_addr, &conn->dst); 894 - cp.channel_map = hdev->le_adv_channel_map; 820 + memset(&cp, 0, sizeof(cp)); 895 821 896 - hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); 822 + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); 823 + cp.own_addr_type = own_addr_type; 824 + cp.channel_map = hdev->le_adv_channel_map; 825 + cp.tx_power = HCI_TX_POWER_INVALID; 826 + cp.primary_phy = HCI_ADV_PHY_1M; 827 + cp.secondary_phy = HCI_ADV_PHY_1M; 828 + cp.handle = 0; /* Use instance 0 for directed adv */ 829 + cp.own_addr_type = own_addr_type; 830 + cp.peer_addr_type = conn->dst_type; 831 + bacpy(&cp.peer_addr, &conn->dst); 897 832 898 - enable = 0x01; 899 - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 833 + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); 834 + 835 + if (own_addr_type == ADDR_LE_DEV_RANDOM && 836 + bacmp(&random_addr, BDADDR_ANY) && 837 + bacmp(&random_addr, &hdev->random_addr)) { 838 + struct hci_cp_le_set_adv_set_rand_addr cp; 839 + 840 + memset(&cp, 0, sizeof(cp)); 841 + 842 + cp.handle = 0; 843 + bacpy(&cp.bdaddr, &random_addr); 844 + 845 + hci_req_add(req, 846 + HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 847 + sizeof(cp), &cp); 848 + } 849 + 850 + __hci_req_enable_ext_advertising(req); 851 + } else { 852 + struct hci_cp_le_set_adv_param cp; 853 + 854 + /* Clear the HCI_LE_ADV bit temporarily so that the 855 + * hci_update_random_address knows that it's safe to go ahead 856 + * and write a new random address. The flag will be set back on 857 + * as soon as the SET_ADV_ENABLE HCI command completes. 
858 + */ 859 + hci_dev_clear_flag(hdev, HCI_LE_ADV); 860 + 861 + /* Set require_privacy to false so that the remote device has a 862 + * chance of identifying us. 863 + */ 864 + if (hci_update_random_address(req, false, conn_use_rpa(conn), 865 + &own_addr_type) < 0) 866 + return; 867 + 868 + memset(&cp, 0, sizeof(cp)); 869 + cp.type = LE_ADV_DIRECT_IND; 870 + cp.own_address_type = own_addr_type; 871 + cp.direct_addr_type = conn->dst_type; 872 + bacpy(&cp.direct_addr, &conn->dst); 873 + cp.channel_map = hdev->le_adv_channel_map; 874 + 875 + hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); 876 + 877 + enable = 0x01; 878 + hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), 879 + &enable); 880 + } 900 881 901 882 conn->state = BT_CONNECT; 902 883 }
+97 -8
net/bluetooth/hci_core.c
··· 695 695 if (hdev->commands[35] & (0x20 | 0x40)) 696 696 events[1] |= 0x08; /* LE PHY Update Complete */ 697 697 698 + /* If the controller supports LE Set Extended Scan Parameters 699 + * and LE Set Extended Scan Enable commands, enable the 700 + * corresponding event. 701 + */ 702 + if (use_ext_scan(hdev)) 703 + events[1] |= 0x10; /* LE Extended Advertising 704 + * Report 705 + */ 706 + 707 + /* If the controller supports the LE Extended Create Connection 708 + * command, enable the corresponding event. 709 + */ 710 + if (use_ext_conn(hdev)) 711 + events[1] |= 0x02; /* LE Enhanced Connection 712 + * Complete 713 + */ 714 + 715 + /* If the controller supports the LE Extended Advertising 716 + * command, enable the corresponding event. 717 + */ 718 + if (ext_adv_capable(hdev)) 719 + events[2] |= 0x02; /* LE Advertising Set 720 + * Terminated 721 + */ 722 + 698 723 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), 699 724 events); 700 725 701 - if (hdev->commands[25] & 0x40) { 702 - /* Read LE Advertising Channel TX Power */ 726 + /* Read LE Advertising Channel TX Power */ 727 + if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { 728 + /* HCI TS spec forbids mixing of legacy and extended 729 + * advertising commands wherein READ_ADV_TX_POWER is 730 + * also included. So do not call it if extended adv 731 + * is supported otherwise controller will return 732 + * COMMAND_DISALLOWED for extended commands. 
733 + */ 703 734 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); 704 735 } 705 736 ··· 745 714 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); 746 715 } 747 716 717 + if (hdev->commands[34] & 0x40) { 718 + /* Read LE Resolving List Size */ 719 + hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE, 720 + 0, NULL); 721 + } 722 + 723 + if (hdev->commands[34] & 0x20) { 724 + /* Clear LE Resolving List */ 725 + hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); 726 + } 727 + 748 728 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { 749 729 /* Read LE Maximum Data Length */ 750 730 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); 751 731 752 732 /* Read LE Suggested Default Data Length */ 753 733 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); 734 + } 735 + 736 + if (ext_adv_capable(hdev)) { 737 + /* Read LE Number of Supported Advertising Sets */ 738 + hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 739 + 0, NULL); 754 740 } 755 741 756 742 hci_set_le_support(req); ··· 850 802 if (hdev->commands[35] & 0x20) { 851 803 struct hci_cp_le_set_default_phy cp; 852 804 853 - /* No transmitter PHY or receiver PHY preferences */ 854 - cp.all_phys = 0x03; 855 - cp.tx_phys = 0; 856 - cp.rx_phys = 0; 805 + cp.all_phys = 0x00; 806 + cp.tx_phys = hdev->le_tx_def_phys; 807 + cp.rx_phys = hdev->le_rx_def_phys; 857 808 858 809 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); 859 810 } ··· 1415 1368 atomic_set(&hdev->cmd_cnt, 1); 1416 1369 set_bit(HCI_INIT, &hdev->flags); 1417 1370 1418 - if (hci_dev_test_flag(hdev, HCI_SETUP)) { 1371 + if (hci_dev_test_flag(hdev, HCI_SETUP) || 1372 + test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { 1419 1373 hci_sock_dev_event(hdev, HCI_DEV_SETUP); 1420 1374 1421 1375 if (hdev->setup) ··· 1480 1432 if (!ret) { 1481 1433 hci_dev_hold(hdev); 1482 1434 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 1435 + hci_adv_instances_set_rpa_expired(hdev, true); 1483 1436 set_bit(HCI_UP, &hdev->flags); 1484 1437 
hci_sock_dev_event(hdev, HCI_DEV_UP); 1485 1438 hci_leds_update_powered(hdev, true); ··· 1636 1587 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) 1637 1588 cancel_delayed_work(&hdev->service_cache); 1638 1589 1639 - if (hci_dev_test_flag(hdev, HCI_MGMT)) 1590 + if (hci_dev_test_flag(hdev, HCI_MGMT)) { 1591 + struct adv_info *adv_instance; 1592 + 1640 1593 cancel_delayed_work_sync(&hdev->rpa_expired); 1594 + 1595 + list_for_each_entry(adv_instance, &hdev->adv_instances, list) 1596 + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 1597 + } 1641 1598 1642 1599 /* Avoid potential lockdep warnings from the *_flush() calls by 1643 1600 * ensuring the workqueue is empty up front. ··· 1952 1897 break; 1953 1898 1954 1899 case HCISETPTYPE: 1900 + if (hdev->pkt_type == (__u16) dr.dev_opt) 1901 + break; 1902 + 1955 1903 hdev->pkt_type = (__u16) dr.dev_opt; 1904 + mgmt_phy_configuration_changed(hdev, NULL); 1956 1905 break; 1957 1906 1958 1907 case HCISETACLMTU: ··· 2720 2661 hdev->cur_adv_instance = 0x00; 2721 2662 } 2722 2663 2664 + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 2665 + 2723 2666 list_del(&adv_instance->list); 2724 2667 kfree(adv_instance); 2725 2668 2726 2669 hdev->adv_instance_cnt--; 2727 2670 2728 2671 return 0; 2672 + } 2673 + 2674 + void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) 2675 + { 2676 + struct adv_info *adv_instance, *n; 2677 + 2678 + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) 2679 + adv_instance->rpa_expired = rpa_expired; 2729 2680 } 2730 2681 2731 2682 /* This function requires the caller holds hdev->lock */ ··· 2749 2680 } 2750 2681 2751 2682 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { 2683 + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 2752 2684 list_del(&adv_instance->list); 2753 2685 kfree(adv_instance); 2754 2686 } 2755 2687 2756 2688 hdev->adv_instance_cnt = 0; 2757 2689 hdev->cur_adv_instance = 0x00; 2690 + } 
2691 + 2692 + static void adv_instance_rpa_expired(struct work_struct *work) 2693 + { 2694 + struct adv_info *adv_instance = container_of(work, struct adv_info, 2695 + rpa_expired_cb.work); 2696 + 2697 + BT_DBG(""); 2698 + 2699 + adv_instance->rpa_expired = true; 2758 2700 } 2759 2701 2760 2702 /* This function requires the caller holds hdev->lock */ ··· 2815 2735 adv_instance->duration = HCI_DEFAULT_ADV_DURATION; 2816 2736 else 2817 2737 adv_instance->duration = duration; 2738 + 2739 + adv_instance->tx_power = HCI_TX_POWER_INVALID; 2740 + 2741 + INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, 2742 + adv_instance_rpa_expired); 2818 2743 2819 2744 BT_DBG("%s for %dMR", hdev->name, instance); 2820 2745 ··· 3084 2999 hdev->le_max_tx_time = 0x0148; 3085 3000 hdev->le_max_rx_len = 0x001b; 3086 3001 hdev->le_max_rx_time = 0x0148; 3002 + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; 3003 + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; 3087 3004 3088 3005 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3089 3006 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; ··· 3104 3017 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 3105 3018 INIT_LIST_HEAD(&hdev->remote_oob_data); 3106 3019 INIT_LIST_HEAD(&hdev->le_white_list); 3020 + INIT_LIST_HEAD(&hdev->le_resolv_list); 3107 3021 INIT_LIST_HEAD(&hdev->le_conn_params); 3108 3022 INIT_LIST_HEAD(&hdev->pend_le_conns); 3109 3023 INIT_LIST_HEAD(&hdev->pend_le_reports); ··· 3306 3218 hci_remote_oob_data_clear(hdev); 3307 3219 hci_adv_instances_clear(hdev); 3308 3220 hci_bdaddr_list_clear(&hdev->le_white_list); 3221 + hci_bdaddr_list_clear(&hdev->le_resolv_list); 3309 3222 hci_conn_params_clear_all(hdev); 3310 3223 hci_discovery_filter_clear(hdev); 3311 3224 hci_dev_unlock(hdev);
+19
net/bluetooth/hci_debugfs.c
··· 694 694 695 695 DEFINE_SHOW_ATTRIBUTE(white_list); 696 696 697 + static int resolv_list_show(struct seq_file *f, void *ptr) 698 + { 699 + struct hci_dev *hdev = f->private; 700 + struct bdaddr_list *b; 701 + 702 + hci_dev_lock(hdev); 703 + list_for_each_entry(b, &hdev->le_resolv_list, list) 704 + seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); 705 + hci_dev_unlock(hdev); 706 + 707 + return 0; 708 + } 709 + 710 + DEFINE_SHOW_ATTRIBUTE(resolv_list); 711 + 697 712 static int identity_resolving_keys_show(struct seq_file *f, void *ptr) 698 713 { 699 714 struct hci_dev *hdev = f->private; ··· 970 955 &hdev->le_white_list_size); 971 956 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, 972 957 &white_list_fops); 958 + debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs, 959 + &hdev->le_resolv_list_size); 960 + debugfs_create_file("resolv_list", 0444, hdev->debugfs, hdev, 961 + &resolv_list_fops); 973 962 debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs, 974 963 hdev, &identity_resolving_keys_fops); 975 964 debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev,
+490 -64
net/bluetooth/hci_event.c
··· 221 221 hdev->ssp_debug_mode = 0; 222 222 223 223 hci_bdaddr_list_clear(&hdev->le_white_list); 224 + hci_bdaddr_list_clear(&hdev->le_resolv_list); 224 225 } 225 226 226 227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev, ··· 1042 1041 hci_dev_unlock(hdev); 1043 1042 } 1044 1043 1044 + static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb) 1045 + { 1046 + __u8 status = *((__u8 *) skb->data); 1047 + struct hci_cp_le_set_default_phy *cp; 1048 + 1049 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1050 + 1051 + if (status) 1052 + return; 1053 + 1054 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); 1055 + if (!cp) 1056 + return; 1057 + 1058 + hci_dev_lock(hdev); 1059 + 1060 + hdev->le_tx_def_phys = cp->tx_phys; 1061 + hdev->le_rx_def_phys = cp->rx_phys; 1062 + 1063 + hci_dev_unlock(hdev); 1064 + } 1065 + 1066 + static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, 1067 + struct sk_buff *skb) 1068 + { 1069 + __u8 status = *((__u8 *) skb->data); 1070 + struct hci_cp_le_set_adv_set_rand_addr *cp; 1071 + struct adv_info *adv_instance; 1072 + 1073 + if (status) 1074 + return; 1075 + 1076 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); 1077 + if (!cp) 1078 + return; 1079 + 1080 + hci_dev_lock(hdev); 1081 + 1082 + if (!hdev->cur_adv_instance) { 1083 + /* Store in hdev for instance 0 (Set adv and Directed advs) */ 1084 + bacpy(&hdev->random_addr, &cp->bdaddr); 1085 + } else { 1086 + adv_instance = hci_find_adv_instance(hdev, 1087 + hdev->cur_adv_instance); 1088 + if (adv_instance) 1089 + bacpy(&adv_instance->random_addr, &cp->bdaddr); 1090 + } 1091 + 1092 + hci_dev_unlock(hdev); 1093 + } 1094 + 1045 1095 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) 1046 1096 { 1047 1097 __u8 *sent, status = *((__u8 *) skb->data); ··· 1112 1060 * timeout in case something goes wrong. 
1113 1061 */ 1114 1062 if (*sent) { 1063 + struct hci_conn *conn; 1064 + 1065 + hci_dev_set_flag(hdev, HCI_LE_ADV); 1066 + 1067 + conn = hci_lookup_le_connect(hdev); 1068 + if (conn) 1069 + queue_delayed_work(hdev->workqueue, 1070 + &conn->le_conn_timeout, 1071 + conn->conn_timeout); 1072 + } else { 1073 + hci_dev_clear_flag(hdev, HCI_LE_ADV); 1074 + } 1075 + 1076 + hci_dev_unlock(hdev); 1077 + } 1078 + 1079 + static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, 1080 + struct sk_buff *skb) 1081 + { 1082 + struct hci_cp_le_set_ext_adv_enable *cp; 1083 + struct hci_cp_ext_adv_set *adv_set; 1084 + __u8 status = *((__u8 *) skb->data); 1085 + 1086 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1087 + 1088 + if (status) 1089 + return; 1090 + 1091 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); 1092 + if (!cp) 1093 + return; 1094 + 1095 + adv_set = (void *) cp->data; 1096 + 1097 + hci_dev_lock(hdev); 1098 + 1099 + if (cp->enable) { 1115 1100 struct hci_conn *conn; 1116 1101 1117 1102 hci_dev_set_flag(hdev, HCI_LE_ADV); ··· 1186 1097 hci_dev_unlock(hdev); 1187 1098 } 1188 1099 1100 + static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, 1101 + struct sk_buff *skb) 1102 + { 1103 + struct hci_cp_le_set_ext_scan_params *cp; 1104 + __u8 status = *((__u8 *) skb->data); 1105 + struct hci_cp_le_scan_phy_params *phy_param; 1106 + 1107 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1108 + 1109 + if (status) 1110 + return; 1111 + 1112 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); 1113 + if (!cp) 1114 + return; 1115 + 1116 + phy_param = (void *)cp->data; 1117 + 1118 + hci_dev_lock(hdev); 1119 + 1120 + hdev->le_scan_type = phy_param->type; 1121 + 1122 + hci_dev_unlock(hdev); 1123 + } 1124 + 1189 1125 static bool has_pending_adv_report(struct hci_dev *hdev) 1190 1126 { 1191 1127 struct discovery_state *d = &hdev->discovery; ··· 1240 1126 d->last_adv_data_len = len; 1241 1127 } 1242 1128 1243 - static void 
hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1244 - struct sk_buff *skb) 1129 + static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) 1245 1130 { 1246 - struct hci_cp_le_set_scan_enable *cp; 1247 - __u8 status = *((__u8 *) skb->data); 1248 - 1249 - BT_DBG("%s status 0x%2.2x", hdev->name, status); 1250 - 1251 - if (status) 1252 - return; 1253 - 1254 - cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1255 - if (!cp) 1256 - return; 1257 - 1258 1131 hci_dev_lock(hdev); 1259 1132 1260 - switch (cp->enable) { 1133 + switch (enable) { 1261 1134 case LE_SCAN_ENABLE: 1262 1135 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1263 1136 if (hdev->le_scan_type == LE_SCAN_ACTIVE) ··· 1290 1189 1291 1190 default: 1292 1191 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", 1293 - cp->enable); 1192 + enable); 1294 1193 break; 1295 1194 } 1296 1195 1297 1196 hci_dev_unlock(hdev); 1197 + } 1198 + 1199 + static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1200 + struct sk_buff *skb) 1201 + { 1202 + struct hci_cp_le_set_scan_enable *cp; 1203 + __u8 status = *((__u8 *) skb->data); 1204 + 1205 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1206 + 1207 + if (status) 1208 + return; 1209 + 1210 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1211 + if (!cp) 1212 + return; 1213 + 1214 + le_set_scan_enable_complete(hdev, cp->enable); 1215 + } 1216 + 1217 + static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, 1218 + struct sk_buff *skb) 1219 + { 1220 + struct hci_cp_le_set_ext_scan_enable *cp; 1221 + __u8 status = *((__u8 *) skb->data); 1222 + 1223 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1224 + 1225 + if (status) 1226 + return; 1227 + 1228 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); 1229 + if (!cp) 1230 + return; 1231 + 1232 + le_set_scan_enable_complete(hdev, cp->enable); 1233 + } 1234 + 1235 + static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, 1236 + struct sk_buff *skb) 1237 + { 1238 + 
struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data; 1239 + 1240 + BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status, 1241 + rp->num_of_sets); 1242 + 1243 + if (rp->status) 1244 + return; 1245 + 1246 + hdev->le_num_of_adv_sets = rp->num_of_sets; 1298 1247 } 1299 1248 1300 1249 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, ··· 1457 1306 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1458 1307 } 1459 1308 1309 + static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev, 1310 + struct sk_buff *skb) 1311 + { 1312 + __u8 status = *((__u8 *) skb->data); 1313 + 1314 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1315 + 1316 + if (status) 1317 + return; 1318 + 1319 + hci_bdaddr_list_clear(&hdev->le_resolv_list); 1320 + } 1321 + 1322 + static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, 1323 + struct sk_buff *skb) 1324 + { 1325 + struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data; 1326 + 1327 + BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); 1328 + 1329 + if (rp->status) 1330 + return; 1331 + 1332 + hdev->le_resolv_list_size = rp->size; 1333 + } 1334 + 1460 1335 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, 1461 1336 struct sk_buff *skb) 1462 1337 { ··· 1549 1372 1550 1373 hci_dev_lock(hdev); 1551 1374 hdev->adv_addr_type = cp->own_address_type; 1375 + hci_dev_unlock(hdev); 1376 + } 1377 + 1378 + static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) 1379 + { 1380 + struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data; 1381 + struct hci_cp_le_set_ext_adv_params *cp; 1382 + struct adv_info *adv_instance; 1383 + 1384 + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1385 + 1386 + if (rp->status) 1387 + return; 1388 + 1389 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); 1390 + if (!cp) 1391 + return; 1392 + 1393 + hci_dev_lock(hdev); 1394 + hdev->adv_addr_type = cp->own_addr_type; 1395 + if 
(!hdev->cur_adv_instance) { 1396 + /* Store in hdev for instance 0 */ 1397 + hdev->adv_tx_power = rp->tx_power; 1398 + } else { 1399 + adv_instance = hci_find_adv_instance(hdev, 1400 + hdev->cur_adv_instance); 1401 + if (adv_instance) 1402 + adv_instance->tx_power = rp->tx_power; 1403 + } 1404 + /* Update adv data as tx power is known now */ 1405 + hci_req_update_adv_data(hdev, hdev->cur_adv_instance); 1552 1406 hci_dev_unlock(hdev); 1553 1407 } 1554 1408 ··· 2104 1896 hci_dev_unlock(hdev); 2105 1897 } 2106 1898 1899 + static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 1900 + u8 peer_addr_type, u8 own_address_type, 1901 + u8 filter_policy) 1902 + { 1903 + struct hci_conn *conn; 1904 + 1905 + conn = hci_conn_hash_lookup_le(hdev, peer_addr, 1906 + peer_addr_type); 1907 + if (!conn) 1908 + return; 1909 + 1910 + /* Store the initiator and responder address information which 1911 + * is needed for SMP. These values will not change during the 1912 + * lifetime of the connection. 1913 + */ 1914 + conn->init_addr_type = own_address_type; 1915 + if (own_address_type == ADDR_LE_DEV_RANDOM) 1916 + bacpy(&conn->init_addr, &hdev->random_addr); 1917 + else 1918 + bacpy(&conn->init_addr, &hdev->bdaddr); 1919 + 1920 + conn->resp_addr_type = peer_addr_type; 1921 + bacpy(&conn->resp_addr, peer_addr); 1922 + 1923 + /* We don't want the connection attempt to stick around 1924 + * indefinitely since LE doesn't have a page timeout concept 1925 + * like BR/EDR. Set a timer for any connection that doesn't use 1926 + * the white list for connecting. 
1927 + */ 1928 + if (filter_policy == HCI_LE_USE_PEER_ADDR) 1929 + queue_delayed_work(conn->hdev->workqueue, 1930 + &conn->le_conn_timeout, 1931 + conn->conn_timeout); 1932 + } 1933 + 2107 1934 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2108 1935 { 2109 1936 struct hci_cp_le_create_conn *cp; 2110 - struct hci_conn *conn; 2111 1937 2112 1938 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2113 1939 ··· 2158 1916 2159 1917 hci_dev_lock(hdev); 2160 1918 2161 - conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr, 2162 - cp->peer_addr_type); 2163 - if (!conn) 2164 - goto unlock; 1919 + cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 1920 + cp->own_address_type, cp->filter_policy); 2165 1921 2166 - /* Store the initiator and responder address information which 2167 - * is needed for SMP. These values will not change during the 2168 - * lifetime of the connection. 1922 + hci_dev_unlock(hdev); 1923 + } 1924 + 1925 + static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 1926 + { 1927 + struct hci_cp_le_ext_create_conn *cp; 1928 + 1929 + BT_DBG("%s status 0x%2.2x", hdev->name, status); 1930 + 1931 + /* All connection failure handling is taken care of by the 1932 + * hci_le_conn_failed function which is triggered by the HCI 1933 + * request completion callbacks used for connecting. 2169 1934 */ 2170 - conn->init_addr_type = cp->own_address_type; 2171 - if (cp->own_address_type == ADDR_LE_DEV_RANDOM) 2172 - bacpy(&conn->init_addr, &hdev->random_addr); 2173 - else 2174 - bacpy(&conn->init_addr, &hdev->bdaddr); 1935 + if (status) 1936 + return; 2175 1937 2176 - conn->resp_addr_type = cp->peer_addr_type; 2177 - bacpy(&conn->resp_addr, &cp->peer_addr); 1938 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 1939 + if (!cp) 1940 + return; 2178 1941 2179 - /* We don't want the connection attempt to stick around 2180 - * indefinitely since LE doesn't have a page timeout concept 2181 - * like BR/EDR. 
Set a timer for any connection that doesn't use 2182 - * the white list for connecting. 2183 - */ 2184 - if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) 2185 - queue_delayed_work(conn->hdev->workqueue, 2186 - &conn->le_conn_timeout, 2187 - conn->conn_timeout); 1942 + hci_dev_lock(hdev); 2188 1943 2189 - unlock: 1944 + cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 1945 + cp->own_addr_type, cp->filter_policy); 1946 + 2190 1947 hci_dev_unlock(hdev); 2191 1948 } 2192 1949 ··· 2859 2618 /* We should disregard the current RPA and generate a new one 2860 2619 * whenever the encryption procedure fails. 2861 2620 */ 2862 - if (ev->status && conn->type == LE_LINK) 2621 + if (ev->status && conn->type == LE_LINK) { 2863 2622 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 2623 + hci_adv_instances_set_rpa_expired(hdev, true); 2624 + } 2864 2625 2865 2626 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2866 2627 ··· 3258 3015 hci_cc_le_write_def_data_len(hdev, skb); 3259 3016 break; 3260 3017 3018 + case HCI_OP_LE_CLEAR_RESOLV_LIST: 3019 + hci_cc_le_clear_resolv_list(hdev, skb); 3020 + break; 3021 + 3022 + case HCI_OP_LE_READ_RESOLV_LIST_SIZE: 3023 + hci_cc_le_read_resolv_list_size(hdev, skb); 3024 + break; 3025 + 3261 3026 case HCI_OP_LE_READ_MAX_DATA_LEN: 3262 3027 hci_cc_le_read_max_data_len(hdev, skb); 3263 3028 break; ··· 3288 3037 3289 3038 case HCI_OP_WRITE_SSP_DEBUG_MODE: 3290 3039 hci_cc_write_ssp_debug_mode(hdev, skb); 3040 + break; 3041 + 3042 + case HCI_OP_LE_SET_EXT_SCAN_PARAMS: 3043 + hci_cc_le_set_ext_scan_param(hdev, skb); 3044 + break; 3045 + 3046 + case HCI_OP_LE_SET_EXT_SCAN_ENABLE: 3047 + hci_cc_le_set_ext_scan_enable(hdev, skb); 3048 + break; 3049 + 3050 + case HCI_OP_LE_SET_DEFAULT_PHY: 3051 + hci_cc_le_set_default_phy(hdev, skb); 3052 + break; 3053 + 3054 + case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS: 3055 + hci_cc_le_read_num_adv_sets(hdev, skb); 3056 + break; 3057 + 3058 + case HCI_OP_LE_SET_EXT_ADV_PARAMS: 3059 + hci_cc_set_ext_adv_param(hdev, 
skb); 3060 + break; 3061 + 3062 + case HCI_OP_LE_SET_EXT_ADV_ENABLE: 3063 + hci_cc_le_set_ext_adv_enable(hdev, skb); 3064 + break; 3065 + 3066 + case HCI_OP_LE_SET_ADV_SET_RAND_ADDR: 3067 + hci_cc_le_set_adv_set_random_addr(hdev, skb); 3291 3068 break; 3292 3069 3293 3070 default: ··· 3411 3132 3412 3133 case HCI_OP_LE_START_ENC: 3413 3134 hci_cs_le_start_enc(hdev, ev->status); 3135 + break; 3136 + 3137 + case HCI_OP_LE_EXT_CREATE_CONN: 3138 + hci_cs_le_ext_create_conn(hdev, ev->status); 3414 3139 break; 3415 3140 3416 3141 default: ··· 4743 4460 } 4744 4461 #endif 4745 4462 4746 - static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 4463 + static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 4464 + bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle, 4465 + u16 interval, u16 latency, u16 supervision_timeout) 4747 4466 { 4748 - struct hci_ev_le_conn_complete *ev = (void *) skb->data; 4749 4467 struct hci_conn_params *params; 4750 4468 struct hci_conn *conn; 4751 4469 struct smp_irk *irk; 4752 4470 u8 addr_type; 4753 - 4754 - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4755 4471 4756 4472 hci_dev_lock(hdev); 4757 4473 ··· 4761 4479 4762 4480 conn = hci_lookup_le_connect(hdev); 4763 4481 if (!conn) { 4764 - conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role); 4482 + conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 4765 4483 if (!conn) { 4766 4484 bt_dev_err(hdev, "no memory for new connection"); 4767 4485 goto unlock; 4768 4486 } 4769 4487 4770 - conn->dst_type = ev->bdaddr_type; 4488 + conn->dst_type = bdaddr_type; 4771 4489 4772 4490 /* If we didn't have a hci_conn object previously 4773 4491 * but we're in master role this must be something ··· 4778 4496 * initiator address based on the HCI_PRIVACY flag. 
4779 4497 */ 4780 4498 if (conn->out) { 4781 - conn->resp_addr_type = ev->bdaddr_type; 4782 - bacpy(&conn->resp_addr, &ev->bdaddr); 4499 + conn->resp_addr_type = bdaddr_type; 4500 + bacpy(&conn->resp_addr, bdaddr); 4783 4501 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 4784 4502 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 4785 4503 bacpy(&conn->init_addr, &hdev->rpa); ··· 4798 4516 * the advertising address type. 4799 4517 */ 4800 4518 conn->resp_addr_type = hdev->adv_addr_type; 4801 - if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) 4802 - bacpy(&conn->resp_addr, &hdev->random_addr); 4803 - else 4519 + if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 4520 + /* In case of ext adv, resp_addr will be updated in 4521 + * Adv Terminated event. 4522 + */ 4523 + if (!ext_adv_capable(hdev)) 4524 + bacpy(&conn->resp_addr, &hdev->random_addr); 4525 + } else { 4804 4526 bacpy(&conn->resp_addr, &hdev->bdaddr); 4527 + } 4805 4528 4806 - conn->init_addr_type = ev->bdaddr_type; 4807 - bacpy(&conn->init_addr, &ev->bdaddr); 4529 + conn->init_addr_type = bdaddr_type; 4530 + bacpy(&conn->init_addr, bdaddr); 4808 4531 4809 4532 /* For incoming connections, set the default minimum 4810 4533 * and maximum connection interval. 
They will be used ··· 4835 4548 conn->dst_type = irk->addr_type; 4836 4549 } 4837 4550 4838 - if (ev->status) { 4839 - hci_le_conn_failed(conn, ev->status); 4551 + if (status) { 4552 + hci_le_conn_failed(conn, status); 4840 4553 goto unlock; 4841 4554 } 4842 4555 ··· 4855 4568 mgmt_device_connected(hdev, conn, 0, NULL, 0); 4856 4569 4857 4570 conn->sec_level = BT_SECURITY_LOW; 4858 - conn->handle = __le16_to_cpu(ev->handle); 4571 + conn->handle = handle; 4859 4572 conn->state = BT_CONFIG; 4860 4573 4861 - conn->le_conn_interval = le16_to_cpu(ev->interval); 4862 - conn->le_conn_latency = le16_to_cpu(ev->latency); 4863 - conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 4574 + conn->le_conn_interval = interval; 4575 + conn->le_conn_latency = latency; 4576 + conn->le_supv_timeout = supervision_timeout; 4864 4577 4865 4578 hci_debugfs_create_conn(conn); 4866 4579 hci_conn_add_sysfs(conn); 4867 4580 4868 - if (!ev->status) { 4581 + if (!status) { 4869 4582 /* The remote features procedure is defined for master 4870 4583 * role only. So only in case of an initiated connection 4871 4584 * request the remote features. 
··· 4887 4600 hci_conn_hold(conn); 4888 4601 } else { 4889 4602 conn->state = BT_CONNECTED; 4890 - hci_connect_cfm(conn, ev->status); 4603 + hci_connect_cfm(conn, status); 4891 4604 } 4892 4605 } else { 4893 - hci_connect_cfm(conn, ev->status); 4606 + hci_connect_cfm(conn, status); 4894 4607 } 4895 4608 4896 4609 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, ··· 4907 4620 unlock: 4908 4621 hci_update_background_scan(hdev); 4909 4622 hci_dev_unlock(hdev); 4623 + } 4624 + 4625 + static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 4626 + { 4627 + struct hci_ev_le_conn_complete *ev = (void *) skb->data; 4628 + 4629 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4630 + 4631 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 4632 + ev->role, le16_to_cpu(ev->handle), 4633 + le16_to_cpu(ev->interval), 4634 + le16_to_cpu(ev->latency), 4635 + le16_to_cpu(ev->supervision_timeout)); 4636 + } 4637 + 4638 + static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, 4639 + struct sk_buff *skb) 4640 + { 4641 + struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; 4642 + 4643 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4644 + 4645 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 4646 + ev->role, le16_to_cpu(ev->handle), 4647 + le16_to_cpu(ev->interval), 4648 + le16_to_cpu(ev->latency), 4649 + le16_to_cpu(ev->supervision_timeout)); 4650 + } 4651 + 4652 + static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) 4653 + { 4654 + struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; 4655 + struct hci_conn *conn; 4656 + 4657 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4658 + 4659 + if (ev->status) 4660 + return; 4661 + 4662 + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 4663 + if (conn) { 4664 + struct adv_info *adv_instance; 4665 + 4666 + if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM) 4667 + 
return; 4668 + 4669 + if (!hdev->cur_adv_instance) { 4670 + bacpy(&conn->resp_addr, &hdev->random_addr); 4671 + return; 4672 + } 4673 + 4674 + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); 4675 + if (adv_instance) 4676 + bacpy(&conn->resp_addr, &adv_instance->random_addr); 4677 + } 4910 4678 } 4911 4679 4912 4680 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, ··· 5299 4957 hci_dev_unlock(hdev); 5300 4958 } 5301 4959 4960 + static u8 ext_evt_type_to_legacy(u16 evt_type) 4961 + { 4962 + if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 4963 + switch (evt_type) { 4964 + case LE_LEGACY_ADV_IND: 4965 + return LE_ADV_IND; 4966 + case LE_LEGACY_ADV_DIRECT_IND: 4967 + return LE_ADV_DIRECT_IND; 4968 + case LE_LEGACY_ADV_SCAN_IND: 4969 + return LE_ADV_SCAN_IND; 4970 + case LE_LEGACY_NONCONN_IND: 4971 + return LE_ADV_NONCONN_IND; 4972 + case LE_LEGACY_SCAN_RSP_ADV: 4973 + case LE_LEGACY_SCAN_RSP_ADV_SCAN: 4974 + return LE_ADV_SCAN_RSP; 4975 + } 4976 + 4977 + BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x", 4978 + evt_type); 4979 + 4980 + return LE_ADV_INVALID; 4981 + } 4982 + 4983 + if (evt_type & LE_EXT_ADV_CONN_IND) { 4984 + if (evt_type & LE_EXT_ADV_DIRECT_IND) 4985 + return LE_ADV_DIRECT_IND; 4986 + 4987 + return LE_ADV_IND; 4988 + } 4989 + 4990 + if (evt_type & LE_EXT_ADV_SCAN_RSP) 4991 + return LE_ADV_SCAN_RSP; 4992 + 4993 + if (evt_type & LE_EXT_ADV_SCAN_IND) 4994 + return LE_ADV_SCAN_IND; 4995 + 4996 + if (evt_type == LE_EXT_ADV_NON_CONN_IND || 4997 + evt_type & LE_EXT_ADV_DIRECT_IND) 4998 + return LE_ADV_NONCONN_IND; 4999 + 5000 + BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x", 5001 + evt_type); 5002 + 5003 + return LE_ADV_INVALID; 5004 + } 5005 + 5006 + static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5007 + { 5008 + u8 num_reports = skb->data[0]; 5009 + void *ptr = &skb->data[1]; 5010 + 5011 + hci_dev_lock(hdev); 5012 + 5013 + while (num_reports--) { 5014 + struct 
hci_ev_le_ext_adv_report *ev = ptr; 5015 + u8 legacy_evt_type; 5016 + u16 evt_type; 5017 + 5018 + evt_type = __le16_to_cpu(ev->evt_type); 5019 + legacy_evt_type = ext_evt_type_to_legacy(evt_type); 5020 + if (legacy_evt_type != LE_ADV_INVALID) { 5021 + process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, 5022 + ev->bdaddr_type, NULL, 0, ev->rssi, 5023 + ev->data, ev->length); 5024 + } 5025 + 5026 + ptr += sizeof(*ev) + ev->length + 1; 5027 + } 5028 + 5029 + hci_dev_unlock(hdev); 5030 + } 5031 + 5302 5032 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, 5303 5033 struct sk_buff *skb) 5304 5034 { ··· 5601 5187 5602 5188 case HCI_EV_LE_DIRECT_ADV_REPORT: 5603 5189 hci_le_direct_adv_report_evt(hdev, skb); 5190 + break; 5191 + 5192 + case HCI_EV_LE_EXT_ADV_REPORT: 5193 + hci_le_ext_adv_report_evt(hdev, skb); 5194 + break; 5195 + 5196 + case HCI_EV_LE_ENHANCED_CONN_COMPLETE: 5197 + hci_le_enh_conn_complete_evt(hdev, skb); 5198 + break; 5199 + 5200 + case HCI_EV_LE_EXT_ADV_SET_TERM: 5201 + hci_le_ext_adv_term_evt(hdev, skb); 5604 5202 break; 5605 5203 5606 5204 default:
+524 -90
net/bluetooth/hci_request.c
··· 647 647 648 648 void hci_req_add_le_scan_disable(struct hci_request *req) 649 649 { 650 - struct hci_cp_le_set_scan_enable cp; 650 + struct hci_dev *hdev = req->hdev; 651 651 652 - memset(&cp, 0, sizeof(cp)); 653 - cp.enable = LE_SCAN_DISABLE; 654 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 652 + if (use_ext_scan(hdev)) { 653 + struct hci_cp_le_set_ext_scan_enable cp; 654 + 655 + memset(&cp, 0, sizeof(cp)); 656 + cp.enable = LE_SCAN_DISABLE; 657 + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), 658 + &cp); 659 + } else { 660 + struct hci_cp_le_set_scan_enable cp; 661 + 662 + memset(&cp, 0, sizeof(cp)); 663 + cp.enable = LE_SCAN_DISABLE; 664 + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 665 + } 655 666 } 656 667 657 668 static void add_to_white_list(struct hci_request *req, ··· 778 767 return hci_dev_test_flag(hdev, HCI_PRIVACY); 779 768 } 780 769 770 + static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, 771 + u16 window, u8 own_addr_type, u8 filter_policy) 772 + { 773 + struct hci_dev *hdev = req->hdev; 774 + 775 + /* Use ext scanning if set ext scan param and ext scan enable is 776 + * supported 777 + */ 778 + if (use_ext_scan(hdev)) { 779 + struct hci_cp_le_set_ext_scan_params *ext_param_cp; 780 + struct hci_cp_le_set_ext_scan_enable ext_enable_cp; 781 + struct hci_cp_le_scan_phy_params *phy_params; 782 + u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2]; 783 + u32 plen; 784 + 785 + ext_param_cp = (void *)data; 786 + phy_params = (void *)ext_param_cp->data; 787 + 788 + memset(ext_param_cp, 0, sizeof(*ext_param_cp)); 789 + ext_param_cp->own_addr_type = own_addr_type; 790 + ext_param_cp->filter_policy = filter_policy; 791 + 792 + plen = sizeof(*ext_param_cp); 793 + 794 + if (scan_1m(hdev) || scan_2m(hdev)) { 795 + ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; 796 + 797 + memset(phy_params, 0, sizeof(*phy_params)); 798 + phy_params->type = type; 799 + phy_params->interval = 
cpu_to_le16(interval); 800 + phy_params->window = cpu_to_le16(window); 801 + 802 + plen += sizeof(*phy_params); 803 + phy_params++; 804 + } 805 + 806 + if (scan_coded(hdev)) { 807 + ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; 808 + 809 + memset(phy_params, 0, sizeof(*phy_params)); 810 + phy_params->type = type; 811 + phy_params->interval = cpu_to_le16(interval); 812 + phy_params->window = cpu_to_le16(window); 813 + 814 + plen += sizeof(*phy_params); 815 + phy_params++; 816 + } 817 + 818 + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, 819 + plen, ext_param_cp); 820 + 821 + memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); 822 + ext_enable_cp.enable = LE_SCAN_ENABLE; 823 + ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 824 + 825 + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, 826 + sizeof(ext_enable_cp), &ext_enable_cp); 827 + } else { 828 + struct hci_cp_le_set_scan_param param_cp; 829 + struct hci_cp_le_set_scan_enable enable_cp; 830 + 831 + memset(&param_cp, 0, sizeof(param_cp)); 832 + param_cp.type = type; 833 + param_cp.interval = cpu_to_le16(interval); 834 + param_cp.window = cpu_to_le16(window); 835 + param_cp.own_address_type = own_addr_type; 836 + param_cp.filter_policy = filter_policy; 837 + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 838 + &param_cp); 839 + 840 + memset(&enable_cp, 0, sizeof(enable_cp)); 841 + enable_cp.enable = LE_SCAN_ENABLE; 842 + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 843 + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), 844 + &enable_cp); 845 + } 846 + } 847 + 781 848 void hci_req_add_le_passive_scan(struct hci_request *req) 782 849 { 783 - struct hci_cp_le_set_scan_param param_cp; 784 - struct hci_cp_le_set_scan_enable enable_cp; 785 850 struct hci_dev *hdev = req->hdev; 786 851 u8 own_addr_type; 787 852 u8 filter_policy; ··· 891 804 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) 892 805 filter_policy |= 0x02; 893 806 894 - memset(&param_cp, 0, sizeof(param_cp)); 895 - 
param_cp.type = LE_SCAN_PASSIVE; 896 - param_cp.interval = cpu_to_le16(hdev->le_scan_interval); 897 - param_cp.window = cpu_to_le16(hdev->le_scan_window); 898 - param_cp.own_address_type = own_addr_type; 899 - param_cp.filter_policy = filter_policy; 900 - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 901 - &param_cp); 807 + hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval, 808 + hdev->le_scan_window, own_addr_type, filter_policy); 809 + } 902 810 903 - memset(&enable_cp, 0, sizeof(enable_cp)); 904 - enable_cp.enable = LE_SCAN_ENABLE; 905 - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 906 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), 907 - &enable_cp); 811 + static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) 812 + { 813 + struct adv_info *adv_instance; 814 + 815 + /* Ignore instance 0 */ 816 + if (instance == 0x00) 817 + return 0; 818 + 819 + adv_instance = hci_find_adv_instance(hdev, instance); 820 + if (!adv_instance) 821 + return 0; 822 + 823 + /* TODO: Take into account the "appearance" and "local-name" flags here. 824 + * These are currently being ignored as they are not supported. 
825 + */ 826 + return adv_instance->scan_rsp_len; 908 827 } 909 828 910 829 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) ··· 934 841 935 842 void __hci_req_disable_advertising(struct hci_request *req) 936 843 { 937 - u8 enable = 0x00; 844 + if (ext_adv_capable(req->hdev)) { 845 + struct hci_cp_le_set_ext_adv_enable cp; 938 846 939 - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 847 + cp.enable = 0x00; 848 + /* Disable all sets since we only support one set at the moment */ 849 + cp.num_of_sets = 0x00; 850 + 851 + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp); 852 + } else { 853 + u8 enable = 0x00; 854 + 855 + hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 856 + } 940 857 } 941 858 942 859 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) ··· 1184 1081 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) 1185 1082 { 1186 1083 struct hci_dev *hdev = req->hdev; 1187 - struct hci_cp_le_set_scan_rsp_data cp; 1188 1084 u8 len; 1189 1085 1190 1086 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 1191 1087 return; 1192 1088 1193 - memset(&cp, 0, sizeof(cp)); 1089 + if (ext_adv_capable(hdev)) { 1090 + struct hci_cp_le_set_ext_scan_rsp_data cp; 1194 1091 1195 - if (instance) 1196 - len = create_instance_scan_rsp_data(hdev, instance, cp.data); 1197 - else 1198 - len = create_default_scan_rsp_data(hdev, cp.data); 1092 + memset(&cp, 0, sizeof(cp)); 1199 1093 1200 - if (hdev->scan_rsp_data_len == len && 1201 - !memcmp(cp.data, hdev->scan_rsp_data, len)) 1202 - return; 1094 + if (instance) 1095 + len = create_instance_scan_rsp_data(hdev, instance, 1096 + cp.data); 1097 + else 1098 + len = create_default_scan_rsp_data(hdev, cp.data); 1203 1099 1204 - memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); 1205 - hdev->scan_rsp_data_len = len; 1100 + if (hdev->scan_rsp_data_len == len && 1101 + !memcmp(cp.data, hdev->scan_rsp_data, len)) 1102 + return; 1206 1103 
1207 - cp.length = len; 1104 + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); 1105 + hdev->scan_rsp_data_len = len; 1208 1106 1209 - hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); 1107 + cp.handle = 0; 1108 + cp.length = len; 1109 + cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; 1110 + cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; 1111 + 1112 + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp), 1113 + &cp); 1114 + } else { 1115 + struct hci_cp_le_set_scan_rsp_data cp; 1116 + 1117 + memset(&cp, 0, sizeof(cp)); 1118 + 1119 + if (instance) 1120 + len = create_instance_scan_rsp_data(hdev, instance, 1121 + cp.data); 1122 + else 1123 + len = create_default_scan_rsp_data(hdev, cp.data); 1124 + 1125 + if (hdev->scan_rsp_data_len == len && 1126 + !memcmp(cp.data, hdev->scan_rsp_data, len)) 1127 + return; 1128 + 1129 + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); 1130 + hdev->scan_rsp_data_len = len; 1131 + 1132 + cp.length = len; 1133 + 1134 + hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); 1135 + } 1210 1136 } 1211 1137 1212 1138 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) ··· 1292 1160 ptr += adv_instance->adv_data_len; 1293 1161 } 1294 1162 1295 - /* Provide Tx Power only if we can provide a valid value for it */ 1296 - if (hdev->adv_tx_power != HCI_TX_POWER_INVALID && 1297 - (instance_flags & MGMT_ADV_FLAG_TX_POWER)) { 1298 - ptr[0] = 0x02; 1299 - ptr[1] = EIR_TX_POWER; 1300 - ptr[2] = (u8)hdev->adv_tx_power; 1163 + if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { 1164 + s8 adv_tx_power; 1301 1165 1302 - ad_len += 3; 1303 - ptr += 3; 1166 + if (ext_adv_capable(hdev)) { 1167 + if (adv_instance) 1168 + adv_tx_power = adv_instance->tx_power; 1169 + else 1170 + adv_tx_power = hdev->adv_tx_power; 1171 + } else { 1172 + adv_tx_power = hdev->adv_tx_power; 1173 + } 1174 + 1175 + /* Provide Tx Power only if we can provide a valid value for it */ 1176 + if (adv_tx_power != HCI_TX_POWER_INVALID) 
{ 1177 + ptr[0] = 0x02; 1178 + ptr[1] = EIR_TX_POWER; 1179 + ptr[2] = (u8)adv_tx_power; 1180 + 1181 + ad_len += 3; 1182 + ptr += 3; 1183 + } 1304 1184 } 1305 1185 1306 1186 return ad_len; ··· 1321 1177 void __hci_req_update_adv_data(struct hci_request *req, u8 instance) 1322 1178 { 1323 1179 struct hci_dev *hdev = req->hdev; 1324 - struct hci_cp_le_set_adv_data cp; 1325 1180 u8 len; 1326 1181 1327 1182 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 1328 1183 return; 1329 1184 1330 - memset(&cp, 0, sizeof(cp)); 1185 + if (ext_adv_capable(hdev)) { 1186 + struct hci_cp_le_set_ext_adv_data cp; 1331 1187 1332 - len = create_instance_adv_data(hdev, instance, cp.data); 1188 + memset(&cp, 0, sizeof(cp)); 1333 1189 1334 - /* There's nothing to do if the data hasn't changed */ 1335 - if (hdev->adv_data_len == len && 1336 - memcmp(cp.data, hdev->adv_data, len) == 0) 1337 - return; 1190 + len = create_instance_adv_data(hdev, instance, cp.data); 1338 1191 1339 - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); 1340 - hdev->adv_data_len = len; 1192 + /* There's nothing to do if the data hasn't changed */ 1193 + if (hdev->adv_data_len == len && 1194 + memcmp(cp.data, hdev->adv_data, len) == 0) 1195 + return; 1341 1196 1342 - cp.length = len; 1197 + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); 1198 + hdev->adv_data_len = len; 1343 1199 1344 - hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); 1200 + cp.length = len; 1201 + cp.handle = 0; 1202 + cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; 1203 + cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; 1204 + 1205 + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp); 1206 + } else { 1207 + struct hci_cp_le_set_adv_data cp; 1208 + 1209 + memset(&cp, 0, sizeof(cp)); 1210 + 1211 + len = create_instance_adv_data(hdev, instance, cp.data); 1212 + 1213 + /* There's nothing to do if the data hasn't changed */ 1214 + if (hdev->adv_data_len == len && 1215 + memcmp(cp.data, hdev->adv_data, len) == 0) 1216 + return; 1217 + 1218 + 
memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); 1219 + hdev->adv_data_len = len; 1220 + 1221 + cp.length = len; 1222 + 1223 + hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); 1224 + } 1345 1225 } 1346 1226 1347 1227 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) ··· 1397 1229 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, 1398 1230 true); 1399 1231 } else { 1400 - __hci_req_update_adv_data(&req, 0x00); 1401 - __hci_req_update_scan_rsp_data(&req, 0x00); 1402 - __hci_req_enable_advertising(&req); 1232 + if (ext_adv_capable(hdev)) { 1233 + __hci_req_start_ext_adv(&req, 0x00); 1234 + } else { 1235 + __hci_req_update_adv_data(&req, 0x00); 1236 + __hci_req_update_scan_rsp_data(&req, 0x00); 1237 + __hci_req_enable_advertising(&req); 1238 + } 1403 1239 } 1404 1240 1405 1241 hci_req_run(&req, adv_enable_complete); ··· 1438 1266 1439 1267 unlock: 1440 1268 hci_dev_unlock(hdev); 1269 + } 1270 + 1271 + int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, 1272 + bool use_rpa, struct adv_info *adv_instance, 1273 + u8 *own_addr_type, bdaddr_t *rand_addr) 1274 + { 1275 + int err; 1276 + 1277 + bacpy(rand_addr, BDADDR_ANY); 1278 + 1279 + /* If privacy is enabled use a resolvable private address. If 1280 + * current RPA has expired then generate a new one. 
1281 + */ 1282 + if (use_rpa) { 1283 + int to; 1284 + 1285 + *own_addr_type = ADDR_LE_DEV_RANDOM; 1286 + 1287 + if (adv_instance) { 1288 + if (!adv_instance->rpa_expired && 1289 + !bacmp(&adv_instance->random_addr, &hdev->rpa)) 1290 + return 0; 1291 + 1292 + adv_instance->rpa_expired = false; 1293 + } else { 1294 + if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && 1295 + !bacmp(&hdev->random_addr, &hdev->rpa)) 1296 + return 0; 1297 + } 1298 + 1299 + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); 1300 + if (err < 0) { 1301 + BT_ERR("%s failed to generate new RPA", hdev->name); 1302 + return err; 1303 + } 1304 + 1305 + bacpy(rand_addr, &hdev->rpa); 1306 + 1307 + to = msecs_to_jiffies(hdev->rpa_timeout * 1000); 1308 + if (adv_instance) 1309 + queue_delayed_work(hdev->workqueue, 1310 + &adv_instance->rpa_expired_cb, to); 1311 + else 1312 + queue_delayed_work(hdev->workqueue, 1313 + &hdev->rpa_expired, to); 1314 + 1315 + return 0; 1316 + } 1317 + 1318 + /* In case of required privacy without resolvable private address, 1319 + * use an non-resolvable private address. This is useful for 1320 + * non-connectable advertising. 1321 + */ 1322 + if (require_privacy) { 1323 + bdaddr_t nrpa; 1324 + 1325 + while (true) { 1326 + /* The non-resolvable private address is generated 1327 + * from random six bytes with the two most significant 1328 + * bits cleared. 1329 + */ 1330 + get_random_bytes(&nrpa, 6); 1331 + nrpa.b[5] &= 0x3f; 1332 + 1333 + /* The non-resolvable private address shall not be 1334 + * equal to the public address. 1335 + */ 1336 + if (bacmp(&hdev->bdaddr, &nrpa)) 1337 + break; 1338 + } 1339 + 1340 + *own_addr_type = ADDR_LE_DEV_RANDOM; 1341 + bacpy(rand_addr, &nrpa); 1342 + 1343 + return 0; 1344 + } 1345 + 1346 + /* No privacy so use a public address. 
*/ 1347 + *own_addr_type = ADDR_LE_DEV_PUBLIC; 1348 + 1349 + return 0; 1350 + } 1351 + 1352 + void __hci_req_clear_ext_adv_sets(struct hci_request *req) 1353 + { 1354 + hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); 1355 + } 1356 + 1357 + int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) 1358 + { 1359 + struct hci_cp_le_set_ext_adv_params cp; 1360 + struct hci_dev *hdev = req->hdev; 1361 + bool connectable; 1362 + u32 flags; 1363 + bdaddr_t random_addr; 1364 + u8 own_addr_type; 1365 + int err; 1366 + struct adv_info *adv_instance; 1367 + bool secondary_adv; 1368 + /* In ext adv set param interval is 3 octets */ 1369 + const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; 1370 + 1371 + if (instance > 0) { 1372 + adv_instance = hci_find_adv_instance(hdev, instance); 1373 + if (!adv_instance) 1374 + return -EINVAL; 1375 + } else { 1376 + adv_instance = NULL; 1377 + } 1378 + 1379 + flags = get_adv_instance_flags(hdev, instance); 1380 + 1381 + /* If the "connectable" instance flag was not set, then choose between 1382 + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 1383 + */ 1384 + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || 1385 + mgmt_get_connectable(hdev); 1386 + 1387 + if (!is_advertising_allowed(hdev, connectable)) 1388 + return -EPERM; 1389 + 1390 + /* Set require_privacy to true only when non-connectable 1391 + * advertising is used. In that case it is fine to use a 1392 + * non-resolvable private address. 
1393 + */ 1394 + err = hci_get_random_address(hdev, !connectable, 1395 + adv_use_rpa(hdev, flags), adv_instance, 1396 + &own_addr_type, &random_addr); 1397 + if (err < 0) 1398 + return err; 1399 + 1400 + memset(&cp, 0, sizeof(cp)); 1401 + 1402 + memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); 1403 + memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); 1404 + 1405 + secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); 1406 + 1407 + if (connectable) { 1408 + if (secondary_adv) 1409 + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); 1410 + else 1411 + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); 1412 + } else if (get_adv_instance_scan_rsp_len(hdev, instance)) { 1413 + if (secondary_adv) 1414 + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); 1415 + else 1416 + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); 1417 + } else { 1418 + if (secondary_adv) 1419 + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); 1420 + else 1421 + cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); 1422 + } 1423 + 1424 + cp.own_addr_type = own_addr_type; 1425 + cp.channel_map = hdev->le_adv_channel_map; 1426 + cp.tx_power = 127; 1427 + cp.handle = 0; 1428 + 1429 + if (flags & MGMT_ADV_FLAG_SEC_2M) { 1430 + cp.primary_phy = HCI_ADV_PHY_1M; 1431 + cp.secondary_phy = HCI_ADV_PHY_2M; 1432 + } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { 1433 + cp.primary_phy = HCI_ADV_PHY_CODED; 1434 + cp.secondary_phy = HCI_ADV_PHY_CODED; 1435 + } else { 1436 + /* In all other cases use 1M */ 1437 + cp.primary_phy = HCI_ADV_PHY_1M; 1438 + cp.secondary_phy = HCI_ADV_PHY_1M; 1439 + } 1440 + 1441 + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); 1442 + 1443 + if (own_addr_type == ADDR_LE_DEV_RANDOM && 1444 + bacmp(&random_addr, BDADDR_ANY)) { 1445 + struct hci_cp_le_set_adv_set_rand_addr cp; 1446 + 1447 + /* Check if random address need to be updated */ 1448 + if (adv_instance) { 1449 + if (!bacmp(&random_addr, 
&adv_instance->random_addr)) 1450 + return 0; 1451 + } else { 1452 + if (!bacmp(&random_addr, &hdev->random_addr)) 1453 + return 0; 1454 + } 1455 + 1456 + memset(&cp, 0, sizeof(cp)); 1457 + 1458 + cp.handle = 0; 1459 + bacpy(&cp.bdaddr, &random_addr); 1460 + 1461 + hci_req_add(req, 1462 + HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 1463 + sizeof(cp), &cp); 1464 + } 1465 + 1466 + return 0; 1467 + } 1468 + 1469 + void __hci_req_enable_ext_advertising(struct hci_request *req) 1470 + { 1471 + struct hci_cp_le_set_ext_adv_enable *cp; 1472 + struct hci_cp_ext_adv_set *adv_set; 1473 + u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; 1474 + 1475 + cp = (void *) data; 1476 + adv_set = (void *) cp->data; 1477 + 1478 + memset(cp, 0, sizeof(*cp)); 1479 + 1480 + cp->enable = 0x01; 1481 + cp->num_of_sets = 0x01; 1482 + 1483 + memset(adv_set, 0, sizeof(*adv_set)); 1484 + 1485 + adv_set->handle = 0; 1486 + 1487 + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, 1488 + sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, 1489 + data); 1490 + } 1491 + 1492 + int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) 1493 + { 1494 + struct hci_dev *hdev = req->hdev; 1495 + int err; 1496 + 1497 + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) 1498 + __hci_req_disable_advertising(req); 1499 + 1500 + err = __hci_req_setup_ext_adv_instance(req, instance); 1501 + if (err < 0) 1502 + return err; 1503 + 1504 + __hci_req_update_scan_rsp_data(req, instance); 1505 + __hci_req_enable_ext_advertising(req); 1506 + 1507 + return 0; 1441 1508 } 1442 1509 1443 1510 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, ··· 1732 1321 return 0; 1733 1322 1734 1323 hdev->cur_adv_instance = instance; 1735 - __hci_req_update_adv_data(req, instance); 1736 - __hci_req_update_scan_rsp_data(req, instance); 1737 - __hci_req_enable_advertising(req); 1324 + if (ext_adv_capable(hdev)) { 1325 + __hci_req_start_ext_adv(req, instance); 1326 + } else { 1327 + __hci_req_update_adv_data(req, instance); 1328 + 
__hci_req_update_scan_rsp_data(req, instance); 1329 + __hci_req_enable_advertising(req); 1330 + } 1738 1331 1739 1332 return 0; 1740 1333 } ··· 2009 1594 2010 1595 /* Update the advertising parameters if necessary */ 2011 1596 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || 2012 - !list_empty(&hdev->adv_instances)) 2013 - __hci_req_enable_advertising(req); 1597 + !list_empty(&hdev->adv_instances)) { 1598 + if (ext_adv_capable(hdev)) 1599 + __hci_req_start_ext_adv(req, hdev->cur_adv_instance); 1600 + else 1601 + __hci_req_enable_advertising(req); 1602 + } 2014 1603 2015 1604 __hci_update_background_scan(req); 2016 1605 ··· 2123 1704 /* Discoverable mode affects the local advertising 2124 1705 * address in limited privacy mode. 2125 1706 */ 2126 - if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) 2127 - __hci_req_enable_advertising(req); 1707 + if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { 1708 + if (ext_adv_capable(hdev)) 1709 + __hci_req_start_ext_adv(req, 0x00); 1710 + else 1711 + __hci_req_enable_advertising(req); 1712 + } 2128 1713 } 2129 1714 2130 1715 hci_dev_unlock(hdev); ··· 2363 1940 static int le_scan_restart(struct hci_request *req, unsigned long opt) 2364 1941 { 2365 1942 struct hci_dev *hdev = req->hdev; 2366 - struct hci_cp_le_set_scan_enable cp; 2367 1943 2368 1944 /* If controller is not scanning we are done. 
*/ 2369 1945 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) ··· 2370 1948 2371 1949 hci_req_add_le_scan_disable(req); 2372 1950 2373 - memset(&cp, 0, sizeof(cp)); 2374 - cp.enable = LE_SCAN_ENABLE; 2375 - cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 2376 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1951 + if (use_ext_scan(hdev)) { 1952 + struct hci_cp_le_set_ext_scan_enable ext_enable_cp; 1953 + 1954 + memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); 1955 + ext_enable_cp.enable = LE_SCAN_ENABLE; 1956 + ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 1957 + 1958 + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, 1959 + sizeof(ext_enable_cp), &ext_enable_cp); 1960 + } else { 1961 + struct hci_cp_le_set_scan_enable cp; 1962 + 1963 + memset(&cp, 0, sizeof(cp)); 1964 + cp.enable = LE_SCAN_ENABLE; 1965 + cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 1966 + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1967 + } 2377 1968 2378 1969 return 0; 2379 1970 } ··· 2445 2010 { 2446 2011 uint16_t interval = opt; 2447 2012 struct hci_dev *hdev = req->hdev; 2448 - struct hci_cp_le_set_scan_param param_cp; 2449 - struct hci_cp_le_set_scan_enable enable_cp; 2450 2013 u8 own_addr_type; 2451 2014 int err; 2452 2015 ··· 2483 2050 if (err < 0) 2484 2051 own_addr_type = ADDR_LE_DEV_PUBLIC; 2485 2052 2486 - memset(&param_cp, 0, sizeof(param_cp)); 2487 - param_cp.type = LE_SCAN_ACTIVE; 2488 - param_cp.interval = cpu_to_le16(interval); 2489 - param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); 2490 - param_cp.own_address_type = own_addr_type; 2491 - 2492 - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 2493 - &param_cp); 2494 - 2495 - memset(&enable_cp, 0, sizeof(enable_cp)); 2496 - enable_cp.enable = LE_SCAN_ENABLE; 2497 - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 2498 - 2499 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), 2500 - &enable_cp); 2501 - 2053 + hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, 
DISCOV_LE_SCAN_WIN, 2054 + own_addr_type, 0); 2502 2055 return 0; 2503 2056 } 2504 2057 ··· 2721 2302 */ 2722 2303 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || 2723 2304 list_empty(&hdev->adv_instances)) { 2724 - __hci_req_update_adv_data(req, 0x00); 2725 - __hci_req_update_scan_rsp_data(req, 0x00); 2305 + int err; 2726 2306 2727 - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 2728 - __hci_req_enable_advertising(req); 2307 + if (ext_adv_capable(hdev)) { 2308 + err = __hci_req_setup_ext_adv_instance(req, 2309 + 0x00); 2310 + if (!err) 2311 + __hci_req_update_scan_rsp_data(req, 2312 + 0x00); 2313 + } else { 2314 + err = 0; 2315 + __hci_req_update_adv_data(req, 0x00); 2316 + __hci_req_update_scan_rsp_data(req, 0x00); 2317 + } 2318 + 2319 + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { 2320 + if (!ext_adv_capable(hdev)) 2321 + __hci_req_enable_advertising(req); 2322 + else if (!err) 2323 + __hci_req_enable_ext_advertising(req); 2324 + } 2729 2325 } else if (!list_empty(&hdev->adv_instances)) { 2730 2326 struct adv_info *adv_instance; 2731 2327
+8
net/bluetooth/hci_request.h
··· 80 80 struct hci_request *req, u8 instance, 81 81 bool force); 82 82 83 + int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); 84 + int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); 85 + void __hci_req_enable_ext_advertising(struct hci_request *req); 86 + void __hci_req_clear_ext_adv_sets(struct hci_request *req); 87 + int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, 88 + bool use_rpa, struct adv_info *adv_instance, 89 + u8 *own_addr_type, bdaddr_t *rand_addr); 90 + 83 91 void __hci_req_update_class(struct hci_request *req); 84 92 85 93 /* Returns true if HCI commands were queued */
+3 -3
net/bluetooth/hidp/core.c
··· 431 431 del_timer(&session->timer); 432 432 } 433 433 434 - static void hidp_process_report(struct hidp_session *session, 435 - int type, const u8 *data, int len, int intr) 434 + static void hidp_process_report(struct hidp_session *session, int type, 435 + const u8 *data, unsigned int len, int intr) 436 436 { 437 437 if (len > HID_MAX_BUFFER_SIZE) 438 438 len = HID_MAX_BUFFER_SIZE; ··· 775 775 hid->version = req->version; 776 776 hid->country = req->country; 777 777 778 - strncpy(hid->name, req->name, sizeof(req->name) - 1); 778 + strncpy(hid->name, req->name, sizeof(hid->name)); 779 779 780 780 snprintf(hid->phys, sizeof(hid->phys), "%pMR", 781 781 &l2cap_pi(session->ctrl_sock->sk)->chan->src);
+394 -11
net/bluetooth/mgmt.c
··· 617 617 &rp, sizeof(rp)); 618 618 } 619 619 620 + static u32 get_supported_phys(struct hci_dev *hdev) 621 + { 622 + u32 supported_phys = 0; 623 + 624 + if (lmp_bredr_capable(hdev)) { 625 + supported_phys |= MGMT_PHY_BR_1M_1SLOT; 626 + 627 + if (hdev->features[0][0] & LMP_3SLOT) 628 + supported_phys |= MGMT_PHY_BR_1M_3SLOT; 629 + 630 + if (hdev->features[0][0] & LMP_5SLOT) 631 + supported_phys |= MGMT_PHY_BR_1M_5SLOT; 632 + 633 + if (lmp_edr_2m_capable(hdev)) { 634 + supported_phys |= MGMT_PHY_EDR_2M_1SLOT; 635 + 636 + if (lmp_edr_3slot_capable(hdev)) 637 + supported_phys |= MGMT_PHY_EDR_2M_3SLOT; 638 + 639 + if (lmp_edr_5slot_capable(hdev)) 640 + supported_phys |= MGMT_PHY_EDR_2M_5SLOT; 641 + 642 + if (lmp_edr_3m_capable(hdev)) { 643 + supported_phys |= MGMT_PHY_EDR_3M_1SLOT; 644 + 645 + if (lmp_edr_3slot_capable(hdev)) 646 + supported_phys |= MGMT_PHY_EDR_3M_3SLOT; 647 + 648 + if (lmp_edr_5slot_capable(hdev)) 649 + supported_phys |= MGMT_PHY_EDR_3M_5SLOT; 650 + } 651 + } 652 + } 653 + 654 + if (lmp_le_capable(hdev)) { 655 + supported_phys |= MGMT_PHY_LE_1M_TX; 656 + supported_phys |= MGMT_PHY_LE_1M_RX; 657 + 658 + if (hdev->le_features[1] & HCI_LE_PHY_2M) { 659 + supported_phys |= MGMT_PHY_LE_2M_TX; 660 + supported_phys |= MGMT_PHY_LE_2M_RX; 661 + } 662 + 663 + if (hdev->le_features[1] & HCI_LE_PHY_CODED) { 664 + supported_phys |= MGMT_PHY_LE_CODED_TX; 665 + supported_phys |= MGMT_PHY_LE_CODED_RX; 666 + } 667 + } 668 + 669 + return supported_phys; 670 + } 671 + 672 + static u32 get_selected_phys(struct hci_dev *hdev) 673 + { 674 + u32 selected_phys = 0; 675 + 676 + if (lmp_bredr_capable(hdev)) { 677 + selected_phys |= MGMT_PHY_BR_1M_1SLOT; 678 + 679 + if (hdev->pkt_type & (HCI_DM3 | HCI_DH3)) 680 + selected_phys |= MGMT_PHY_BR_1M_3SLOT; 681 + 682 + if (hdev->pkt_type & (HCI_DM5 | HCI_DH5)) 683 + selected_phys |= MGMT_PHY_BR_1M_5SLOT; 684 + 685 + if (lmp_edr_2m_capable(hdev)) { 686 + if (!(hdev->pkt_type & HCI_2DH1)) 687 + selected_phys |= 
MGMT_PHY_EDR_2M_1SLOT; 688 + 689 + if (lmp_edr_3slot_capable(hdev) && 690 + !(hdev->pkt_type & HCI_2DH3)) 691 + selected_phys |= MGMT_PHY_EDR_2M_3SLOT; 692 + 693 + if (lmp_edr_5slot_capable(hdev) && 694 + !(hdev->pkt_type & HCI_2DH5)) 695 + selected_phys |= MGMT_PHY_EDR_2M_5SLOT; 696 + 697 + if (lmp_edr_3m_capable(hdev)) { 698 + if (!(hdev->pkt_type & HCI_3DH1)) 699 + selected_phys |= MGMT_PHY_EDR_3M_1SLOT; 700 + 701 + if (lmp_edr_3slot_capable(hdev) && 702 + !(hdev->pkt_type & HCI_3DH3)) 703 + selected_phys |= MGMT_PHY_EDR_3M_3SLOT; 704 + 705 + if (lmp_edr_5slot_capable(hdev) && 706 + !(hdev->pkt_type & HCI_3DH5)) 707 + selected_phys |= MGMT_PHY_EDR_3M_5SLOT; 708 + } 709 + } 710 + } 711 + 712 + if (lmp_le_capable(hdev)) { 713 + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M) 714 + selected_phys |= MGMT_PHY_LE_1M_TX; 715 + 716 + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M) 717 + selected_phys |= MGMT_PHY_LE_1M_RX; 718 + 719 + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M) 720 + selected_phys |= MGMT_PHY_LE_2M_TX; 721 + 722 + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M) 723 + selected_phys |= MGMT_PHY_LE_2M_RX; 724 + 725 + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED) 726 + selected_phys |= MGMT_PHY_LE_CODED_TX; 727 + 728 + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED) 729 + selected_phys |= MGMT_PHY_LE_CODED_RX; 730 + } 731 + 732 + return selected_phys; 733 + } 734 + 735 + static u32 get_configurable_phys(struct hci_dev *hdev) 736 + { 737 + return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT & 738 + ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX); 739 + } 740 + 620 741 static u32 get_supported_settings(struct hci_dev *hdev) 621 742 { 622 743 u32 settings = 0; ··· 774 653 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || 775 654 hdev->set_bdaddr) 776 655 settings |= MGMT_SETTING_CONFIGURATION; 656 + 657 + settings |= MGMT_SETTING_PHY_CONFIGURATION; 777 658 778 659 return settings; 779 660 } ··· 940 817 * function. 
941 818 */ 942 819 hci_req_init(&req, hdev); 943 - __hci_req_enable_advertising(&req); 820 + if (ext_adv_capable(hdev)) 821 + __hci_req_start_ext_adv(&req, hdev->cur_adv_instance); 822 + else 823 + __hci_req_enable_advertising(&req); 944 824 hci_req_run(&req, NULL); 945 825 } 946 826 ··· 1847 1721 */ 1848 1722 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { 1849 1723 struct hci_request req; 1850 - 1851 1724 hci_req_init(&req, hdev); 1852 - __hci_req_update_adv_data(&req, 0x00); 1853 - __hci_req_update_scan_rsp_data(&req, 0x00); 1725 + if (ext_adv_capable(hdev)) { 1726 + int err; 1727 + 1728 + err = __hci_req_setup_ext_adv_instance(&req, 0x00); 1729 + if (!err) 1730 + __hci_req_update_scan_rsp_data(&req, 0x00); 1731 + } else { 1732 + __hci_req_update_adv_data(&req, 0x00); 1733 + __hci_req_update_scan_rsp_data(&req, 0x00); 1734 + } 1854 1735 hci_req_run(&req, NULL); 1855 1736 hci_update_background_scan(hdev); 1856 1737 } ··· 1956 1823 } else { 1957 1824 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) 1958 1825 __hci_req_disable_advertising(&req); 1826 + 1827 + if (ext_adv_capable(hdev)) 1828 + __hci_req_clear_ext_adv_sets(&req); 1959 1829 } 1960 1830 1961 1831 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), ··· 3320 3184 return err; 3321 3185 } 3322 3186 3187 + static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, 3188 + void *data, u16 len) 3189 + { 3190 + struct mgmt_rp_get_phy_confguration rp; 3191 + 3192 + BT_DBG("sock %p %s", sk, hdev->name); 3193 + 3194 + hci_dev_lock(hdev); 3195 + 3196 + memset(&rp, 0, sizeof(rp)); 3197 + 3198 + rp.supported_phys = cpu_to_le32(get_supported_phys(hdev)); 3199 + rp.selected_phys = cpu_to_le32(get_selected_phys(hdev)); 3200 + rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev)); 3201 + 3202 + hci_dev_unlock(hdev); 3203 + 3204 + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0, 3205 + &rp, sizeof(rp)); 3206 + } 3207 + 3208 + int mgmt_phy_configuration_changed(struct 
hci_dev *hdev, struct sock *skip) 3209 + { 3210 + struct mgmt_ev_phy_configuration_changed ev; 3211 + 3212 + memset(&ev, 0, sizeof(ev)); 3213 + 3214 + ev.selected_phys = cpu_to_le32(get_selected_phys(hdev)); 3215 + 3216 + return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev, 3217 + sizeof(ev), skip); 3218 + } 3219 + 3220 + static void set_default_phy_complete(struct hci_dev *hdev, u8 status, 3221 + u16 opcode, struct sk_buff *skb) 3222 + { 3223 + struct mgmt_cp_set_phy_confguration *cp; 3224 + struct mgmt_pending_cmd *cmd; 3225 + 3226 + BT_DBG("status 0x%02x", status); 3227 + 3228 + hci_dev_lock(hdev); 3229 + 3230 + cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev); 3231 + if (!cmd) 3232 + goto unlock; 3233 + 3234 + cp = cmd->param; 3235 + 3236 + if (status) { 3237 + mgmt_cmd_status(cmd->sk, hdev->id, 3238 + MGMT_OP_SET_PHY_CONFIGURATION, 3239 + mgmt_status(status)); 3240 + } else { 3241 + mgmt_cmd_complete(cmd->sk, hdev->id, 3242 + MGMT_OP_SET_PHY_CONFIGURATION, 0, 3243 + NULL, 0); 3244 + 3245 + mgmt_phy_configuration_changed(hdev, cmd->sk); 3246 + } 3247 + 3248 + mgmt_pending_remove(cmd); 3249 + 3250 + unlock: 3251 + hci_dev_unlock(hdev); 3252 + } 3253 + 3254 + static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, 3255 + void *data, u16 len) 3256 + { 3257 + struct mgmt_cp_set_phy_confguration *cp = data; 3258 + struct hci_cp_le_set_default_phy cp_phy; 3259 + struct mgmt_pending_cmd *cmd; 3260 + struct hci_request req; 3261 + u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; 3262 + u16 pkt_type = (HCI_DH1 | HCI_DM1); 3263 + bool changed = false; 3264 + int err; 3265 + 3266 + BT_DBG("sock %p %s", sk, hdev->name); 3267 + 3268 + configurable_phys = get_configurable_phys(hdev); 3269 + supported_phys = get_supported_phys(hdev); 3270 + selected_phys = __le32_to_cpu(cp->selected_phys); 3271 + 3272 + if (selected_phys & ~supported_phys) 3273 + return mgmt_cmd_status(sk, hdev->id, 3274 + MGMT_OP_SET_PHY_CONFIGURATION, 
3275 + MGMT_STATUS_INVALID_PARAMS); 3276 + 3277 + unconfigure_phys = supported_phys & ~configurable_phys; 3278 + 3279 + if ((selected_phys & unconfigure_phys) != unconfigure_phys) 3280 + return mgmt_cmd_status(sk, hdev->id, 3281 + MGMT_OP_SET_PHY_CONFIGURATION, 3282 + MGMT_STATUS_INVALID_PARAMS); 3283 + 3284 + if (selected_phys == get_selected_phys(hdev)) 3285 + return mgmt_cmd_complete(sk, hdev->id, 3286 + MGMT_OP_SET_PHY_CONFIGURATION, 3287 + 0, NULL, 0); 3288 + 3289 + hci_dev_lock(hdev); 3290 + 3291 + if (!hdev_is_powered(hdev)) { 3292 + err = mgmt_cmd_status(sk, hdev->id, 3293 + MGMT_OP_SET_PHY_CONFIGURATION, 3294 + MGMT_STATUS_REJECTED); 3295 + goto unlock; 3296 + } 3297 + 3298 + if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) { 3299 + err = mgmt_cmd_status(sk, hdev->id, 3300 + MGMT_OP_SET_PHY_CONFIGURATION, 3301 + MGMT_STATUS_BUSY); 3302 + goto unlock; 3303 + } 3304 + 3305 + if (selected_phys & MGMT_PHY_BR_1M_3SLOT) 3306 + pkt_type |= (HCI_DH3 | HCI_DM3); 3307 + else 3308 + pkt_type &= ~(HCI_DH3 | HCI_DM3); 3309 + 3310 + if (selected_phys & MGMT_PHY_BR_1M_5SLOT) 3311 + pkt_type |= (HCI_DH5 | HCI_DM5); 3312 + else 3313 + pkt_type &= ~(HCI_DH5 | HCI_DM5); 3314 + 3315 + if (selected_phys & MGMT_PHY_EDR_2M_1SLOT) 3316 + pkt_type &= ~HCI_2DH1; 3317 + else 3318 + pkt_type |= HCI_2DH1; 3319 + 3320 + if (selected_phys & MGMT_PHY_EDR_2M_3SLOT) 3321 + pkt_type &= ~HCI_2DH3; 3322 + else 3323 + pkt_type |= HCI_2DH3; 3324 + 3325 + if (selected_phys & MGMT_PHY_EDR_2M_5SLOT) 3326 + pkt_type &= ~HCI_2DH5; 3327 + else 3328 + pkt_type |= HCI_2DH5; 3329 + 3330 + if (selected_phys & MGMT_PHY_EDR_3M_1SLOT) 3331 + pkt_type &= ~HCI_3DH1; 3332 + else 3333 + pkt_type |= HCI_3DH1; 3334 + 3335 + if (selected_phys & MGMT_PHY_EDR_3M_3SLOT) 3336 + pkt_type &= ~HCI_3DH3; 3337 + else 3338 + pkt_type |= HCI_3DH3; 3339 + 3340 + if (selected_phys & MGMT_PHY_EDR_3M_5SLOT) 3341 + pkt_type &= ~HCI_3DH5; 3342 + else 3343 + pkt_type |= HCI_3DH5; 3344 + 3345 + if (pkt_type != hdev->pkt_type) 
{ 3346 + hdev->pkt_type = pkt_type; 3347 + changed = true; 3348 + } 3349 + 3350 + if ((selected_phys & MGMT_PHY_LE_MASK) == 3351 + (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) { 3352 + if (changed) 3353 + mgmt_phy_configuration_changed(hdev, sk); 3354 + 3355 + err = mgmt_cmd_complete(sk, hdev->id, 3356 + MGMT_OP_SET_PHY_CONFIGURATION, 3357 + 0, NULL, 0); 3358 + 3359 + goto unlock; 3360 + } 3361 + 3362 + cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, 3363 + len); 3364 + if (!cmd) { 3365 + err = -ENOMEM; 3366 + goto unlock; 3367 + } 3368 + 3369 + hci_req_init(&req, hdev); 3370 + 3371 + memset(&cp_phy, 0, sizeof(cp_phy)); 3372 + 3373 + if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) 3374 + cp_phy.all_phys |= 0x01; 3375 + 3376 + if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) 3377 + cp_phy.all_phys |= 0x02; 3378 + 3379 + if (selected_phys & MGMT_PHY_LE_1M_TX) 3380 + cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; 3381 + 3382 + if (selected_phys & MGMT_PHY_LE_2M_TX) 3383 + cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; 3384 + 3385 + if (selected_phys & MGMT_PHY_LE_CODED_TX) 3386 + cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; 3387 + 3388 + if (selected_phys & MGMT_PHY_LE_1M_RX) 3389 + cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; 3390 + 3391 + if (selected_phys & MGMT_PHY_LE_2M_RX) 3392 + cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; 3393 + 3394 + if (selected_phys & MGMT_PHY_LE_CODED_RX) 3395 + cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; 3396 + 3397 + hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy); 3398 + 3399 + err = hci_req_run_skb(&req, set_default_phy_complete); 3400 + if (err < 0) 3401 + mgmt_pending_remove(cmd); 3402 + 3403 + unlock: 3404 + hci_dev_unlock(hdev); 3405 + 3406 + return err; 3407 + } 3408 + 3323 3409 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, 3324 3410 u16 opcode, struct sk_buff *skb) 3325 3411 { ··· 4395 4037 * HCI_ADVERTISING flag is not yet set. 
4396 4038 */ 4397 4039 hdev->cur_adv_instance = 0x00; 4398 - __hci_req_update_adv_data(&req, 0x00); 4399 - __hci_req_update_scan_rsp_data(&req, 0x00); 4400 - __hci_req_enable_advertising(&req); 4040 + 4041 + if (ext_adv_capable(hdev)) { 4042 + __hci_req_start_ext_adv(&req, 0x00); 4043 + } else { 4044 + __hci_req_update_adv_data(&req, 0x00); 4045 + __hci_req_update_scan_rsp_data(&req, 0x00); 4046 + __hci_req_enable_advertising(&req); 4047 + } 4401 4048 } else { 4402 4049 __hci_req_disable_advertising(&req); 4403 4050 } ··· 4972 4609 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY); 4973 4610 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); 4974 4611 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 4612 + hci_adv_instances_set_rpa_expired(hdev, true); 4975 4613 if (cp->privacy == 0x02) 4976 4614 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY); 4977 4615 else ··· 4981 4617 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY); 4982 4618 memset(hdev->irk, 0, sizeof(hdev->irk)); 4983 4619 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); 4620 + hci_adv_instances_set_rpa_expired(hdev, false); 4984 4621 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); 4985 4622 } 4986 4623 ··· 6332 5967 flags |= MGMT_ADV_FLAG_APPEARANCE; 6333 5968 flags |= MGMT_ADV_FLAG_LOCAL_NAME; 6334 5969 6335 - if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) 5970 + /* In extended adv TX_POWER returned from Set Adv Param 5971 + * will be always valid. 
5972 + */ 5973 + if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) || 5974 + ext_adv_capable(hdev)) 6336 5975 flags |= MGMT_ADV_FLAG_TX_POWER; 5976 + 5977 + if (ext_adv_capable(hdev)) { 5978 + flags |= MGMT_ADV_FLAG_SEC_1M; 5979 + 5980 + if (hdev->le_features[1] & HCI_LE_PHY_2M) 5981 + flags |= MGMT_ADV_FLAG_SEC_2M; 5982 + 5983 + if (hdev->le_features[1] & HCI_LE_PHY_CODED) 5984 + flags |= MGMT_ADV_FLAG_SEC_CODED; 5985 + } 6337 5986 6338 5987 return flags; 6339 5988 } ··· 6554 6175 struct mgmt_cp_add_advertising *cp = data; 6555 6176 struct mgmt_rp_add_advertising rp; 6556 6177 u32 flags; 6557 - u32 supported_flags; 6178 + u32 supported_flags, phy_flags; 6558 6179 u8 status; 6559 6180 u16 timeout, duration; 6560 6181 unsigned int prev_instance_cnt = hdev->adv_instance_cnt; ··· 6584 6205 duration = __le16_to_cpu(cp->duration); 6585 6206 6586 6207 /* The current implementation only supports a subset of the specified 6587 - * flags. 6208 + * flags. Also need to check mutual exclusiveness of sec flags. 6588 6209 */ 6589 6210 supported_flags = get_supported_adv_flags(hdev); 6590 - if (flags & ~supported_flags) 6211 + phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK; 6212 + if (flags & ~supported_flags || 6213 + ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) 6591 6214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, 6592 6215 MGMT_STATUS_INVALID_PARAMS); 6593 6216 ··· 6925 6544 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE, 6926 6545 HCI_MGMT_UNTRUSTED }, 6927 6546 { set_appearance, MGMT_SET_APPEARANCE_SIZE }, 6547 + { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE }, 6548 + { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE }, 6928 6549 }; 6929 6550 6930 6551 void mgmt_index_added(struct hci_dev *hdev)
+2 -1
net/bluetooth/sco.c
··· 393 393 */ 394 394 static void sco_sock_kill(struct sock *sk) 395 395 { 396 - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 396 + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || 397 + sock_flag(sk, SOCK_DEAD)) 397 398 return; 398 399 399 400 BT_DBG("sk %p state %d", sk, sk->sk_state);