Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-net-next-2021-10-01' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

- Add support for MediaTek MT7922 and MT7921
- Enable support for AOSP extension in Qualcomm WCN399x and Realtek
8822C/8852A.
- Add initial support for link quality and audio/codec offload.
- Rework of sockets sendmsg to avoid locking issues.
- Add vhci suspend/resume emulation.

====================

Link: https://lore.kernel.org/r/20211001230850.3635543-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2826 -1121
+202 -39
drivers/bluetooth/btintel.c
··· 1037 1037 1038 1038 params = (void *)(fw_ptr + sizeof(*cmd)); 1039 1039 1040 - bt_dev_info(hdev, "Boot Address: 0x%x", 1041 - le32_to_cpu(params->boot_addr)); 1040 + *boot_addr = le32_to_cpu(params->boot_addr); 1041 + 1042 + bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr); 1042 1043 1043 1044 bt_dev_info(hdev, "Firmware Version: %u-%u.%u", 1044 1045 params->fw_build_num, params->fw_build_ww, ··· 1072 1071 /* Skip version checking */ 1073 1072 break; 1074 1073 default: 1075 - /* Skip reading firmware file version in bootloader mode */ 1076 - if (ver->fw_variant == 0x06) 1077 - break; 1078 1074 1079 1075 /* Skip download if firmware has the same version */ 1080 1076 if (btintel_firmware_version(hdev, ver->fw_build_num, ··· 1112 1114 int err; 1113 1115 u32 css_header_ver; 1114 1116 1115 - /* Skip reading firmware file version in bootloader mode */ 1116 - if (ver->img_type != 0x01) { 1117 - /* Skip download if firmware has the same version */ 1118 - if (btintel_firmware_version(hdev, ver->min_fw_build_nn, 1119 - ver->min_fw_build_cw, 1120 - ver->min_fw_build_yy, 1121 - fw, boot_param)) { 1122 - bt_dev_info(hdev, "Firmware already loaded"); 1123 - /* Return -EALREADY to indicate that firmware has 1124 - * already been loaded. 1125 - */ 1126 - return -EALREADY; 1127 - } 1117 + /* Skip download if firmware has the same version */ 1118 + if (btintel_firmware_version(hdev, ver->min_fw_build_nn, 1119 + ver->min_fw_build_cw, 1120 + ver->min_fw_build_yy, 1121 + fw, boot_param)) { 1122 + bt_dev_info(hdev, "Firmware already loaded"); 1123 + /* Return -EALREADY to indicate that firmware has 1124 + * already been loaded. 
1125 + */ 1126 + return -EALREADY; 1128 1127 } 1129 1128 1130 1129 /* The firmware variant determines if the device is in bootloader ··· 1280 1285 static int btintel_set_debug_features(struct hci_dev *hdev, 1281 1286 const struct intel_debug_features *features) 1282 1287 { 1283 - u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00, 1288 + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00, 1284 1289 0x00, 0x00, 0x00 }; 1290 + u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 }; 1291 + u8 trace_enable = 0x02; 1285 1292 struct sk_buff *skb; 1286 1293 1287 - if (!features) 1294 + if (!features) { 1295 + bt_dev_warn(hdev, "Debug features not read"); 1288 1296 return -EINVAL; 1297 + } 1289 1298 1290 1299 if (!(features->page1[0] & 0x3f)) { 1291 1300 bt_dev_info(hdev, "Telemetry exception format not supported"); ··· 1302 1303 PTR_ERR(skb)); 1303 1304 return PTR_ERR(skb); 1304 1305 } 1305 - 1306 1306 kfree_skb(skb); 1307 + 1308 + skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT); 1309 + if (IS_ERR(skb)) { 1310 + bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)", 1311 + PTR_ERR(skb)); 1312 + return PTR_ERR(skb); 1313 + } 1314 + kfree_skb(skb); 1315 + 1316 + skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); 1317 + if (IS_ERR(skb)) { 1318 + bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)", 1319 + PTR_ERR(skb)); 1320 + return PTR_ERR(skb); 1321 + } 1322 + kfree_skb(skb); 1323 + 1324 + bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x", 1325 + trace_enable, mask[3]); 1326 + 1307 1327 return 0; 1308 1328 } 1329 + 1330 + static int btintel_reset_debug_features(struct hci_dev *hdev, 1331 + const struct intel_debug_features *features) 1332 + { 1333 + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 1334 + 0x00, 0x00, 0x00 }; 1335 + u8 trace_enable = 0x00; 1336 + struct sk_buff *skb; 1337 + 1338 + if (!features) { 1339 + bt_dev_warn(hdev, 
"Debug features not read"); 1340 + return -EINVAL; 1341 + } 1342 + 1343 + if (!(features->page1[0] & 0x3f)) { 1344 + bt_dev_info(hdev, "Telemetry exception format not supported"); 1345 + return 0; 1346 + } 1347 + 1348 + /* Should stop the trace before writing ddc event mask. */ 1349 + skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); 1350 + if (IS_ERR(skb)) { 1351 + bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)", 1352 + PTR_ERR(skb)); 1353 + return PTR_ERR(skb); 1354 + } 1355 + kfree_skb(skb); 1356 + 1357 + skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); 1358 + if (IS_ERR(skb)) { 1359 + bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", 1360 + PTR_ERR(skb)); 1361 + return PTR_ERR(skb); 1362 + } 1363 + kfree_skb(skb); 1364 + 1365 + bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x", 1366 + trace_enable, mask[3]); 1367 + 1368 + return 0; 1369 + } 1370 + 1371 + int btintel_set_quality_report(struct hci_dev *hdev, bool enable) 1372 + { 1373 + struct intel_debug_features features; 1374 + int err; 1375 + 1376 + bt_dev_dbg(hdev, "enable %d", enable); 1377 + 1378 + /* Read the Intel supported features and if new exception formats 1379 + * supported, need to load the additional DDC config to enable. 1380 + */ 1381 + err = btintel_read_debug_features(hdev, &features); 1382 + if (err) 1383 + return err; 1384 + 1385 + /* Set or reset the debug features. 
*/ 1386 + if (enable) 1387 + err = btintel_set_debug_features(hdev, &features); 1388 + else 1389 + err = btintel_reset_debug_features(hdev, &features); 1390 + 1391 + return err; 1392 + } 1393 + EXPORT_SYMBOL_GPL(btintel_set_quality_report); 1309 1394 1310 1395 static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev, 1311 1396 struct intel_version *ver) ··· 1976 1893 u32 boot_param; 1977 1894 char ddcname[64]; 1978 1895 int err; 1979 - struct intel_debug_features features; 1980 1896 1981 1897 BT_DBG("%s", hdev->name); 1982 1898 ··· 2016 1934 btintel_load_ddc_config(hdev, ddcname); 2017 1935 } 2018 1936 2019 - /* Read the Intel supported features and if new exception formats 2020 - * supported, need to load the additional DDC config to enable. 2021 - */ 2022 - err = btintel_read_debug_features(hdev, &features); 2023 - if (!err) { 2024 - /* Set DDC mask for available debug features */ 2025 - btintel_set_debug_features(hdev, &features); 2026 - } 1937 + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); 2027 1938 2028 1939 /* Read the Intel version information after loading the FW */ 2029 1940 err = btintel_read_version(hdev, &new_ver); ··· 2158 2083 return err; 2159 2084 } 2160 2085 2086 + static int btintel_get_codec_config_data(struct hci_dev *hdev, 2087 + __u8 link, struct bt_codec *codec, 2088 + __u8 *ven_len, __u8 **ven_data) 2089 + { 2090 + int err = 0; 2091 + 2092 + if (!ven_data || !ven_len) 2093 + return -EINVAL; 2094 + 2095 + *ven_len = 0; 2096 + *ven_data = NULL; 2097 + 2098 + if (link != ESCO_LINK) { 2099 + bt_dev_err(hdev, "Invalid link type(%u)", link); 2100 + return -EINVAL; 2101 + } 2102 + 2103 + *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); 2104 + if (!*ven_data) { 2105 + err = -ENOMEM; 2106 + goto error; 2107 + } 2108 + 2109 + /* supports only CVSD and mSBC offload codecs */ 2110 + switch (codec->id) { 2111 + case 0x02: 2112 + **ven_data = 0x00; 2113 + break; 2114 + case 0x05: 2115 + **ven_data = 0x01; 2116 + break; 2117 + default: 
2118 + err = -EINVAL; 2119 + bt_dev_err(hdev, "Invalid codec id(%u)", codec->id); 2120 + goto error; 2121 + } 2122 + /* codec and its capabilities are pre-defined to ids 2123 + * preset id = 0x00 represents CVSD codec with sampling rate 8K 2124 + * preset id = 0x01 represents mSBC codec with sampling rate 16K 2125 + */ 2126 + *ven_len = sizeof(__u8); 2127 + return err; 2128 + 2129 + error: 2130 + kfree(*ven_data); 2131 + *ven_data = NULL; 2132 + return err; 2133 + } 2134 + 2135 + static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) 2136 + { 2137 + /* Intel uses 1 as data path id for all the usecases */ 2138 + *data_path_id = 1; 2139 + return 0; 2140 + } 2141 + 2142 + static int btintel_configure_offload(struct hci_dev *hdev) 2143 + { 2144 + struct sk_buff *skb; 2145 + int err = 0; 2146 + struct intel_offload_use_cases *use_cases; 2147 + 2148 + skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT); 2149 + if (IS_ERR(skb)) { 2150 + bt_dev_err(hdev, "Reading offload use cases failed (%ld)", 2151 + PTR_ERR(skb)); 2152 + return PTR_ERR(skb); 2153 + } 2154 + 2155 + if (skb->len < sizeof(*use_cases)) { 2156 + err = -EIO; 2157 + goto error; 2158 + } 2159 + 2160 + use_cases = (void *)skb->data; 2161 + 2162 + if (use_cases->status) { 2163 + err = -bt_to_errno(skb->data[0]); 2164 + goto error; 2165 + } 2166 + 2167 + if (use_cases->preset[0] & 0x03) { 2168 + hdev->get_data_path_id = btintel_get_data_path_id; 2169 + hdev->get_codec_config_data = btintel_get_codec_config_data; 2170 + } 2171 + error: 2172 + kfree_skb(skb); 2173 + return err; 2174 + } 2175 + 2161 2176 static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, 2162 2177 struct intel_version_tlv *ver) 2163 2178 { 2164 2179 u32 boot_param; 2165 2180 char ddcname[64]; 2166 2181 int err; 2167 - struct intel_debug_features features; 2168 2182 struct intel_version_tlv new_ver; 2169 2183 2170 2184 bt_dev_dbg(hdev, ""); ··· 2289 2125 */ 2290 2126 btintel_load_ddc_config(hdev, ddcname); 
2291 2127 2292 - /* Read the Intel supported features and if new exception formats 2293 - * supported, need to load the additional DDC config to enable. 2294 - */ 2295 - err = btintel_read_debug_features(hdev, &features); 2296 - if (!err) { 2297 - /* Set DDC mask for available debug features */ 2298 - btintel_set_debug_features(hdev, &features); 2299 - } 2128 + /* Read supported use cases and set callbacks to fetch datapath id */ 2129 + btintel_configure_offload(hdev); 2130 + 2131 + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); 2300 2132 2301 2133 /* Read the Intel version information after loading the FW */ 2302 2134 err = btintel_read_version_tlv(hdev, &new_ver); ··· 2391 2231 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 2392 2232 set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); 2393 2233 set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); 2234 + 2235 + /* Set up the quality report callback for Intel devices */ 2236 + hdev->set_quality_report = btintel_set_quality_report; 2394 2237 2395 2238 /* For Legacy device, check the HW platform value and size */ 2396 2239 if (skb->len == sizeof(ver) && skb->data[1] == 0x37) {
+11
drivers/bluetooth/btintel.h
··· 132 132 __u8 page1[16]; 133 133 } __packed; 134 134 135 + struct intel_offload_use_cases { 136 + __u8 status; 137 + __u8 preset[8]; 138 + } __packed; 139 + 135 140 #define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8)) 136 141 #define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16)) 137 142 #define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff) ··· 209 204 void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len); 210 205 void btintel_secure_send_result(struct hci_dev *hdev, 211 206 const void *ptr, unsigned int len); 207 + int btintel_set_quality_report(struct hci_dev *hdev, bool enable); 212 208 #else 213 209 214 210 static inline int btintel_check_bdaddr(struct hci_dev *hdev) ··· 299 293 static inline void btintel_secure_send_result(struct hci_dev *hdev, 300 294 const void *ptr, unsigned int len) 301 295 { 296 + } 297 + 298 + static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable) 299 + { 300 + return -ENODEV; 302 301 } 303 302 #endif
+3 -3
drivers/bluetooth/btmrvl_main.c
··· 587 587 return 0; 588 588 } 589 589 590 - static bool btmrvl_prevent_wake(struct hci_dev *hdev) 590 + static bool btmrvl_wakeup(struct hci_dev *hdev) 591 591 { 592 592 struct btmrvl_private *priv = hci_get_drvdata(hdev); 593 593 struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; 594 594 595 - return !device_may_wakeup(&card->func->dev); 595 + return device_may_wakeup(&card->func->dev); 596 596 } 597 597 598 598 /* ··· 696 696 hdev->send = btmrvl_send_frame; 697 697 hdev->setup = btmrvl_setup; 698 698 hdev->set_bdaddr = btmrvl_set_bdaddr; 699 - hdev->prevent_wake = btmrvl_prevent_wake; 699 + hdev->wakeup = btmrvl_wakeup; 700 700 SET_HCIDEV_DEV(hdev, &card->func->dev); 701 701 702 702 hdev->dev_type = priv->btmrvl_dev.dev_type;
+8 -5
drivers/bluetooth/btmtkuart.c
··· 158 158 int err; 159 159 160 160 hlen = sizeof(*hdr) + wmt_params->dlen; 161 - if (hlen > 255) 162 - return -EINVAL; 161 + if (hlen > 255) { 162 + err = -EINVAL; 163 + goto err_free_skb; 164 + } 163 165 164 166 hdr = (struct mtk_wmt_hdr *)&wc; 165 167 hdr->dir = 1; ··· 175 173 err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); 176 174 if (err < 0) { 177 175 clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); 178 - return err; 176 + goto err_free_skb; 179 177 } 180 178 181 179 /* The vendor specific WMT commands are all answered by a vendor ··· 192 190 if (err == -EINTR) { 193 191 bt_dev_err(hdev, "Execution of wmt command interrupted"); 194 192 clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); 195 - return err; 193 + goto err_free_skb; 196 194 } 197 195 198 196 if (err) { 199 197 bt_dev_err(hdev, "Execution of wmt command timed out"); 200 198 clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); 201 - return -ETIMEDOUT; 199 + err = -ETIMEDOUT; 200 + goto err_free_skb; 202 201 } 203 202 204 203 /* Parse and handle the return WMT event */
-1
drivers/bluetooth/btrsi.c
··· 19 19 #include <net/bluetooth/hci_core.h> 20 20 #include <asm/unaligned.h> 21 21 #include <net/rsi_91x.h> 22 - #include <net/genetlink.h> 23 22 24 23 #define RSI_DMA_ALIGN 8 25 24 #define RSI_FRAME_DESC_SIZE 16
+16 -10
drivers/bluetooth/btrtl.c
··· 59 59 __u8 hci_bus; 60 60 bool config_needed; 61 61 bool has_rom_version; 62 + bool has_msft_ext; 62 63 char *fw_name; 63 64 char *cfg_name; 64 65 }; ··· 122 121 { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB), 123 122 .config_needed = false, 124 123 .has_rom_version = true, 124 + .has_msft_ext = true, 125 125 .fw_name = "rtl_bt/rtl8821c_fw.bin", 126 126 .cfg_name = "rtl_bt/rtl8821c_config" }, 127 127 ··· 137 135 { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART), 138 136 .config_needed = false, 139 137 .has_rom_version = true, 138 + .has_msft_ext = true, 140 139 .fw_name = "rtl_bt/rtl8761b_fw.bin", 141 140 .cfg_name = "rtl_bt/rtl8761b_config" }, 142 141 ··· 152 149 { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART), 153 150 .config_needed = true, 154 151 .has_rom_version = true, 152 + .has_msft_ext = true, 155 153 .fw_name = "rtl_bt/rtl8822cs_fw.bin", 156 154 .cfg_name = "rtl_bt/rtl8822cs_config" }, 157 155 ··· 160 156 { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB), 161 157 .config_needed = false, 162 158 .has_rom_version = true, 159 + .has_msft_ext = true, 163 160 .fw_name = "rtl_bt/rtl8822cu_fw.bin", 164 161 .cfg_name = "rtl_bt/rtl8822cu_config" }, 165 162 ··· 168 163 { IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB), 169 164 .config_needed = true, 170 165 .has_rom_version = true, 166 + .has_msft_ext = true, 171 167 .fw_name = "rtl_bt/rtl8822b_fw.bin", 172 168 .cfg_name = "rtl_bt/rtl8822b_config" }, 173 169 ··· 176 170 { IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB), 177 171 .config_needed = false, 178 172 .has_rom_version = true, 173 + .has_msft_ext = true, 179 174 .fw_name = "rtl_bt/rtl8852au_fw.bin", 180 175 .cfg_name = "rtl_bt/rtl8852au_config" }, 181 176 }; ··· 601 594 hci_rev = le16_to_cpu(resp->hci_rev); 602 595 lmp_subver = le16_to_cpu(resp->lmp_subver); 603 596 604 - if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c && 605 - resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e) 597 + btrtl_dev->ic_info = 
btrtl_match_ic(lmp_subver, hci_rev, hci_ver, 598 + hdev->bus); 599 + 600 + if (!btrtl_dev->ic_info) 606 601 btrtl_dev->drop_fw = true; 607 602 608 603 if (btrtl_dev->drop_fw) { ··· 643 634 hci_ver = resp->hci_ver; 644 635 hci_rev = le16_to_cpu(resp->hci_rev); 645 636 lmp_subver = le16_to_cpu(resp->lmp_subver); 637 + 638 + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, 639 + hdev->bus); 646 640 } 647 641 out_free: 648 642 kfree_skb(skb); 649 - 650 - btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, 651 - hdev->bus); 652 643 653 644 if (!btrtl_dev->ic_info) { 654 645 rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", ··· 693 684 /* The following chips supports the Microsoft vendor extension, 694 685 * therefore set the corresponding VsMsftOpCode. 695 686 */ 696 - switch (lmp_subver) { 697 - case RTL_ROM_LMP_8822B: 698 - case RTL_ROM_LMP_8852A: 687 + if (btrtl_dev->ic_info->has_msft_ext) 699 688 hci_set_msft_opcode(hdev, 0xFCF0); 700 - break; 701 - } 702 689 703 690 return btrtl_dev; 704 691 ··· 751 746 case CHIP_ID_8852A: 752 747 set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); 753 748 set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); 749 + hci_set_aosp_capable(hdev); 754 750 break; 755 751 default: 756 752 rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
+57 -7
drivers/bluetooth/btusb.c
··· 384 384 /* Realtek 8852AE Bluetooth devices */ 385 385 { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | 386 386 BTUSB_WIDEBAND_SPEECH }, 387 + { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK | 388 + BTUSB_WIDEBAND_SPEECH }, 389 + { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK | 390 + BTUSB_WIDEBAND_SPEECH }, 391 + { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | 392 + BTUSB_WIDEBAND_SPEECH }, 387 393 388 394 /* Realtek Bluetooth devices */ 389 395 { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), ··· 416 410 { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | 417 411 BTUSB_WIDEBAND_SPEECH | 418 412 BTUSB_VALID_LE_STATES }, 413 + { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | 414 + BTUSB_WIDEBAND_SPEECH | 415 + BTUSB_VALID_LE_STATES }, 419 416 { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | 420 417 BTUSB_WIDEBAND_SPEECH | 421 418 BTUSB_VALID_LE_STATES }, ··· 442 433 { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, 443 434 { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, 444 435 436 + /* Additional Realtek 8761B Bluetooth devices */ 437 + { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK | 438 + BTUSB_WIDEBAND_SPEECH }, 439 + 445 440 /* Additional Realtek 8761BU Bluetooth devices */ 446 441 { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | 447 442 BTUSB_WIDEBAND_SPEECH }, ··· 464 451 /* Additional Realtek 8822CE Bluetooth devices */ 465 452 { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK | 466 453 BTUSB_WIDEBAND_SPEECH }, 467 - /* Bluetooth component of Realtek 8852AE device */ 468 - { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | 469 - BTUSB_WIDEBAND_SPEECH }, 470 - 471 454 { USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK | 472 455 BTUSB_WIDEBAND_SPEECH }, 473 456 { USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK | ··· 661 652 static void btusb_qca_cmd_timeout(struct hci_dev *hdev) 
662 653 { 663 654 struct btusb_data *data = hci_get_drvdata(hdev); 655 + struct gpio_desc *reset_gpio = data->reset_gpio; 664 656 int err; 665 657 666 658 if (++data->cmd_timeout_cnt < 5) 667 659 return; 660 + 661 + if (reset_gpio) { 662 + bt_dev_err(hdev, "Reset qca device via bt_en gpio"); 663 + 664 + /* Toggle the hard reset line. The qca bt device is going to 665 + * yank itself off the USB and then replug. The cleanup is handled 666 + * correctly on the way out (standard USB disconnect), and the new 667 + * device is detected cleanly and bound to the driver again like 668 + * it should be. 669 + */ 670 + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { 671 + bt_dev_err(hdev, "last reset failed? Not resetting again"); 672 + return; 673 + } 674 + 675 + gpiod_set_value_cansleep(reset_gpio, 0); 676 + msleep(200); 677 + gpiod_set_value_cansleep(reset_gpio, 1); 678 + 679 + return; 680 + } 668 681 669 682 bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device."); 670 683 /* This is not an unbalanced PM reference since the device will reset */ ··· 2231 2200 }; 2232 2201 } __packed; 2233 2202 2203 + static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr) 2204 + { 2205 + struct sk_buff *skb; 2206 + long ret; 2207 + 2208 + skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT); 2209 + if (IS_ERR(skb)) { 2210 + ret = PTR_ERR(skb); 2211 + bt_dev_err(hdev, "changing Mediatek device address failed (%ld)", 2212 + ret); 2213 + return ret; 2214 + } 2215 + kfree_skb(skb); 2216 + 2217 + return 0; 2218 + } 2219 + 2234 2220 static void btusb_mtk_wmt_recv(struct urb *urb) 2235 2221 { 2236 2222 struct hci_dev *hdev = urb->context; ··· 2852 2804 case 0x7668: 2853 2805 fwname = FIRMWARE_MT7668; 2854 2806 break; 2807 + case 0x7922: 2855 2808 case 0x7961: 2856 2809 snprintf(fw_bin_name, sizeof(fw_bin_name), 2857 2810 "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", ··· 3640 3591 interface_to_usbdev(intf)->quirks |= 
USB_QUIRK_RESET_RESUME; 3641 3592 } 3642 3593 3643 - static bool btusb_prevent_wake(struct hci_dev *hdev) 3594 + static bool btusb_wakeup(struct hci_dev *hdev) 3644 3595 { 3645 3596 struct btusb_data *data = hci_get_drvdata(hdev); 3646 3597 3647 - return !device_may_wakeup(&data->udev->dev); 3598 + return device_may_wakeup(&data->udev->dev); 3648 3599 } 3649 3600 3650 3601 static int btusb_shutdown_qca(struct hci_dev *hdev) ··· 3801 3752 hdev->flush = btusb_flush; 3802 3753 hdev->send = btusb_send_frame; 3803 3754 hdev->notify = btusb_notify; 3804 - hdev->prevent_wake = btusb_prevent_wake; 3755 + hdev->wakeup = btusb_wakeup; 3805 3756 3806 3757 #ifdef CONFIG_PM 3807 3758 err = btusb_config_oob_wake(hdev); ··· 3868 3819 hdev->shutdown = btusb_mtk_shutdown; 3869 3820 hdev->manufacturer = 70; 3870 3821 hdev->cmd_timeout = btusb_mtk_cmd_timeout; 3822 + hdev->set_bdaddr = btusb_set_bdaddr_mtk; 3871 3823 set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); 3872 3824 data->recv_acl = btusb_recv_acl_mtk; 3873 3825 }
+17 -18
drivers/bluetooth/hci_h5.c
··· 587 587 count -= processed; 588 588 } 589 589 590 - pm_runtime_get(&hu->serdev->dev); 591 - pm_runtime_mark_last_busy(&hu->serdev->dev); 592 - pm_runtime_put_autosuspend(&hu->serdev->dev); 590 + if (hu->serdev) { 591 + pm_runtime_get(&hu->serdev->dev); 592 + pm_runtime_mark_last_busy(&hu->serdev->dev); 593 + pm_runtime_put_autosuspend(&hu->serdev->dev); 594 + } 593 595 594 596 return 0; 595 597 } ··· 816 814 struct device *dev = &serdev->dev; 817 815 struct h5 *h5; 818 816 const struct h5_device_data *data; 819 - int err; 820 817 821 818 h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); 822 819 if (!h5) ··· 847 846 h5->vnd = data->vnd; 848 847 } 849 848 849 + if (data->driver_info & H5_INFO_WAKEUP_DISABLE) 850 + set_bit(H5_WAKEUP_DISABLE, &h5->flags); 850 851 851 852 h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); 852 853 if (IS_ERR(h5->enable_gpio)) ··· 859 856 if (IS_ERR(h5->device_wake_gpio)) 860 857 return PTR_ERR(h5->device_wake_gpio); 861 858 862 - err = hci_uart_register_device(&h5->serdev_hu, &h5p); 863 - if (err) 864 - return err; 865 - 866 - if (data->driver_info & H5_INFO_WAKEUP_DISABLE) 867 - set_bit(H5_WAKEUP_DISABLE, &h5->flags); 868 - 869 - return 0; 859 + return hci_uart_register_device(&h5->serdev_hu, &h5p); 870 860 } 871 861 872 862 static void h5_serdev_remove(struct serdev_device *serdev) ··· 958 962 serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); 959 963 serdev_device_set_baudrate(h5->hu->serdev, 115200); 960 964 961 - pm_runtime_set_active(&h5->hu->serdev->dev); 962 - pm_runtime_use_autosuspend(&h5->hu->serdev->dev); 963 - pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, 964 - SUSPEND_TIMEOUT_MS); 965 - pm_runtime_enable(&h5->hu->serdev->dev); 965 + if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { 966 + pm_runtime_set_active(&h5->hu->serdev->dev); 967 + pm_runtime_use_autosuspend(&h5->hu->serdev->dev); 968 + pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, 969 + SUSPEND_TIMEOUT_MS); 970 + 
pm_runtime_enable(&h5->hu->serdev->dev); 971 + } 966 972 967 973 /* The controller needs up to 500ms to wakeup */ 968 974 gpiod_set_value_cansleep(h5->enable_gpio, 1); ··· 974 976 975 977 static void h5_btrtl_close(struct h5 *h5) 976 978 { 977 - pm_runtime_disable(&h5->hu->serdev->dev); 979 + if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) 980 + pm_runtime_disable(&h5->hu->serdev->dev); 978 981 979 982 gpiod_set_value_cansleep(h5->device_wake_gpio, 0); 980 983 gpiod_set_value_cansleep(h5->enable_gpio, 0);
+3
drivers/bluetooth/hci_ldisc.c
··· 479 479 480 480 BT_DBG("tty %p", tty); 481 481 482 + if (!capable(CAP_NET_ADMIN)) 483 + return -EPERM; 484 + 482 485 /* Error if the tty has no write op instead of leaving an exploitable 483 486 * hole 484 487 */
+3 -2
drivers/bluetooth/hci_qca.c
··· 1577 1577 mutex_unlock(&qca->hci_memdump_lock); 1578 1578 } 1579 1579 1580 - static bool qca_prevent_wake(struct hci_dev *hdev) 1580 + static bool qca_wakeup(struct hci_dev *hdev) 1581 1581 { 1582 1582 struct hci_uart *hu = hci_get_drvdata(hdev); 1583 1583 bool wakeup; ··· 1730 1730 if (qca_is_wcn399x(soc_type) || 1731 1731 qca_is_wcn6750(soc_type)) { 1732 1732 set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); 1733 + hci_set_aosp_capable(hdev); 1733 1734 1734 1735 ret = qca_read_soc_version(hdev, &ver, soc_type); 1735 1736 if (ret) ··· 1765 1764 qca_debugfs_init(hdev); 1766 1765 hu->hdev->hw_error = qca_hw_error; 1767 1766 hu->hdev->cmd_timeout = qca_cmd_timeout; 1768 - hu->hdev->prevent_wake = qca_prevent_wake; 1767 + hu->hdev->wakeup = qca_wakeup; 1769 1768 } else if (ret == -ENOENT) { 1770 1769 /* No patch/nvm-config found, run with original fw/config */ 1771 1770 set_bit(QCA_ROM_FW, &qca->flags);
+122
drivers/bluetooth/hci_vhci.c
··· 21 21 22 22 #include <linux/skbuff.h> 23 23 #include <linux/miscdevice.h> 24 + #include <linux/debugfs.h> 24 25 25 26 #include <net/bluetooth/bluetooth.h> 26 27 #include <net/bluetooth/hci_core.h> ··· 38 37 39 38 struct mutex open_mutex; 40 39 struct delayed_work open_timeout; 40 + 41 + bool suspended; 42 + bool wakeup; 41 43 }; 42 44 43 45 static int vhci_open_dev(struct hci_dev *hdev) ··· 76 72 wake_up_interruptible(&data->read_wait); 77 73 return 0; 78 74 } 75 + 76 + static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) 77 + { 78 + *data_path_id = 0; 79 + return 0; 80 + } 81 + 82 + static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, 83 + struct bt_codec *codec, __u8 *vnd_len, 84 + __u8 **vnd_data) 85 + { 86 + if (type != ESCO_LINK) 87 + return -EINVAL; 88 + 89 + *vnd_len = 0; 90 + *vnd_data = NULL; 91 + return 0; 92 + } 93 + 94 + static bool vhci_wakeup(struct hci_dev *hdev) 95 + { 96 + struct vhci_data *data = hci_get_drvdata(hdev); 97 + 98 + return data->wakeup; 99 + } 100 + 101 + static ssize_t force_suspend_read(struct file *file, char __user *user_buf, 102 + size_t count, loff_t *ppos) 103 + { 104 + struct vhci_data *data = file->private_data; 105 + char buf[3]; 106 + 107 + buf[0] = data->suspended ? 
'Y' : 'N'; 108 + buf[1] = '\n'; 109 + buf[2] = '\0'; 110 + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 111 + } 112 + 113 + static ssize_t force_suspend_write(struct file *file, 114 + const char __user *user_buf, 115 + size_t count, loff_t *ppos) 116 + { 117 + struct vhci_data *data = file->private_data; 118 + bool enable; 119 + int err; 120 + 121 + err = kstrtobool_from_user(user_buf, count, &enable); 122 + if (err) 123 + return err; 124 + 125 + if (data->suspended == enable) 126 + return -EALREADY; 127 + 128 + if (enable) 129 + err = hci_suspend_dev(data->hdev); 130 + else 131 + err = hci_resume_dev(data->hdev); 132 + 133 + if (err) 134 + return err; 135 + 136 + data->suspended = enable; 137 + 138 + return count; 139 + } 140 + 141 + static const struct file_operations force_suspend_fops = { 142 + .open = simple_open, 143 + .read = force_suspend_read, 144 + .write = force_suspend_write, 145 + .llseek = default_llseek, 146 + }; 147 + 148 + static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, 149 + size_t count, loff_t *ppos) 150 + { 151 + struct vhci_data *data = file->private_data; 152 + char buf[3]; 153 + 154 + buf[0] = data->wakeup ? 
'Y' : 'N'; 155 + buf[1] = '\n'; 156 + buf[2] = '\0'; 157 + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 158 + } 159 + 160 + static ssize_t force_wakeup_write(struct file *file, 161 + const char __user *user_buf, size_t count, 162 + loff_t *ppos) 163 + { 164 + struct vhci_data *data = file->private_data; 165 + bool enable; 166 + int err; 167 + 168 + err = kstrtobool_from_user(user_buf, count, &enable); 169 + if (err) 170 + return err; 171 + 172 + if (data->wakeup == enable) 173 + return -EALREADY; 174 + 175 + return count; 176 + } 177 + 178 + static const struct file_operations force_wakeup_fops = { 179 + .open = simple_open, 180 + .read = force_wakeup_read, 181 + .write = force_wakeup_write, 182 + .llseek = default_llseek, 183 + }; 79 184 80 185 static int __vhci_create_device(struct vhci_data *data, __u8 opcode) 81 186 { ··· 225 112 hdev->close = vhci_close_dev; 226 113 hdev->flush = vhci_flush; 227 114 hdev->send = vhci_send_frame; 115 + hdev->get_data_path_id = vhci_get_data_path_id; 116 + hdev->get_codec_config_data = vhci_get_codec_config_data; 117 + hdev->wakeup = vhci_wakeup; 228 118 229 119 /* bit 6 is for external configuration */ 230 120 if (opcode & 0x40) ··· 244 128 kfree_skb(skb); 245 129 return -EBUSY; 246 130 } 131 + 132 + debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, 133 + &force_suspend_fops); 134 + 135 + debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, 136 + &force_wakeup_fops); 247 137 248 138 hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; 249 139
+90
include/net/bluetooth/bluetooth.h
··· 153 153 154 154 #define BT_SCM_PKT_STATUS 0x03 155 155 156 + #define BT_CODEC 19 157 + 158 + struct bt_codec_caps { 159 + __u8 len; 160 + __u8 data[]; 161 + } __packed; 162 + 163 + struct bt_codec { 164 + __u8 id; 165 + __u16 cid; 166 + __u16 vid; 167 + __u8 data_path; 168 + __u8 num_caps; 169 + } __packed; 170 + 171 + struct bt_codecs { 172 + __u8 num_codecs; 173 + struct bt_codec codecs[]; 174 + } __packed; 175 + 176 + #define BT_CODEC_CVSD 0x02 177 + #define BT_CODEC_TRANSPARENT 0x03 178 + #define BT_CODEC_MSBC 0x05 179 + 156 180 __printf(1, 2) 157 181 void bt_info(const char *fmt, ...); 158 182 __printf(1, 2) ··· 442 418 out: 443 419 kfree_skb(skb); 444 420 return NULL; 421 + } 422 + 423 + /* Shall not be called with lock_sock held */ 424 + static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk, 425 + struct msghdr *msg, 426 + size_t len, size_t mtu, 427 + size_t headroom, size_t tailroom) 428 + { 429 + struct sk_buff *skb; 430 + size_t size = min_t(size_t, len, mtu); 431 + int err; 432 + 433 + skb = bt_skb_send_alloc(sk, size + headroom + tailroom, 434 + msg->msg_flags & MSG_DONTWAIT, &err); 435 + if (!skb) 436 + return ERR_PTR(err); 437 + 438 + skb_reserve(skb, headroom); 439 + skb_tailroom_reserve(skb, mtu, tailroom); 440 + 441 + if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) { 442 + kfree_skb(skb); 443 + return ERR_PTR(-EFAULT); 444 + } 445 + 446 + skb->priority = sk->sk_priority; 447 + 448 + return skb; 449 + } 450 + 451 + /* Similar to bt_skb_sendmsg but can split the msg into multiple fragments 452 + * accourding to the MTU. 
453 + */ 454 + static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, 455 + struct msghdr *msg, 456 + size_t len, size_t mtu, 457 + size_t headroom, size_t tailroom) 458 + { 459 + struct sk_buff *skb, **frag; 460 + 461 + skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); 462 + if (IS_ERR_OR_NULL(skb)) 463 + return skb; 464 + 465 + len -= skb->len; 466 + if (!len) 467 + return skb; 468 + 469 + /* Add remaining data over MTU as continuation fragments */ 470 + frag = &skb_shinfo(skb)->frag_list; 471 + while (len) { 472 + struct sk_buff *tmp; 473 + 474 + tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); 475 + if (IS_ERR(tmp)) { 476 + kfree_skb(skb); 477 + return tmp; 478 + } 479 + 480 + len -= tmp->len; 481 + 482 + *frag = tmp; 483 + frag = &(*frag)->next; 484 + } 485 + 486 + return skb; 445 487 } 446 488 447 489 int bt_to_errno(u16 code);
+117
include/net/bluetooth/hci.h
··· 330 330 HCI_ENABLE_LL_PRIVACY, 331 331 HCI_CMD_PENDING, 332 332 HCI_FORCE_NO_MITM, 333 + HCI_QUALITY_REPORT, 334 + HCI_OFFLOAD_CODECS_ENABLED, 333 335 334 336 __HCI_NUM_FLAGS, 335 337 }; ··· 873 871 __u8 flow_spec_id; 874 872 } __packed; 875 873 874 + #define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d 875 + struct hci_coding_format { 876 + __u8 id; 877 + __le16 cid; 878 + __le16 vid; 879 + } __packed; 880 + 881 + struct hci_cp_enhanced_setup_sync_conn { 882 + __le16 handle; 883 + __le32 tx_bandwidth; 884 + __le32 rx_bandwidth; 885 + struct hci_coding_format tx_coding_format; 886 + struct hci_coding_format rx_coding_format; 887 + __le16 tx_codec_frame_size; 888 + __le16 rx_codec_frame_size; 889 + __le32 in_bandwidth; 890 + __le32 out_bandwidth; 891 + struct hci_coding_format in_coding_format; 892 + struct hci_coding_format out_coding_format; 893 + __le16 in_coded_data_size; 894 + __le16 out_coded_data_size; 895 + __u8 in_pcm_data_format; 896 + __u8 out_pcm_data_format; 897 + __u8 in_pcm_sample_payload_msb_pos; 898 + __u8 out_pcm_sample_payload_msb_pos; 899 + __u8 in_data_path; 900 + __u8 out_data_path; 901 + __u8 in_transport_unit_size; 902 + __u8 out_transport_unit_size; 903 + __le16 max_latency; 904 + __le16 pkt_type; 905 + __u8 retrans_effort; 906 + } __packed; 907 + 876 908 struct hci_rp_logical_link_cancel { 877 909 __u8 status; 878 910 __u8 phy_handle; ··· 1286 1250 __u8 rand256[16]; 1287 1251 } __packed; 1288 1252 1253 + #define HCI_CONFIGURE_DATA_PATH 0x0c83 1254 + struct hci_op_configure_data_path { 1255 + __u8 direction; 1256 + __u8 data_path_id; 1257 + __u8 vnd_len; 1258 + __u8 vnd_data[]; 1259 + } __packed; 1260 + 1289 1261 #define HCI_OP_READ_LOCAL_VERSION 0x1001 1290 1262 struct hci_rp_read_local_version { 1291 1263 __u8 status; ··· 1351 1307 } __packed; 1352 1308 1353 1309 #define HCI_OP_READ_LOCAL_CODECS 0x100b 1310 + struct hci_std_codecs { 1311 + __u8 num; 1312 + __u8 codec[]; 1313 + } __packed; 1314 + 1315 + struct hci_vnd_codec { 1316 + /* 
company id */ 1317 + __le16 cid; 1318 + /* vendor codec id */ 1319 + __le16 vid; 1320 + } __packed; 1321 + 1322 + struct hci_vnd_codecs { 1323 + __u8 num; 1324 + struct hci_vnd_codec codec[]; 1325 + } __packed; 1326 + 1327 + struct hci_rp_read_local_supported_codecs { 1328 + __u8 status; 1329 + struct hci_std_codecs std_codecs; 1330 + struct hci_vnd_codecs vnd_codecs; 1331 + } __packed; 1354 1332 1355 1333 #define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c 1356 1334 struct hci_rp_read_local_pairing_opts { 1357 1335 __u8 status; 1358 1336 __u8 pairing_opts; 1359 1337 __u8 max_key_size; 1338 + } __packed; 1339 + 1340 + #define HCI_OP_READ_LOCAL_CODECS_V2 0x100d 1341 + struct hci_std_codec_v2 { 1342 + __u8 id; 1343 + __u8 transport; 1344 + } __packed; 1345 + 1346 + struct hci_std_codecs_v2 { 1347 + __u8 num; 1348 + struct hci_std_codec_v2 codec[]; 1349 + } __packed; 1350 + 1351 + struct hci_vnd_codec_v2 { 1352 + __u8 id; 1353 + __le16 cid; 1354 + __le16 vid; 1355 + __u8 transport; 1356 + } __packed; 1357 + 1358 + struct hci_vnd_codecs_v2 { 1359 + __u8 num; 1360 + struct hci_vnd_codec_v2 codec[]; 1361 + } __packed; 1362 + 1363 + struct hci_rp_read_local_supported_codecs_v2 { 1364 + __u8 status; 1365 + struct hci_std_codecs_v2 std_codecs; 1366 + struct hci_vnd_codecs_v2 vendor_codecs; 1367 + } __packed; 1368 + 1369 + #define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e 1370 + struct hci_op_read_local_codec_caps { 1371 + __u8 id; 1372 + __le16 cid; 1373 + __le16 vid; 1374 + __u8 transport; 1375 + __u8 direction; 1376 + } __packed; 1377 + 1378 + struct hci_codec_caps { 1379 + __u8 len; 1380 + __u8 data[]; 1381 + } __packed; 1382 + 1383 + struct hci_rp_read_local_codec_caps { 1384 + __u8 status; 1385 + __u8 num_caps; 1360 1386 } __packed; 1361 1387 1362 1388 #define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b ··· 2664 2550 #define hci_iso_data_len_pack(h, f) ((__u16) ((h) | ((f) << 14))) 2665 2551 #define hci_iso_data_len(h) ((h) & 0x3fff) 2666 2552 #define hci_iso_data_flags(h) ((h) >> 14) 
2553 + 2554 + /* codec transport types */ 2555 + #define HCI_TRANSPORT_SCO_ESCO 0x01 2667 2556 2668 2557 /* le24 support */ 2669 2558 static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
+34 -41
include/net/bluetooth/hci_core.h
··· 131 131 u8 bdaddr_type; 132 132 }; 133 133 134 + struct codec_list { 135 + struct list_head list; 136 + u8 id; 137 + __u16 cid; 138 + __u16 vid; 139 + u8 transport; 140 + u8 num_caps; 141 + u32 len; 142 + struct hci_codec_caps caps[]; 143 + }; 144 + 134 145 struct bdaddr_list_with_irk { 135 146 struct list_head list; 136 147 bdaddr_t bdaddr; ··· 547 536 struct list_head pend_le_conns; 548 537 struct list_head pend_le_reports; 549 538 struct list_head blocked_keys; 539 + struct list_head local_codecs; 550 540 551 541 struct hci_dev_stats stat; 552 542 ··· 617 605 int (*set_diag)(struct hci_dev *hdev, bool enable); 618 606 int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); 619 607 void (*cmd_timeout)(struct hci_dev *hdev); 620 - bool (*prevent_wake)(struct hci_dev *hdev); 608 + bool (*wakeup)(struct hci_dev *hdev); 609 + int (*set_quality_report)(struct hci_dev *hdev, bool enable); 610 + int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); 611 + int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, 612 + struct bt_codec *codec, __u8 *vnd_len, 613 + __u8 **vnd_data); 621 614 }; 622 615 623 616 #define HCI_PHY_HANDLE(handle) (handle & 0xff) ··· 716 699 struct amp_mgr *amp_mgr; 717 700 718 701 struct hci_conn *link; 702 + struct bt_codec codec; 719 703 720 704 void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); 721 705 void (*security_cfm_cb) (struct hci_conn *conn, u8 status); ··· 778 760 hci_dev_clear_flag(hdev, HCI_LE_ADV); \ 779 761 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ 780 762 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ 763 + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ 781 764 } while (0) 782 765 783 766 /* ----- HCI interface to upper protocols ----- */ ··· 1118 1099 u16 conn_timeout, 1119 1100 enum conn_reasons conn_reason); 1120 1101 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 1121 - u8 dst_type, u8 sec_level, u16 conn_timeout, 1122 - u8 role, bdaddr_t *direct_rpa); 1102 + u8 
dst_type, bool dst_resolved, u8 sec_level, 1103 + u16 conn_timeout, u8 role, 1104 + bdaddr_t *direct_rpa); 1123 1105 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, 1124 1106 u8 sec_level, u8 auth_type, 1125 1107 enum conn_reasons conn_reason); 1126 1108 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, 1127 - __u16 setting); 1109 + __u16 setting, struct bt_codec *codec); 1128 1110 int hci_conn_check_link_mode(struct hci_conn *conn); 1129 1111 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); 1130 1112 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, ··· 1380 1360 u16 scan_rsp_len, u8 *scan_rsp_data); 1381 1361 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); 1382 1362 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); 1363 + u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); 1364 + bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); 1383 1365 1384 1366 void hci_adv_monitors_clear(struct hci_dev *hdev); 1385 1367 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); ··· 1463 1441 1464 1442 /* Use LL Privacy based address resolution if supported */ 1465 1443 #define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) 1444 + 1445 + /* Use enhanced synchronous connection if command is supported */ 1446 + #define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08) 1466 1447 1467 1448 /* Use ext scanning if set ext scan param and ext scan enable is supported */ 1468 1449 #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ··· 1632 1607 cb->role_switch_cfm(conn, status, role); 1633 1608 } 1634 1609 mutex_unlock(&hci_cb_list_lock); 1635 - } 1636 - 1637 - static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, 1638 - size_t *data_len) 1639 - { 1640 - size_t parsed = 0; 1641 - 1642 - if (eir_len < 2) 1643 - return NULL; 1644 - 1645 - while (parsed 
< eir_len - 1) { 1646 - u8 field_len = eir[0]; 1647 - 1648 - if (field_len == 0) 1649 - break; 1650 - 1651 - parsed += field_len + 1; 1652 - 1653 - if (parsed > eir_len) 1654 - break; 1655 - 1656 - if (eir[1] != type) { 1657 - eir += field_len + 1; 1658 - continue; 1659 - } 1660 - 1661 - /* Zero length data */ 1662 - if (field_len == 1) 1663 - return NULL; 1664 - 1665 - if (data_len) 1666 - *data_len = field_len - 1; 1667 - 1668 - return &eir[2]; 1669 - } 1670 - 1671 - return NULL; 1672 1610 } 1673 1611 1674 1612 static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) ··· 1854 1866 #define SCO_AIRMODE_MASK 0x0003 1855 1867 #define SCO_AIRMODE_CVSD 0x0000 1856 1868 #define SCO_AIRMODE_TRANSP 0x0003 1869 + 1870 + #define LOCAL_CODEC_ACL_MASK BIT(0) 1871 + #define LOCAL_CODEC_SCO_MASK BIT(1) 1872 + 1873 + #define TRANSPORT_TYPE_MAX 0x04 1857 1874 1858 1875 #endif /* __HCI_CORE_H */
+2 -1
net/bluetooth/Makefile
··· 14 14 15 15 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 16 16 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ 17 - ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o 17 + ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ 18 + eir.o 18 19 19 20 bluetooth-$(CONFIG_BT_BREDR) += sco.o 20 21 bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
+335
net/bluetooth/eir.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * BlueZ - Bluetooth protocol stack for Linux 4 + * 5 + * Copyright (C) 2021 Intel Corporation 6 + */ 7 + 8 + #include <net/bluetooth/bluetooth.h> 9 + #include <net/bluetooth/hci_core.h> 10 + #include <net/bluetooth/mgmt.h> 11 + 12 + #include "eir.h" 13 + 14 + #define PNP_INFO_SVCLASS_ID 0x1200 15 + 16 + u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) 17 + { 18 + size_t short_len; 19 + size_t complete_len; 20 + 21 + /* no space left for name (+ NULL + type + len) */ 22 + if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) 23 + return ad_len; 24 + 25 + /* use complete name if present and fits */ 26 + complete_len = strlen(hdev->dev_name); 27 + if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) 28 + return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, 29 + hdev->dev_name, complete_len + 1); 30 + 31 + /* use short name if present */ 32 + short_len = strlen(hdev->short_name); 33 + if (short_len) 34 + return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, 35 + hdev->short_name, short_len + 1); 36 + 37 + /* use shortened full name if present, we already know that name 38 + * is longer then HCI_MAX_SHORT_NAME_LENGTH 39 + */ 40 + if (complete_len) { 41 + u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; 42 + 43 + memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); 44 + name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; 45 + 46 + return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, 47 + sizeof(name)); 48 + } 49 + 50 + return ad_len; 51 + } 52 + 53 + u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) 54 + { 55 + return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); 56 + } 57 + 58 + static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) 59 + { 60 + u8 *ptr = data, *uuids_start = NULL; 61 + struct bt_uuid *uuid; 62 + 63 + if (len < 4) 64 + return ptr; 65 + 66 + list_for_each_entry(uuid, &hdev->uuids, list) { 67 + u16 uuid16; 68 + 69 + if 
(uuid->size != 16) 70 + continue; 71 + 72 + uuid16 = get_unaligned_le16(&uuid->uuid[12]); 73 + if (uuid16 < 0x1100) 74 + continue; 75 + 76 + if (uuid16 == PNP_INFO_SVCLASS_ID) 77 + continue; 78 + 79 + if (!uuids_start) { 80 + uuids_start = ptr; 81 + uuids_start[0] = 1; 82 + uuids_start[1] = EIR_UUID16_ALL; 83 + ptr += 2; 84 + } 85 + 86 + /* Stop if not enough space to put next UUID */ 87 + if ((ptr - data) + sizeof(u16) > len) { 88 + uuids_start[1] = EIR_UUID16_SOME; 89 + break; 90 + } 91 + 92 + *ptr++ = (uuid16 & 0x00ff); 93 + *ptr++ = (uuid16 & 0xff00) >> 8; 94 + uuids_start[0] += sizeof(uuid16); 95 + } 96 + 97 + return ptr; 98 + } 99 + 100 + static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) 101 + { 102 + u8 *ptr = data, *uuids_start = NULL; 103 + struct bt_uuid *uuid; 104 + 105 + if (len < 6) 106 + return ptr; 107 + 108 + list_for_each_entry(uuid, &hdev->uuids, list) { 109 + if (uuid->size != 32) 110 + continue; 111 + 112 + if (!uuids_start) { 113 + uuids_start = ptr; 114 + uuids_start[0] = 1; 115 + uuids_start[1] = EIR_UUID32_ALL; 116 + ptr += 2; 117 + } 118 + 119 + /* Stop if not enough space to put next UUID */ 120 + if ((ptr - data) + sizeof(u32) > len) { 121 + uuids_start[1] = EIR_UUID32_SOME; 122 + break; 123 + } 124 + 125 + memcpy(ptr, &uuid->uuid[12], sizeof(u32)); 126 + ptr += sizeof(u32); 127 + uuids_start[0] += sizeof(u32); 128 + } 129 + 130 + return ptr; 131 + } 132 + 133 + static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) 134 + { 135 + u8 *ptr = data, *uuids_start = NULL; 136 + struct bt_uuid *uuid; 137 + 138 + if (len < 18) 139 + return ptr; 140 + 141 + list_for_each_entry(uuid, &hdev->uuids, list) { 142 + if (uuid->size != 128) 143 + continue; 144 + 145 + if (!uuids_start) { 146 + uuids_start = ptr; 147 + uuids_start[0] = 1; 148 + uuids_start[1] = EIR_UUID128_ALL; 149 + ptr += 2; 150 + } 151 + 152 + /* Stop if not enough space to put next UUID */ 153 + if ((ptr - data) + 16 > len) { 154 + 
uuids_start[1] = EIR_UUID128_SOME; 155 + break; 156 + } 157 + 158 + memcpy(ptr, uuid->uuid, 16); 159 + ptr += 16; 160 + uuids_start[0] += 16; 161 + } 162 + 163 + return ptr; 164 + } 165 + 166 + void eir_create(struct hci_dev *hdev, u8 *data) 167 + { 168 + u8 *ptr = data; 169 + size_t name_len; 170 + 171 + name_len = strlen(hdev->dev_name); 172 + 173 + if (name_len > 0) { 174 + /* EIR Data type */ 175 + if (name_len > 48) { 176 + name_len = 48; 177 + ptr[1] = EIR_NAME_SHORT; 178 + } else { 179 + ptr[1] = EIR_NAME_COMPLETE; 180 + } 181 + 182 + /* EIR Data length */ 183 + ptr[0] = name_len + 1; 184 + 185 + memcpy(ptr + 2, hdev->dev_name, name_len); 186 + 187 + ptr += (name_len + 2); 188 + } 189 + 190 + if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { 191 + ptr[0] = 2; 192 + ptr[1] = EIR_TX_POWER; 193 + ptr[2] = (u8)hdev->inq_tx_power; 194 + 195 + ptr += 3; 196 + } 197 + 198 + if (hdev->devid_source > 0) { 199 + ptr[0] = 9; 200 + ptr[1] = EIR_DEVICE_ID; 201 + 202 + put_unaligned_le16(hdev->devid_source, ptr + 2); 203 + put_unaligned_le16(hdev->devid_vendor, ptr + 4); 204 + put_unaligned_le16(hdev->devid_product, ptr + 6); 205 + put_unaligned_le16(hdev->devid_version, ptr + 8); 206 + 207 + ptr += 10; 208 + } 209 + 210 + ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); 211 + ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); 212 + ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); 213 + } 214 + 215 + u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) 216 + { 217 + struct adv_info *adv = NULL; 218 + u8 ad_len = 0, flags = 0; 219 + u32 instance_flags; 220 + 221 + /* Return 0 when the current instance identifier is invalid. */ 222 + if (instance) { 223 + adv = hci_find_adv_instance(hdev, instance); 224 + if (!adv) 225 + return 0; 226 + } 227 + 228 + instance_flags = hci_adv_instance_flags(hdev, instance); 229 + 230 + /* If instance already has the flags set skip adding it once 231 + * again. 
232 + */ 233 + if (adv && eir_get_data(adv->adv_data, adv->adv_data_len, EIR_FLAGS, 234 + NULL)) 235 + goto skip_flags; 236 + 237 + /* The Add Advertising command allows userspace to set both the general 238 + * and limited discoverable flags. 239 + */ 240 + if (instance_flags & MGMT_ADV_FLAG_DISCOV) 241 + flags |= LE_AD_GENERAL; 242 + 243 + if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) 244 + flags |= LE_AD_LIMITED; 245 + 246 + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 247 + flags |= LE_AD_NO_BREDR; 248 + 249 + if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { 250 + /* If a discovery flag wasn't provided, simply use the global 251 + * settings. 252 + */ 253 + if (!flags) 254 + flags |= mgmt_get_adv_discov_flags(hdev); 255 + 256 + /* If flags would still be empty, then there is no need to 257 + * include the "Flags" AD field". 258 + */ 259 + if (flags) { 260 + ptr[0] = 0x02; 261 + ptr[1] = EIR_FLAGS; 262 + ptr[2] = flags; 263 + 264 + ad_len += 3; 265 + ptr += 3; 266 + } 267 + } 268 + 269 + skip_flags: 270 + if (adv) { 271 + memcpy(ptr, adv->adv_data, adv->adv_data_len); 272 + ad_len += adv->adv_data_len; 273 + ptr += adv->adv_data_len; 274 + } 275 + 276 + if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { 277 + s8 adv_tx_power; 278 + 279 + if (ext_adv_capable(hdev)) { 280 + if (adv) 281 + adv_tx_power = adv->tx_power; 282 + else 283 + adv_tx_power = hdev->adv_tx_power; 284 + } else { 285 + adv_tx_power = hdev->adv_tx_power; 286 + } 287 + 288 + /* Provide Tx Power only if we can provide a valid value for it */ 289 + if (adv_tx_power != HCI_TX_POWER_INVALID) { 290 + ptr[0] = 0x02; 291 + ptr[1] = EIR_TX_POWER; 292 + ptr[2] = (u8)adv_tx_power; 293 + 294 + ad_len += 3; 295 + ptr += 3; 296 + } 297 + } 298 + 299 + return ad_len; 300 + } 301 + 302 + static u8 create_default_scan_rsp(struct hci_dev *hdev, u8 *ptr) 303 + { 304 + u8 scan_rsp_len = 0; 305 + 306 + if (hdev->appearance) 307 + scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); 308 
+ 309 + return eir_append_local_name(hdev, ptr, scan_rsp_len); 310 + } 311 + 312 + u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) 313 + { 314 + struct adv_info *adv; 315 + u8 scan_rsp_len = 0; 316 + 317 + if (!instance) 318 + return create_default_scan_rsp(hdev, ptr); 319 + 320 + adv = hci_find_adv_instance(hdev, instance); 321 + if (!adv) 322 + return 0; 323 + 324 + if ((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) 325 + scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); 326 + 327 + memcpy(&ptr[scan_rsp_len], adv->scan_rsp_data, adv->scan_rsp_len); 328 + 329 + scan_rsp_len += adv->scan_rsp_len; 330 + 331 + if (adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) 332 + scan_rsp_len = eir_append_local_name(hdev, ptr, scan_rsp_len); 333 + 334 + return scan_rsp_len; 335 + }
+72
net/bluetooth/eir.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * BlueZ - Bluetooth protocol stack for Linux 4 + * 5 + * Copyright (C) 2021 Intel Corporation 6 + */ 7 + 8 + void eir_create(struct hci_dev *hdev, u8 *data); 9 + 10 + u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); 11 + u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); 12 + 13 + u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); 14 + u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); 15 + 16 + static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, 17 + u8 *data, u8 data_len) 18 + { 19 + eir[eir_len++] = sizeof(type) + data_len; 20 + eir[eir_len++] = type; 21 + memcpy(&eir[eir_len], data, data_len); 22 + eir_len += data_len; 23 + 24 + return eir_len; 25 + } 26 + 27 + static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) 28 + { 29 + eir[eir_len++] = sizeof(type) + sizeof(data); 30 + eir[eir_len++] = type; 31 + put_unaligned_le16(data, &eir[eir_len]); 32 + eir_len += sizeof(data); 33 + 34 + return eir_len; 35 + } 36 + 37 + static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, 38 + size_t *data_len) 39 + { 40 + size_t parsed = 0; 41 + 42 + if (eir_len < 2) 43 + return NULL; 44 + 45 + while (parsed < eir_len - 1) { 46 + u8 field_len = eir[0]; 47 + 48 + if (field_len == 0) 49 + break; 50 + 51 + parsed += field_len + 1; 52 + 53 + if (parsed > eir_len) 54 + break; 55 + 56 + if (eir[1] != type) { 57 + eir += field_len + 1; 58 + continue; 59 + } 60 + 61 + /* Zero length data */ 62 + if (field_len == 1) 63 + return NULL; 64 + 65 + if (data_len) 66 + *data_len = field_len - 1; 67 + 68 + return &eir[2]; 69 + } 70 + 71 + return NULL; 72 + }
+238
net/bluetooth/hci_codec.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + /* Copyright (C) 2021 Intel Corporation */ 4 + 5 + #include <net/bluetooth/bluetooth.h> 6 + #include <net/bluetooth/hci_core.h> 7 + #include "hci_codec.h" 8 + 9 + static int hci_codec_list_add(struct list_head *list, 10 + struct hci_op_read_local_codec_caps *sent, 11 + struct hci_rp_read_local_codec_caps *rp, 12 + void *caps, 13 + __u32 len) 14 + { 15 + struct codec_list *entry; 16 + 17 + entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL); 18 + if (!entry) 19 + return -ENOMEM; 20 + 21 + entry->id = sent->id; 22 + if (sent->id == 0xFF) { 23 + entry->cid = __le16_to_cpu(sent->cid); 24 + entry->vid = __le16_to_cpu(sent->vid); 25 + } 26 + entry->transport = sent->transport; 27 + entry->len = len; 28 + entry->num_caps = rp->num_caps; 29 + if (rp->num_caps) 30 + memcpy(entry->caps, caps, len); 31 + list_add(&entry->list, list); 32 + 33 + return 0; 34 + } 35 + 36 + void hci_codec_list_clear(struct list_head *codec_list) 37 + { 38 + struct codec_list *c, *n; 39 + 40 + list_for_each_entry_safe(c, n, codec_list, list) { 41 + list_del(&c->list); 42 + kfree(c); 43 + } 44 + } 45 + 46 + static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport, 47 + struct hci_op_read_local_codec_caps 48 + *cmd) 49 + { 50 + __u8 i; 51 + 52 + for (i = 0; i < TRANSPORT_TYPE_MAX; i++) { 53 + if (transport & BIT(i)) { 54 + struct hci_rp_read_local_codec_caps *rp; 55 + struct hci_codec_caps *caps; 56 + struct sk_buff *skb; 57 + __u8 j; 58 + __u32 len; 59 + 60 + cmd->transport = i; 61 + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, 62 + sizeof(*cmd), cmd, 63 + HCI_CMD_TIMEOUT); 64 + if (IS_ERR(skb)) { 65 + bt_dev_err(hdev, "Failed to read codec capabilities (%ld)", 66 + PTR_ERR(skb)); 67 + continue; 68 + } 69 + 70 + if (skb->len < sizeof(*rp)) 71 + goto error; 72 + 73 + rp = (void *)skb->data; 74 + 75 + if (rp->status) 76 + goto error; 77 + 78 + if (!rp->num_caps) { 79 + len = 0; 80 + /* this codec doesn't have 
capabilities */ 81 + goto skip_caps_parse; 82 + } 83 + 84 + skb_pull(skb, sizeof(*rp)); 85 + 86 + for (j = 0, len = 0; j < rp->num_caps; j++) { 87 + caps = (void *)skb->data; 88 + if (skb->len < sizeof(*caps)) 89 + goto error; 90 + if (skb->len < caps->len) 91 + goto error; 92 + len += sizeof(caps->len) + caps->len; 93 + skb_pull(skb, sizeof(caps->len) + caps->len); 94 + } 95 + 96 + skip_caps_parse: 97 + hci_dev_lock(hdev); 98 + hci_codec_list_add(&hdev->local_codecs, cmd, rp, 99 + (__u8 *)rp + sizeof(*rp), len); 100 + hci_dev_unlock(hdev); 101 + error: 102 + kfree_skb(skb); 103 + } 104 + } 105 + } 106 + 107 + void hci_read_supported_codecs(struct hci_dev *hdev) 108 + { 109 + struct sk_buff *skb; 110 + struct hci_rp_read_local_supported_codecs *rp; 111 + struct hci_std_codecs *std_codecs; 112 + struct hci_vnd_codecs *vnd_codecs; 113 + struct hci_op_read_local_codec_caps caps; 114 + __u8 i; 115 + 116 + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, 117 + HCI_CMD_TIMEOUT); 118 + 119 + if (IS_ERR(skb)) { 120 + bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", 121 + PTR_ERR(skb)); 122 + return; 123 + } 124 + 125 + if (skb->len < sizeof(*rp)) 126 + goto error; 127 + 128 + rp = (void *)skb->data; 129 + 130 + if (rp->status) 131 + goto error; 132 + 133 + skb_pull(skb, sizeof(rp->status)); 134 + 135 + std_codecs = (void *)skb->data; 136 + 137 + /* validate codecs length before accessing */ 138 + if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num) 139 + + sizeof(std_codecs->num)) 140 + goto error; 141 + 142 + /* enumerate codec capabilities of standard codecs */ 143 + memset(&caps, 0, sizeof(caps)); 144 + for (i = 0; i < std_codecs->num; i++) { 145 + caps.id = std_codecs->codec[i]; 146 + caps.direction = 0x00; 147 + hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps); 148 + } 149 + 150 + skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) 151 + + sizeof(std_codecs->num)); 152 + 153 + vnd_codecs = (void 
*)skb->data; 154 + 155 + /* validate vendor codecs length before accessing */ 156 + if (skb->len < 157 + flex_array_size(vnd_codecs, codec, vnd_codecs->num) 158 + + sizeof(vnd_codecs->num)) 159 + goto error; 160 + 161 + /* enumerate vendor codec capabilities */ 162 + for (i = 0; i < vnd_codecs->num; i++) { 163 + caps.id = 0xFF; 164 + caps.cid = vnd_codecs->codec[i].cid; 165 + caps.vid = vnd_codecs->codec[i].vid; 166 + caps.direction = 0x00; 167 + hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps); 168 + } 169 + 170 + error: 171 + kfree_skb(skb); 172 + } 173 + 174 + void hci_read_supported_codecs_v2(struct hci_dev *hdev) 175 + { 176 + struct sk_buff *skb; 177 + struct hci_rp_read_local_supported_codecs_v2 *rp; 178 + struct hci_std_codecs_v2 *std_codecs; 179 + struct hci_vnd_codecs_v2 *vnd_codecs; 180 + struct hci_op_read_local_codec_caps caps; 181 + __u8 i; 182 + 183 + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, 184 + HCI_CMD_TIMEOUT); 185 + 186 + if (IS_ERR(skb)) { 187 + bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", 188 + PTR_ERR(skb)); 189 + return; 190 + } 191 + 192 + if (skb->len < sizeof(*rp)) 193 + goto error; 194 + 195 + rp = (void *)skb->data; 196 + 197 + if (rp->status) 198 + goto error; 199 + 200 + skb_pull(skb, sizeof(rp->status)); 201 + 202 + std_codecs = (void *)skb->data; 203 + 204 + /* check for payload data length before accessing */ 205 + if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num) 206 + + sizeof(std_codecs->num)) 207 + goto error; 208 + 209 + memset(&caps, 0, sizeof(caps)); 210 + 211 + for (i = 0; i < std_codecs->num; i++) { 212 + caps.id = std_codecs->codec[i].id; 213 + hci_read_codec_capabilities(hdev, std_codecs->codec[i].transport, 214 + &caps); 215 + } 216 + 217 + skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) 218 + + sizeof(std_codecs->num)); 219 + 220 + vnd_codecs = (void *)skb->data; 221 + 222 + /* check for payload data length before accessing */ 
223 + if (skb->len < 224 + flex_array_size(vnd_codecs, codec, vnd_codecs->num) 225 + + sizeof(vnd_codecs->num)) 226 + goto error; 227 + 228 + for (i = 0; i < vnd_codecs->num; i++) { 229 + caps.id = 0xFF; 230 + caps.cid = vnd_codecs->codec[i].cid; 231 + caps.vid = vnd_codecs->codec[i].vid; 232 + hci_read_codec_capabilities(hdev, vnd_codecs->codec[i].transport, 233 + &caps); 234 + } 235 + 236 + error: 237 + kfree_skb(skb); 238 + }
+7
net/bluetooth/hci_codec.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + /* Copyright (C) 2014 Intel Corporation */ 4 + 5 + void hci_read_supported_codecs(struct hci_dev *hdev); 6 + void hci_read_supported_codecs_v2(struct hci_dev *hdev); 7 + void hci_codec_list_clear(struct list_head *codec_list);
+151 -17
net/bluetooth/hci_conn.c
··· 307 307 return conn->attempt <= size; 308 308 } 309 309 310 - bool hci_setup_sync(struct hci_conn *conn, __u16 handle) 310 + static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) 311 + { 312 + struct hci_dev *hdev = conn->hdev; 313 + struct hci_cp_enhanced_setup_sync_conn cp; 314 + const struct sco_param *param; 315 + 316 + bt_dev_dbg(hdev, "hcon %p", conn); 317 + 318 + /* for offload use case, codec needs to configured before opening SCO */ 319 + if (conn->codec.data_path) 320 + hci_req_configure_datapath(hdev, &conn->codec); 321 + 322 + conn->state = BT_CONNECT; 323 + conn->out = true; 324 + 325 + conn->attempt++; 326 + 327 + memset(&cp, 0x00, sizeof(cp)); 328 + 329 + cp.handle = cpu_to_le16(handle); 330 + 331 + cp.tx_bandwidth = cpu_to_le32(0x00001f40); 332 + cp.rx_bandwidth = cpu_to_le32(0x00001f40); 333 + 334 + switch (conn->codec.id) { 335 + case BT_CODEC_MSBC: 336 + if (!find_next_esco_param(conn, esco_param_msbc, 337 + ARRAY_SIZE(esco_param_msbc))) 338 + return false; 339 + 340 + param = &esco_param_msbc[conn->attempt - 1]; 341 + cp.tx_coding_format.id = 0x05; 342 + cp.rx_coding_format.id = 0x05; 343 + cp.tx_codec_frame_size = __cpu_to_le16(60); 344 + cp.rx_codec_frame_size = __cpu_to_le16(60); 345 + cp.in_bandwidth = __cpu_to_le32(32000); 346 + cp.out_bandwidth = __cpu_to_le32(32000); 347 + cp.in_coding_format.id = 0x04; 348 + cp.out_coding_format.id = 0x04; 349 + cp.in_coded_data_size = __cpu_to_le16(16); 350 + cp.out_coded_data_size = __cpu_to_le16(16); 351 + cp.in_pcm_data_format = 2; 352 + cp.out_pcm_data_format = 2; 353 + cp.in_pcm_sample_payload_msb_pos = 0; 354 + cp.out_pcm_sample_payload_msb_pos = 0; 355 + cp.in_data_path = conn->codec.data_path; 356 + cp.out_data_path = conn->codec.data_path; 357 + cp.in_transport_unit_size = 1; 358 + cp.out_transport_unit_size = 1; 359 + break; 360 + 361 + case BT_CODEC_TRANSPARENT: 362 + if (!find_next_esco_param(conn, esco_param_msbc, 363 + ARRAY_SIZE(esco_param_msbc))) 364 + return 
false; 365 + param = &esco_param_msbc[conn->attempt - 1]; 366 + cp.tx_coding_format.id = 0x03; 367 + cp.rx_coding_format.id = 0x03; 368 + cp.tx_codec_frame_size = __cpu_to_le16(60); 369 + cp.rx_codec_frame_size = __cpu_to_le16(60); 370 + cp.in_bandwidth = __cpu_to_le32(0x1f40); 371 + cp.out_bandwidth = __cpu_to_le32(0x1f40); 372 + cp.in_coding_format.id = 0x03; 373 + cp.out_coding_format.id = 0x03; 374 + cp.in_coded_data_size = __cpu_to_le16(16); 375 + cp.out_coded_data_size = __cpu_to_le16(16); 376 + cp.in_pcm_data_format = 2; 377 + cp.out_pcm_data_format = 2; 378 + cp.in_pcm_sample_payload_msb_pos = 0; 379 + cp.out_pcm_sample_payload_msb_pos = 0; 380 + cp.in_data_path = conn->codec.data_path; 381 + cp.out_data_path = conn->codec.data_path; 382 + cp.in_transport_unit_size = 1; 383 + cp.out_transport_unit_size = 1; 384 + break; 385 + 386 + case BT_CODEC_CVSD: 387 + if (lmp_esco_capable(conn->link)) { 388 + if (!find_next_esco_param(conn, esco_param_cvsd, 389 + ARRAY_SIZE(esco_param_cvsd))) 390 + return false; 391 + param = &esco_param_cvsd[conn->attempt - 1]; 392 + } else { 393 + if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) 394 + return false; 395 + param = &sco_param_cvsd[conn->attempt - 1]; 396 + } 397 + cp.tx_coding_format.id = 2; 398 + cp.rx_coding_format.id = 2; 399 + cp.tx_codec_frame_size = __cpu_to_le16(60); 400 + cp.rx_codec_frame_size = __cpu_to_le16(60); 401 + cp.in_bandwidth = __cpu_to_le32(16000); 402 + cp.out_bandwidth = __cpu_to_le32(16000); 403 + cp.in_coding_format.id = 4; 404 + cp.out_coding_format.id = 4; 405 + cp.in_coded_data_size = __cpu_to_le16(16); 406 + cp.out_coded_data_size = __cpu_to_le16(16); 407 + cp.in_pcm_data_format = 2; 408 + cp.out_pcm_data_format = 2; 409 + cp.in_pcm_sample_payload_msb_pos = 0; 410 + cp.out_pcm_sample_payload_msb_pos = 0; 411 + cp.in_data_path = conn->codec.data_path; 412 + cp.out_data_path = conn->codec.data_path; 413 + cp.in_transport_unit_size = 16; 414 + cp.out_transport_unit_size = 16; 415 + break; 416 + 
default: 417 + return false; 418 + } 419 + 420 + cp.retrans_effort = param->retrans_effort; 421 + cp.pkt_type = __cpu_to_le16(param->pkt_type); 422 + cp.max_latency = __cpu_to_le16(param->max_latency); 423 + 424 + if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) 425 + return false; 426 + 427 + return true; 428 + } 429 + 430 + static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) 311 431 { 312 432 struct hci_dev *hdev = conn->hdev; 313 433 struct hci_cp_setup_sync_conn cp; 314 434 const struct sco_param *param; 315 435 316 - BT_DBG("hcon %p", conn); 436 + bt_dev_dbg(hdev, "hcon %p", conn); 317 437 318 438 conn->state = BT_CONNECT; 319 439 conn->out = true; ··· 477 357 return false; 478 358 479 359 return true; 360 + } 361 + 362 + bool hci_setup_sync(struct hci_conn *conn, __u16 handle) 363 + { 364 + if (enhanced_sco_capable(conn->hdev)) 365 + return hci_enhanced_setup_sync_conn(conn, handle); 366 + 367 + return hci_setup_sync_conn(conn, handle); 480 368 } 481 369 482 370 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, ··· 1168 1040 } 1169 1041 1170 1042 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 1171 - u8 dst_type, u8 sec_level, u16 conn_timeout, 1172 - u8 role, bdaddr_t *direct_rpa) 1043 + u8 dst_type, bool dst_resolved, u8 sec_level, 1044 + u16 conn_timeout, u8 role, bdaddr_t *direct_rpa) 1173 1045 { 1174 1046 struct hci_conn_params *params; 1175 1047 struct hci_conn *conn; ··· 1206 1078 return ERR_PTR(-EBUSY); 1207 1079 } 1208 1080 1209 - /* When given an identity address with existing identity 1210 - * resolving key, the connection needs to be established 1211 - * to a resolvable random address. 1212 - * 1213 - * Storing the resolvable random address is required here 1214 - * to handle connection failures. The address will later 1215 - * be resolved back into the original identity address 1216 - * from the connect request. 
1081 + /* Check if the destination address has been resolved by the controller 1082 + * since if it did then the identity address shall be used. 1217 1083 */ 1218 - irk = hci_find_irk_by_addr(hdev, dst, dst_type); 1219 - if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { 1220 - dst = &irk->rpa; 1221 - dst_type = ADDR_LE_DEV_RANDOM; 1084 + if (!dst_resolved) { 1085 + /* When given an identity address with existing identity 1086 + * resolving key, the connection needs to be established 1087 + * to a resolvable random address. 1088 + * 1089 + * Storing the resolvable random address is required here 1090 + * to handle connection failures. The address will later 1091 + * be resolved back into the original identity address 1092 + * from the connect request. 1093 + */ 1094 + irk = hci_find_irk_by_addr(hdev, dst, dst_type); 1095 + if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { 1096 + dst = &irk->rpa; 1097 + dst_type = ADDR_LE_DEV_RANDOM; 1098 + } 1222 1099 } 1223 1100 1224 1101 if (conn) { ··· 1452 1319 } 1453 1320 1454 1321 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, 1455 - __u16 setting) 1322 + __u16 setting, struct bt_codec *codec) 1456 1323 { 1457 1324 struct hci_conn *acl; 1458 1325 struct hci_conn *sco; ··· 1477 1344 hci_conn_hold(sco); 1478 1345 1479 1346 sco->setting = setting; 1347 + sco->codec = *codec; 1480 1348 1481 1349 if (acl->state == BT_CONNECTED && 1482 1350 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+134 -186
net/bluetooth/hci_core.c
··· 45 45 #include "leds.h" 46 46 #include "msft.h" 47 47 #include "aosp.h" 48 + #include "hci_codec.h" 48 49 49 50 static void hci_rx_work(struct work_struct *work); 50 51 static void hci_cmd_work(struct work_struct *work); ··· 61 60 62 61 /* HCI ID Numbering */ 63 62 static DEFINE_IDA(hci_index_ida); 64 - 65 - /* ---- HCI debugfs entries ---- */ 66 - 67 - static ssize_t dut_mode_read(struct file *file, char __user *user_buf, 68 - size_t count, loff_t *ppos) 69 - { 70 - struct hci_dev *hdev = file->private_data; 71 - char buf[3]; 72 - 73 - buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N'; 74 - buf[1] = '\n'; 75 - buf[2] = '\0'; 76 - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 77 - } 78 - 79 - static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, 80 - size_t count, loff_t *ppos) 81 - { 82 - struct hci_dev *hdev = file->private_data; 83 - struct sk_buff *skb; 84 - bool enable; 85 - int err; 86 - 87 - if (!test_bit(HCI_UP, &hdev->flags)) 88 - return -ENETDOWN; 89 - 90 - err = kstrtobool_from_user(user_buf, count, &enable); 91 - if (err) 92 - return err; 93 - 94 - if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) 95 - return -EALREADY; 96 - 97 - hci_req_sync_lock(hdev); 98 - if (enable) 99 - skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, 100 - HCI_CMD_TIMEOUT); 101 - else 102 - skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, 103 - HCI_CMD_TIMEOUT); 104 - hci_req_sync_unlock(hdev); 105 - 106 - if (IS_ERR(skb)) 107 - return PTR_ERR(skb); 108 - 109 - kfree_skb(skb); 110 - 111 - hci_dev_change_flag(hdev, HCI_DUT_MODE); 112 - 113 - return count; 114 - } 115 - 116 - static const struct file_operations dut_mode_fops = { 117 - .open = simple_open, 118 - .read = dut_mode_read, 119 - .write = dut_mode_write, 120 - .llseek = default_llseek, 121 - }; 122 - 123 - static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, 124 - size_t count, loff_t *ppos) 125 - { 126 - struct hci_dev *hdev = 
file->private_data; 127 - char buf[3]; 128 - 129 - buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; 130 - buf[1] = '\n'; 131 - buf[2] = '\0'; 132 - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 133 - } 134 - 135 - static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, 136 - size_t count, loff_t *ppos) 137 - { 138 - struct hci_dev *hdev = file->private_data; 139 - bool enable; 140 - int err; 141 - 142 - err = kstrtobool_from_user(user_buf, count, &enable); 143 - if (err) 144 - return err; 145 - 146 - /* When the diagnostic flags are not persistent and the transport 147 - * is not active or in user channel operation, then there is no need 148 - * for the vendor callback. Instead just store the desired value and 149 - * the setting will be programmed when the controller gets powered on. 150 - */ 151 - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && 152 - (!test_bit(HCI_RUNNING, &hdev->flags) || 153 - hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) 154 - goto done; 155 - 156 - hci_req_sync_lock(hdev); 157 - err = hdev->set_diag(hdev, enable); 158 - hci_req_sync_unlock(hdev); 159 - 160 - if (err < 0) 161 - return err; 162 - 163 - done: 164 - if (enable) 165 - hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); 166 - else 167 - hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); 168 - 169 - return count; 170 - } 171 - 172 - static const struct file_operations vendor_diag_fops = { 173 - .open = simple_open, 174 - .read = vendor_diag_read, 175 - .write = vendor_diag_write, 176 - .llseek = default_llseek, 177 - }; 178 - 179 - static void hci_debugfs_create_basic(struct hci_dev *hdev) 180 - { 181 - debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, 182 - &dut_mode_fops); 183 - 184 - if (hdev->set_diag) 185 - debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, 186 - &vendor_diag_fops); 187 - } 188 63 189 64 static int hci_reset_req(struct hci_request *req, unsigned long opt) 190 65 { ··· 715 838 if 
(hdev->commands[22] & 0x04) 716 839 hci_set_event_mask_page_2(req); 717 840 718 - /* Read local codec list if the HCI command is supported */ 719 - if (hdev->commands[29] & 0x20) 720 - hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL); 721 - 722 841 /* Read local pairing options if the HCI command is supported */ 723 842 if (hdev->commands[41] & 0x08) 724 843 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL); ··· 809 936 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL); 810 937 if (err < 0) 811 938 return err; 939 + 940 + /* Read local codec list if the HCI command is supported */ 941 + if (hdev->commands[45] & 0x04) 942 + hci_read_supported_codecs_v2(hdev); 943 + else if (hdev->commands[29] & 0x20) 944 + hci_read_supported_codecs(hdev); 812 945 813 946 /* This function is only called when the controller is actually in 814 947 * configured state. When the controller is marked as unconfigured, ··· 1727 1848 memset(hdev->eir, 0, sizeof(hdev->eir)); 1728 1849 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); 1729 1850 bacpy(&hdev->random_addr, BDADDR_ANY); 1851 + hci_codec_list_clear(&hdev->local_codecs); 1730 1852 1731 1853 hci_req_sync_unlock(hdev); 1732 1854 ··· 2961 3081 } 2962 3082 2963 3083 /* This function requires the caller holds hdev->lock */ 3084 + u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) 3085 + { 3086 + u32 flags; 3087 + struct adv_info *adv; 3088 + 3089 + if (instance == 0x00) { 3090 + /* Instance 0 always manages the "Tx Power" and "Flags" 3091 + * fields 3092 + */ 3093 + flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; 3094 + 3095 + /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting 3096 + * corresponds to the "connectable" instance flag. 
3097 + */ 3098 + if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) 3099 + flags |= MGMT_ADV_FLAG_CONNECTABLE; 3100 + 3101 + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) 3102 + flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; 3103 + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) 3104 + flags |= MGMT_ADV_FLAG_DISCOV; 3105 + 3106 + return flags; 3107 + } 3108 + 3109 + adv = hci_find_adv_instance(hdev, instance); 3110 + 3111 + /* Return 0 when we got an invalid instance identifier. */ 3112 + if (!adv) 3113 + return 0; 3114 + 3115 + return adv->flags; 3116 + } 3117 + 3118 + bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) 3119 + { 3120 + struct adv_info *adv; 3121 + 3122 + /* Instance 0x00 always set local name */ 3123 + if (instance == 0x00) 3124 + return true; 3125 + 3126 + adv = hci_find_adv_instance(hdev, instance); 3127 + if (!adv) 3128 + return false; 3129 + 3130 + if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || 3131 + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) 3132 + return true; 3133 + 3134 + return adv->scan_rsp_len ? true : false; 3135 + } 3136 + 3137 + /* This function requires the caller holds hdev->lock */ 2964 3138 void hci_adv_monitors_clear(struct hci_dev *hdev) 2965 3139 { 2966 3140 struct adv_monitor *monitor; ··· 3421 3487 { 3422 3488 struct hci_conn_params *param; 3423 3489 3424 - switch (addr_type) { 3425 - case ADDR_LE_DEV_PUBLIC_RESOLVED: 3426 - addr_type = ADDR_LE_DEV_PUBLIC; 3427 - break; 3428 - case ADDR_LE_DEV_RANDOM_RESOLVED: 3429 - addr_type = ADDR_LE_DEV_RANDOM; 3430 - break; 3431 - } 3432 - 3433 3490 list_for_each_entry(param, list, action) { 3434 3491 if (bacmp(&param->addr, addr) == 0 && 3435 3492 param->addr_type == addr_type) ··· 3626 3701 struct hci_dev *hdev = 3627 3702 container_of(nb, struct hci_dev, suspend_notifier); 3628 3703 int ret = 0; 3629 - u8 state = BT_RUNNING; 3630 3704 3631 - /* If powering down, wait for completion. 
*/ 3632 - if (mgmt_powering_down(hdev)) { 3633 - set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); 3634 - ret = hci_suspend_wait_event(hdev); 3635 - if (ret) 3636 - goto done; 3637 - } 3705 + if (action == PM_SUSPEND_PREPARE) 3706 + ret = hci_suspend_dev(hdev); 3707 + else if (action == PM_POST_SUSPEND) 3708 + ret = hci_resume_dev(hdev); 3638 3709 3639 - /* Suspend notifier should only act on events when powered. */ 3640 - if (!hdev_is_powered(hdev) || 3641 - hci_dev_test_flag(hdev, HCI_UNREGISTER)) 3642 - goto done; 3643 - 3644 - if (action == PM_SUSPEND_PREPARE) { 3645 - /* Suspend consists of two actions: 3646 - * - First, disconnect everything and make the controller not 3647 - * connectable (disabling scanning) 3648 - * - Second, program event filter/accept list and enable scan 3649 - */ 3650 - ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); 3651 - if (!ret) 3652 - state = BT_SUSPEND_DISCONNECT; 3653 - 3654 - /* Only configure accept list if disconnect succeeded and wake 3655 - * isn't being prevented. 3656 - */ 3657 - if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) { 3658 - ret = hci_change_suspend_state(hdev, 3659 - BT_SUSPEND_CONFIGURE_WAKE); 3660 - if (!ret) 3661 - state = BT_SUSPEND_CONFIGURE_WAKE; 3662 - } 3663 - 3664 - hci_clear_wake_reason(hdev); 3665 - mgmt_suspending(hdev, state); 3666 - 3667 - } else if (action == PM_POST_SUSPEND) { 3668 - ret = hci_change_suspend_state(hdev, BT_RUNNING); 3669 - 3670 - mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, 3671 - hdev->wake_addr_type); 3672 - } 3673 - 3674 - done: 3675 - /* We always allow suspend even if suspend preparation failed and 3676 - * attempt to recover in resume. 
3677 - */ 3678 3710 if (ret) 3679 3711 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", 3680 3712 action, ret); ··· 3739 3857 INIT_LIST_HEAD(&hdev->adv_instances); 3740 3858 INIT_LIST_HEAD(&hdev->blocked_keys); 3741 3859 3860 + INIT_LIST_HEAD(&hdev->local_codecs); 3742 3861 INIT_WORK(&hdev->rx_work, hci_rx_work); 3743 3862 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 3744 3863 INIT_WORK(&hdev->tx_work, hci_tx_work); ··· 3877 3994 queue_work(hdev->req_workqueue, &hdev->power_on); 3878 3995 3879 3996 idr_init(&hdev->adv_monitors_idr); 3997 + msft_register(hdev); 3880 3998 3881 3999 return id; 3882 4000 ··· 3909 4025 unregister_pm_notifier(&hdev->suspend_notifier); 3910 4026 cancel_work_sync(&hdev->suspend_prepare); 3911 4027 } 4028 + 4029 + msft_unregister(hdev); 3912 4030 3913 4031 hci_dev_do_close(hdev); 3914 4032 ··· 3974 4088 /* Suspend HCI device */ 3975 4089 int hci_suspend_dev(struct hci_dev *hdev) 3976 4090 { 4091 + int ret; 4092 + u8 state = BT_RUNNING; 4093 + 4094 + bt_dev_dbg(hdev, ""); 4095 + 4096 + /* Suspend should only act on when powered. */ 4097 + if (!hdev_is_powered(hdev) || 4098 + hci_dev_test_flag(hdev, HCI_UNREGISTER)) 4099 + return 0; 4100 + 4101 + /* If powering down, wait for completion. */ 4102 + if (mgmt_powering_down(hdev)) { 4103 + set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); 4104 + ret = hci_suspend_wait_event(hdev); 4105 + if (ret) 4106 + goto done; 4107 + } 4108 + 4109 + /* Suspend consists of two actions: 4110 + * - First, disconnect everything and make the controller not 4111 + * connectable (disabling scanning) 4112 + * - Second, program event filter/accept list and enable scan 4113 + */ 4114 + ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); 4115 + if (ret) 4116 + goto clear; 4117 + 4118 + state = BT_SUSPEND_DISCONNECT; 4119 + 4120 + /* Only configure accept list if device may wakeup. 
*/ 4121 + if (hdev->wakeup && hdev->wakeup(hdev)) { 4122 + ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE); 4123 + if (!ret) 4124 + state = BT_SUSPEND_CONFIGURE_WAKE; 4125 + } 4126 + 4127 + clear: 4128 + hci_clear_wake_reason(hdev); 4129 + mgmt_suspending(hdev, state); 4130 + 4131 + done: 4132 + /* We always allow suspend even if suspend preparation failed and 4133 + * attempt to recover in resume. 4134 + */ 3977 4135 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); 3978 - return 0; 4136 + return ret; 3979 4137 } 3980 4138 EXPORT_SYMBOL(hci_suspend_dev); 3981 4139 3982 4140 /* Resume HCI device */ 3983 4141 int hci_resume_dev(struct hci_dev *hdev) 3984 4142 { 4143 + int ret; 4144 + 4145 + bt_dev_dbg(hdev, ""); 4146 + 4147 + /* Resume should only act on when powered. */ 4148 + if (!hdev_is_powered(hdev) || 4149 + hci_dev_test_flag(hdev, HCI_UNREGISTER)) 4150 + return 0; 4151 + 4152 + /* If powering down don't attempt to resume */ 4153 + if (mgmt_powering_down(hdev)) 4154 + return 0; 4155 + 4156 + ret = hci_change_suspend_state(hdev, BT_RUNNING); 4157 + 4158 + mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, 4159 + hdev->wake_addr_type); 4160 + 3985 4161 hci_sock_dev_event(hdev, HCI_DEV_RESUME); 3986 - return 0; 4162 + return ret; 3987 4163 } 3988 4164 EXPORT_SYMBOL(hci_resume_dev); 3989 4165
+123
net/bluetooth/hci_debugfs.c
··· 27 27 #include <net/bluetooth/hci_core.h> 28 28 29 29 #include "smp.h" 30 + #include "hci_request.h" 30 31 #include "hci_debugfs.h" 31 32 32 33 #define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \ ··· 1250 1249 1251 1250 snprintf(name, sizeof(name), "%u", conn->handle); 1252 1251 conn->debugfs = debugfs_create_dir(name, hdev->debugfs); 1252 + } 1253 + 1254 + static ssize_t dut_mode_read(struct file *file, char __user *user_buf, 1255 + size_t count, loff_t *ppos) 1256 + { 1257 + struct hci_dev *hdev = file->private_data; 1258 + char buf[3]; 1259 + 1260 + buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N'; 1261 + buf[1] = '\n'; 1262 + buf[2] = '\0'; 1263 + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 1264 + } 1265 + 1266 + static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, 1267 + size_t count, loff_t *ppos) 1268 + { 1269 + struct hci_dev *hdev = file->private_data; 1270 + struct sk_buff *skb; 1271 + bool enable; 1272 + int err; 1273 + 1274 + if (!test_bit(HCI_UP, &hdev->flags)) 1275 + return -ENETDOWN; 1276 + 1277 + err = kstrtobool_from_user(user_buf, count, &enable); 1278 + if (err) 1279 + return err; 1280 + 1281 + if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) 1282 + return -EALREADY; 1283 + 1284 + hci_req_sync_lock(hdev); 1285 + if (enable) 1286 + skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, 1287 + HCI_CMD_TIMEOUT); 1288 + else 1289 + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, 1290 + HCI_CMD_TIMEOUT); 1291 + hci_req_sync_unlock(hdev); 1292 + 1293 + if (IS_ERR(skb)) 1294 + return PTR_ERR(skb); 1295 + 1296 + kfree_skb(skb); 1297 + 1298 + hci_dev_change_flag(hdev, HCI_DUT_MODE); 1299 + 1300 + return count; 1301 + } 1302 + 1303 + static const struct file_operations dut_mode_fops = { 1304 + .open = simple_open, 1305 + .read = dut_mode_read, 1306 + .write = dut_mode_write, 1307 + .llseek = default_llseek, 1308 + }; 1309 + 1310 + static ssize_t vendor_diag_read(struct file *file, char 
__user *user_buf, 1311 + size_t count, loff_t *ppos) 1312 + { 1313 + struct hci_dev *hdev = file->private_data; 1314 + char buf[3]; 1315 + 1316 + buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; 1317 + buf[1] = '\n'; 1318 + buf[2] = '\0'; 1319 + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 1320 + } 1321 + 1322 + static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, 1323 + size_t count, loff_t *ppos) 1324 + { 1325 + struct hci_dev *hdev = file->private_data; 1326 + bool enable; 1327 + int err; 1328 + 1329 + err = kstrtobool_from_user(user_buf, count, &enable); 1330 + if (err) 1331 + return err; 1332 + 1333 + /* When the diagnostic flags are not persistent and the transport 1334 + * is not active or in user channel operation, then there is no need 1335 + * for the vendor callback. Instead just store the desired value and 1336 + * the setting will be programmed when the controller gets powered on. 1337 + */ 1338 + if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && 1339 + (!test_bit(HCI_RUNNING, &hdev->flags) || 1340 + hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) 1341 + goto done; 1342 + 1343 + hci_req_sync_lock(hdev); 1344 + err = hdev->set_diag(hdev, enable); 1345 + hci_req_sync_unlock(hdev); 1346 + 1347 + if (err < 0) 1348 + return err; 1349 + 1350 + done: 1351 + if (enable) 1352 + hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); 1353 + else 1354 + hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); 1355 + 1356 + return count; 1357 + } 1358 + 1359 + static const struct file_operations vendor_diag_fops = { 1360 + .open = simple_open, 1361 + .read = vendor_diag_read, 1362 + .write = vendor_diag_write, 1363 + .llseek = default_llseek, 1364 + }; 1365 + 1366 + void hci_debugfs_create_basic(struct hci_dev *hdev) 1367 + { 1368 + debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, 1369 + &dut_mode_fops); 1370 + 1371 + if (hdev->set_diag) 1372 + debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, 1373 + 
&vendor_diag_fops); 1253 1374 }
+5
net/bluetooth/hci_debugfs.h
··· 26 26 void hci_debugfs_create_bredr(struct hci_dev *hdev); 27 27 void hci_debugfs_create_le(struct hci_dev *hdev); 28 28 void hci_debugfs_create_conn(struct hci_conn *conn); 29 + void hci_debugfs_create_basic(struct hci_dev *hdev); 29 30 30 31 #else 31 32 ··· 43 42 } 44 43 45 44 static inline void hci_debugfs_create_conn(struct hci_conn *conn) 45 + { 46 + } 47 + 48 + static inline void hci_debugfs_create_basic(struct hci_dev *hdev) 46 49 { 47 50 } 48 51
+90 -45
net/bluetooth/hci_event.c
··· 36 36 #include "amp.h" 37 37 #include "smp.h" 38 38 #include "msft.h" 39 + #include "eir.h" 39 40 40 41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 41 42 "\x00\x00\x00\x00\x00\x00\x00\x00" ··· 2279 2278 hci_dev_unlock(hdev); 2280 2279 } 2281 2280 2281 + static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2282 + { 2283 + struct hci_cp_enhanced_setup_sync_conn *cp; 2284 + struct hci_conn *acl, *sco; 2285 + __u16 handle; 2286 + 2287 + bt_dev_dbg(hdev, "status 0x%2.2x", status); 2288 + 2289 + if (!status) 2290 + return; 2291 + 2292 + cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2293 + if (!cp) 2294 + return; 2295 + 2296 + handle = __le16_to_cpu(cp->handle); 2297 + 2298 + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2299 + 2300 + hci_dev_lock(hdev); 2301 + 2302 + acl = hci_conn_hash_lookup_handle(hdev, handle); 2303 + if (acl) { 2304 + sco = acl->link; 2305 + if (sco) { 2306 + sco->state = BT_CLOSED; 2307 + 2308 + hci_connect_cfm(sco, status); 2309 + hci_conn_del(sco); 2310 + } 2311 + } 2312 + 2313 + hci_dev_unlock(hdev); 2314 + } 2315 + 2282 2316 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2283 2317 { 2284 2318 struct hci_cp_sniff_mode *cp; ··· 2387 2351 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2388 2352 conn->dst_type, status); 2389 2353 2390 - if (conn->type == LE_LINK) { 2354 + if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2391 2355 hdev->cur_adv_instance = conn->adv_instance; 2392 2356 hci_req_reenable_advertising(hdev); 2393 2357 } ··· 2403 2367 hci_dev_unlock(hdev); 2404 2368 } 2405 2369 2370 + static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2371 + { 2372 + /* When using controller based address resolution, then the new 2373 + * address types 0x02 and 0x03 are used. 
These types need to be 2374 + * converted back into either public address or random address type 2375 + */ 2376 + switch (type) { 2377 + case ADDR_LE_DEV_PUBLIC_RESOLVED: 2378 + if (resolved) 2379 + *resolved = true; 2380 + return ADDR_LE_DEV_PUBLIC; 2381 + case ADDR_LE_DEV_RANDOM_RESOLVED: 2382 + if (resolved) 2383 + *resolved = true; 2384 + return ADDR_LE_DEV_RANDOM; 2385 + } 2386 + 2387 + if (resolved) 2388 + *resolved = false; 2389 + return type; 2390 + } 2391 + 2406 2392 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2407 2393 u8 peer_addr_type, u8 own_address_type, 2408 2394 u8 filter_policy) ··· 2436 2378 if (!conn) 2437 2379 return; 2438 2380 2439 - /* When using controller based address resolution, then the new 2440 - * address types 0x02 and 0x03 are used. These types need to be 2441 - * converted back into either public address or random address type 2442 - */ 2443 - if (use_ll_privacy(hdev) && 2444 - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { 2445 - switch (own_address_type) { 2446 - case ADDR_LE_DEV_PUBLIC_RESOLVED: 2447 - own_address_type = ADDR_LE_DEV_PUBLIC; 2448 - break; 2449 - case ADDR_LE_DEV_RANDOM_RESOLVED: 2450 - own_address_type = ADDR_LE_DEV_RANDOM; 2451 - break; 2452 - } 2453 - } 2381 + own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2454 2382 2455 2383 /* Store the initiator and responder address information which 2456 2384 * is needed for SMP. These values will not change during the ··· 3005 2961 * or until a connection is created or until the Advertising 3006 2962 * is timed out due to Directed Advertising." 
3007 2963 */ 3008 - if (conn->type == LE_LINK) { 2964 + if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3009 2965 hdev->cur_adv_instance = conn->adv_instance; 3010 2966 hci_req_reenable_advertising(hdev); 3011 2967 } ··· 3800 3756 hci_cs_setup_sync_conn(hdev, ev->status); 3801 3757 break; 3802 3758 3759 + case HCI_OP_ENHANCED_SETUP_SYNC_CONN: 3760 + hci_cs_enhanced_setup_sync_conn(hdev, ev->status); 3761 + break; 3762 + 3803 3763 case HCI_OP_SNIFF_MODE: 3804 3764 hci_cs_sniff_mode(hdev, ev->status); 3805 3765 break; ··· 4445 4397 { 4446 4398 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 4447 4399 struct hci_conn *conn; 4400 + unsigned int notify_evt; 4448 4401 4449 4402 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4450 4403 ··· 4520 4471 4521 4472 switch (ev->air_mode) { 4522 4473 case 0x02: 4523 - if (hdev->notify) 4524 - hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4474 + notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD; 4525 4475 break; 4526 4476 case 0x03: 4527 - if (hdev->notify) 4528 - hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4477 + notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP; 4529 4478 break; 4479 + } 4480 + 4481 + /* Notify only in case of SCO over HCI transport data path which 4482 + * is zero and non-zero value shall be non-HCI transport data path 4483 + */ 4484 + if (conn->codec.data_path == 0) { 4485 + if (hdev->notify) 4486 + hdev->notify(hdev, notify_evt); 4530 4487 } 4531 4488 4532 4489 hci_connect_cfm(conn, ev->status); ··· 5337 5282 conn->dst_type = irk->addr_type; 5338 5283 } 5339 5284 5340 - /* When using controller based address resolution, then the new 5341 - * address types 0x02 and 0x03 are used. 
These types need to be 5342 - * converted back into either public address or random address type 5343 - */ 5344 - if (use_ll_privacy(hdev) && 5345 - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && 5346 - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { 5347 - switch (conn->dst_type) { 5348 - case ADDR_LE_DEV_PUBLIC_RESOLVED: 5349 - conn->dst_type = ADDR_LE_DEV_PUBLIC; 5350 - break; 5351 - case ADDR_LE_DEV_RANDOM_RESOLVED: 5352 - conn->dst_type = ADDR_LE_DEV_RANDOM; 5353 - break; 5354 - } 5355 - } 5285 + conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5356 5286 5357 5287 if (status) { 5358 5288 hci_le_conn_failed(conn, status); ··· 5519 5479 /* This function requires the caller holds hdev->lock */ 5520 5480 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5521 5481 bdaddr_t *addr, 5522 - u8 addr_type, u8 adv_type, 5523 - bdaddr_t *direct_rpa) 5482 + u8 addr_type, bool addr_resolved, 5483 + u8 adv_type, bdaddr_t *direct_rpa) 5524 5484 { 5525 5485 struct hci_conn *conn; 5526 5486 struct hci_conn_params *params; ··· 5572 5532 } 5573 5533 } 5574 5534 5575 - conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, 5576 - hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER, 5577 - direct_rpa); 5535 + conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5536 + BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5537 + HCI_ROLE_MASTER, direct_rpa); 5578 5538 if (!IS_ERR(conn)) { 5579 5539 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5580 5540 * by higher layer that tried to connect, if no then ··· 5615 5575 struct discovery_state *d = &hdev->discovery; 5616 5576 struct smp_irk *irk; 5617 5577 struct hci_conn *conn; 5618 - bool match; 5578 + bool match, bdaddr_resolved; 5619 5579 u32 flags; 5620 5580 u8 *ptr; 5621 5581 ··· 5659 5619 * controller address. 
5660 5620 */ 5661 5621 if (direct_addr) { 5622 + direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 5623 + &bdaddr_resolved); 5624 + 5662 5625 /* Only resolvable random addresses are valid for these 5663 5626 * kind of reports and others can be ignored. 5664 5627 */ ··· 5689 5646 bdaddr_type = irk->addr_type; 5690 5647 } 5691 5648 5649 + bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 5650 + 5692 5651 /* Check if we have been requested to connect to this device. 5693 5652 * 5694 5653 * direct_addr is set only for directed advertising reports (it is NULL 5695 5654 * for advertising reports) and is already verified to be RPA above. 5696 5655 */ 5697 - conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, 5698 - direct_addr); 5656 + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 5657 + type, direct_addr); 5699 5658 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 5700 5659 /* Store report for later inclusion by 5701 5660 * mgmt_device_connected
+71 -407
net/bluetooth/hci_request.c
··· 30 30 #include "smp.h" 31 31 #include "hci_request.h" 32 32 #include "msft.h" 33 + #include "eir.h" 33 34 34 35 #define HCI_REQ_DONE 0 35 36 #define HCI_REQ_PEND 1 ··· 522 521 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); 523 522 } 524 523 525 - #define PNP_INFO_SVCLASS_ID 0x1200 526 - 527 - static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) 528 - { 529 - u8 *ptr = data, *uuids_start = NULL; 530 - struct bt_uuid *uuid; 531 - 532 - if (len < 4) 533 - return ptr; 534 - 535 - list_for_each_entry(uuid, &hdev->uuids, list) { 536 - u16 uuid16; 537 - 538 - if (uuid->size != 16) 539 - continue; 540 - 541 - uuid16 = get_unaligned_le16(&uuid->uuid[12]); 542 - if (uuid16 < 0x1100) 543 - continue; 544 - 545 - if (uuid16 == PNP_INFO_SVCLASS_ID) 546 - continue; 547 - 548 - if (!uuids_start) { 549 - uuids_start = ptr; 550 - uuids_start[0] = 1; 551 - uuids_start[1] = EIR_UUID16_ALL; 552 - ptr += 2; 553 - } 554 - 555 - /* Stop if not enough space to put next UUID */ 556 - if ((ptr - data) + sizeof(u16) > len) { 557 - uuids_start[1] = EIR_UUID16_SOME; 558 - break; 559 - } 560 - 561 - *ptr++ = (uuid16 & 0x00ff); 562 - *ptr++ = (uuid16 & 0xff00) >> 8; 563 - uuids_start[0] += sizeof(uuid16); 564 - } 565 - 566 - return ptr; 567 - } 568 - 569 - static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) 570 - { 571 - u8 *ptr = data, *uuids_start = NULL; 572 - struct bt_uuid *uuid; 573 - 574 - if (len < 6) 575 - return ptr; 576 - 577 - list_for_each_entry(uuid, &hdev->uuids, list) { 578 - if (uuid->size != 32) 579 - continue; 580 - 581 - if (!uuids_start) { 582 - uuids_start = ptr; 583 - uuids_start[0] = 1; 584 - uuids_start[1] = EIR_UUID32_ALL; 585 - ptr += 2; 586 - } 587 - 588 - /* Stop if not enough space to put next UUID */ 589 - if ((ptr - data) + sizeof(u32) > len) { 590 - uuids_start[1] = EIR_UUID32_SOME; 591 - break; 592 - } 593 - 594 - memcpy(ptr, &uuid->uuid[12], sizeof(u32)); 595 - ptr += sizeof(u32); 596 - 
uuids_start[0] += sizeof(u32); 597 - } 598 - 599 - return ptr; 600 - } 601 - 602 - static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) 603 - { 604 - u8 *ptr = data, *uuids_start = NULL; 605 - struct bt_uuid *uuid; 606 - 607 - if (len < 18) 608 - return ptr; 609 - 610 - list_for_each_entry(uuid, &hdev->uuids, list) { 611 - if (uuid->size != 128) 612 - continue; 613 - 614 - if (!uuids_start) { 615 - uuids_start = ptr; 616 - uuids_start[0] = 1; 617 - uuids_start[1] = EIR_UUID128_ALL; 618 - ptr += 2; 619 - } 620 - 621 - /* Stop if not enough space to put next UUID */ 622 - if ((ptr - data) + 16 > len) { 623 - uuids_start[1] = EIR_UUID128_SOME; 624 - break; 625 - } 626 - 627 - memcpy(ptr, uuid->uuid, 16); 628 - ptr += 16; 629 - uuids_start[0] += 16; 630 - } 631 - 632 - return ptr; 633 - } 634 - 635 - static void create_eir(struct hci_dev *hdev, u8 *data) 636 - { 637 - u8 *ptr = data; 638 - size_t name_len; 639 - 640 - name_len = strlen(hdev->dev_name); 641 - 642 - if (name_len > 0) { 643 - /* EIR Data type */ 644 - if (name_len > 48) { 645 - name_len = 48; 646 - ptr[1] = EIR_NAME_SHORT; 647 - } else 648 - ptr[1] = EIR_NAME_COMPLETE; 649 - 650 - /* EIR Data length */ 651 - ptr[0] = name_len + 1; 652 - 653 - memcpy(ptr + 2, hdev->dev_name, name_len); 654 - 655 - ptr += (name_len + 2); 656 - } 657 - 658 - if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { 659 - ptr[0] = 2; 660 - ptr[1] = EIR_TX_POWER; 661 - ptr[2] = (u8) hdev->inq_tx_power; 662 - 663 - ptr += 3; 664 - } 665 - 666 - if (hdev->devid_source > 0) { 667 - ptr[0] = 9; 668 - ptr[1] = EIR_DEVICE_ID; 669 - 670 - put_unaligned_le16(hdev->devid_source, ptr + 2); 671 - put_unaligned_le16(hdev->devid_vendor, ptr + 4); 672 - put_unaligned_le16(hdev->devid_product, ptr + 6); 673 - put_unaligned_le16(hdev->devid_version, ptr + 8); 674 - 675 - ptr += 10; 676 - } 677 - 678 - ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); 679 - ptr = create_uuid32_list(hdev, ptr, 
HCI_MAX_EIR_LENGTH - (ptr - data)); 680 - ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); 681 - } 682 - 683 524 void __hci_req_update_eir(struct hci_request *req) 684 525 { 685 526 struct hci_dev *hdev = req->hdev; ··· 541 698 542 699 memset(&cp, 0, sizeof(cp)); 543 700 544 - create_eir(hdev, cp.data); 701 + eir_create(hdev, cp.data); 545 702 546 703 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) 547 704 return; ··· 977 1134 addr_resolv); 978 1135 } 979 1136 980 - static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) 981 - { 982 - struct adv_info *adv_instance; 983 - 984 - /* Instance 0x00 always set local name */ 985 - if (instance == 0x00) 986 - return true; 987 - 988 - adv_instance = hci_find_adv_instance(hdev, instance); 989 - if (!adv_instance) 990 - return false; 991 - 992 - if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE || 993 - adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME) 994 - return true; 995 - 996 - return adv_instance->scan_rsp_len ? 
true : false; 997 - } 998 - 999 1137 static void hci_req_clear_event_filter(struct hci_request *req) 1000 1138 { 1001 1139 struct hci_cp_set_event_filter f; ··· 1105 1281 } 1106 1282 } 1107 1283 1108 - static void hci_req_add_set_adv_filter_enable(struct hci_request *req, 1109 - bool enable) 1284 + static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req, 1285 + bool suspending) 1110 1286 { 1111 1287 struct hci_dev *hdev = req->hdev; 1112 1288 1113 1289 switch (hci_get_adv_monitor_offload_ext(hdev)) { 1114 1290 case HCI_ADV_MONITOR_EXT_MSFT: 1115 - msft_req_add_set_filter_enable(req, enable); 1291 + if (suspending) 1292 + msft_suspend(hdev); 1293 + else 1294 + msft_resume(hdev); 1116 1295 break; 1117 1296 default: 1118 1297 return; 1119 1298 } 1120 1299 1121 1300 /* No need to block when enabling since it's on resume path */ 1122 - if (hdev->suspended && !enable) 1301 + if (hdev->suspended && suspending) 1123 1302 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); 1124 1303 } 1125 1304 ··· 1189 1362 } 1190 1363 1191 1364 /* Disable advertisement filters */ 1192 - hci_req_add_set_adv_filter_enable(&req, false); 1365 + hci_req_prepare_adv_monitor_suspend(&req, true); 1193 1366 1194 1367 /* Prevent disconnects from causing scanning to be re-enabled */ 1195 1368 hdev->scanning_paused = true; ··· 1231 1404 /* Reset passive/background scanning to normal */ 1232 1405 __hci_update_background_scan(&req); 1233 1406 /* Enable all of the advertisement filters */ 1234 - hci_req_add_set_adv_filter_enable(&req, true); 1407 + hci_req_prepare_adv_monitor_suspend(&req, false); 1235 1408 1236 1409 /* Unpause directed advertising */ 1237 1410 hdev->advertising_paused = false; ··· 1269 1442 1270 1443 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) 1271 1444 { 1272 - return adv_instance_is_scannable(hdev, hdev->cur_adv_instance); 1445 + return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); 1273 1446 } 1274 1447 1275 1448 void 
__hci_req_disable_advertising(struct hci_request *req) ··· 1282 1455 1283 1456 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 1284 1457 } 1285 - } 1286 - 1287 - static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) 1288 - { 1289 - u32 flags; 1290 - struct adv_info *adv_instance; 1291 - 1292 - if (instance == 0x00) { 1293 - /* Instance 0 always manages the "Tx Power" and "Flags" 1294 - * fields 1295 - */ 1296 - flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; 1297 - 1298 - /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting 1299 - * corresponds to the "connectable" instance flag. 1300 - */ 1301 - if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) 1302 - flags |= MGMT_ADV_FLAG_CONNECTABLE; 1303 - 1304 - if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) 1305 - flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; 1306 - else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) 1307 - flags |= MGMT_ADV_FLAG_DISCOV; 1308 - 1309 - return flags; 1310 - } 1311 - 1312 - adv_instance = hci_find_adv_instance(hdev, instance); 1313 - 1314 - /* Return 0 when we got an invalid instance identifier. 
*/ 1315 - if (!adv_instance) 1316 - return 0; 1317 - 1318 - return adv_instance->flags; 1319 1458 } 1320 1459 1321 1460 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) ··· 1348 1555 void __hci_req_enable_advertising(struct hci_request *req) 1349 1556 { 1350 1557 struct hci_dev *hdev = req->hdev; 1351 - struct adv_info *adv_instance; 1558 + struct adv_info *adv; 1352 1559 struct hci_cp_le_set_adv_param cp; 1353 1560 u8 own_addr_type, enable = 0x01; 1354 1561 bool connectable; 1355 1562 u16 adv_min_interval, adv_max_interval; 1356 1563 u32 flags; 1357 1564 1358 - flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); 1359 - adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); 1565 + flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); 1566 + adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance); 1360 1567 1361 1568 /* If the "connectable" instance flag was not set, then choose between 1362 1569 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 
··· 1388 1595 1389 1596 memset(&cp, 0, sizeof(cp)); 1390 1597 1391 - if (adv_instance) { 1392 - adv_min_interval = adv_instance->min_interval; 1393 - adv_max_interval = adv_instance->max_interval; 1598 + if (adv) { 1599 + adv_min_interval = adv->min_interval; 1600 + adv_max_interval = adv->max_interval; 1394 1601 } else { 1395 1602 adv_min_interval = hdev->le_adv_min_interval; 1396 1603 adv_max_interval = hdev->le_adv_max_interval; ··· 1421 1628 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 1422 1629 } 1423 1630 1424 - u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) 1425 - { 1426 - size_t short_len; 1427 - size_t complete_len; 1428 - 1429 - /* no space left for name (+ NULL + type + len) */ 1430 - if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) 1431 - return ad_len; 1432 - 1433 - /* use complete name if present and fits */ 1434 - complete_len = strlen(hdev->dev_name); 1435 - if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) 1436 - return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, 1437 - hdev->dev_name, complete_len + 1); 1438 - 1439 - /* use short name if present */ 1440 - short_len = strlen(hdev->short_name); 1441 - if (short_len) 1442 - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, 1443 - hdev->short_name, short_len + 1); 1444 - 1445 - /* use shortened full name if present, we already know that name 1446 - * is longer then HCI_MAX_SHORT_NAME_LENGTH 1447 - */ 1448 - if (complete_len) { 1449 - u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; 1450 - 1451 - memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); 1452 - name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; 1453 - 1454 - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, 1455 - sizeof(name)); 1456 - } 1457 - 1458 - return ad_len; 1459 - } 1460 - 1461 - static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) 1462 - { 1463 - return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); 1464 - } 1465 - 1466 - static u8 
create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) 1467 - { 1468 - u8 scan_rsp_len = 0; 1469 - 1470 - if (hdev->appearance) 1471 - scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); 1472 - 1473 - return append_local_name(hdev, ptr, scan_rsp_len); 1474 - } 1475 - 1476 - static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, 1477 - u8 *ptr) 1478 - { 1479 - struct adv_info *adv_instance; 1480 - u32 instance_flags; 1481 - u8 scan_rsp_len = 0; 1482 - 1483 - adv_instance = hci_find_adv_instance(hdev, instance); 1484 - if (!adv_instance) 1485 - return 0; 1486 - 1487 - instance_flags = adv_instance->flags; 1488 - 1489 - if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) 1490 - scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); 1491 - 1492 - memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data, 1493 - adv_instance->scan_rsp_len); 1494 - 1495 - scan_rsp_len += adv_instance->scan_rsp_len; 1496 - 1497 - if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME) 1498 - scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len); 1499 - 1500 - return scan_rsp_len; 1501 - } 1502 - 1503 1631 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) 1504 1632 { 1505 1633 struct hci_dev *hdev = req->hdev; ··· 1437 1723 1438 1724 memset(&pdu, 0, sizeof(pdu)); 1439 1725 1440 - if (instance) 1441 - len = create_instance_scan_rsp_data(hdev, instance, 1442 - pdu.data); 1443 - else 1444 - len = create_default_scan_rsp_data(hdev, pdu.data); 1726 + len = eir_create_scan_rsp(hdev, instance, pdu.data); 1445 1727 1446 1728 if (hdev->scan_rsp_data_len == len && 1447 1729 !memcmp(pdu.data, hdev->scan_rsp_data, len)) ··· 1458 1748 1459 1749 memset(&cp, 0, sizeof(cp)); 1460 1750 1461 - if (instance) 1462 - len = create_instance_scan_rsp_data(hdev, instance, 1463 - cp.data); 1464 - else 1465 - len = create_default_scan_rsp_data(hdev, cp.data); 1751 + len = eir_create_scan_rsp(hdev, instance, cp.data); 1466 1752 1467 1753 if 
(hdev->scan_rsp_data_len == len && 1468 1754 !memcmp(cp.data, hdev->scan_rsp_data, len)) ··· 1471 1765 1472 1766 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); 1473 1767 } 1474 - } 1475 - 1476 - static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) 1477 - { 1478 - struct adv_info *adv_instance = NULL; 1479 - u8 ad_len = 0, flags = 0; 1480 - u32 instance_flags; 1481 - 1482 - /* Return 0 when the current instance identifier is invalid. */ 1483 - if (instance) { 1484 - adv_instance = hci_find_adv_instance(hdev, instance); 1485 - if (!adv_instance) 1486 - return 0; 1487 - } 1488 - 1489 - instance_flags = get_adv_instance_flags(hdev, instance); 1490 - 1491 - /* If instance already has the flags set skip adding it once 1492 - * again. 1493 - */ 1494 - if (adv_instance && eir_get_data(adv_instance->adv_data, 1495 - adv_instance->adv_data_len, EIR_FLAGS, 1496 - NULL)) 1497 - goto skip_flags; 1498 - 1499 - /* The Add Advertising command allows userspace to set both the general 1500 - * and limited discoverable flags. 1501 - */ 1502 - if (instance_flags & MGMT_ADV_FLAG_DISCOV) 1503 - flags |= LE_AD_GENERAL; 1504 - 1505 - if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) 1506 - flags |= LE_AD_LIMITED; 1507 - 1508 - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 1509 - flags |= LE_AD_NO_BREDR; 1510 - 1511 - if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { 1512 - /* If a discovery flag wasn't provided, simply use the global 1513 - * settings. 1514 - */ 1515 - if (!flags) 1516 - flags |= mgmt_get_adv_discov_flags(hdev); 1517 - 1518 - /* If flags would still be empty, then there is no need to 1519 - * include the "Flags" AD field". 
1520 - */ 1521 - if (flags) { 1522 - ptr[0] = 0x02; 1523 - ptr[1] = EIR_FLAGS; 1524 - ptr[2] = flags; 1525 - 1526 - ad_len += 3; 1527 - ptr += 3; 1528 - } 1529 - } 1530 - 1531 - skip_flags: 1532 - if (adv_instance) { 1533 - memcpy(ptr, adv_instance->adv_data, 1534 - adv_instance->adv_data_len); 1535 - ad_len += adv_instance->adv_data_len; 1536 - ptr += adv_instance->adv_data_len; 1537 - } 1538 - 1539 - if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { 1540 - s8 adv_tx_power; 1541 - 1542 - if (ext_adv_capable(hdev)) { 1543 - if (adv_instance) 1544 - adv_tx_power = adv_instance->tx_power; 1545 - else 1546 - adv_tx_power = hdev->adv_tx_power; 1547 - } else { 1548 - adv_tx_power = hdev->adv_tx_power; 1549 - } 1550 - 1551 - /* Provide Tx Power only if we can provide a valid value for it */ 1552 - if (adv_tx_power != HCI_TX_POWER_INVALID) { 1553 - ptr[0] = 0x02; 1554 - ptr[1] = EIR_TX_POWER; 1555 - ptr[2] = (u8)adv_tx_power; 1556 - 1557 - ad_len += 3; 1558 - ptr += 3; 1559 - } 1560 - } 1561 - 1562 - return ad_len; 1563 1768 } 1564 1769 1565 1770 void __hci_req_update_adv_data(struct hci_request *req, u8 instance) ··· 1489 1872 1490 1873 memset(&pdu, 0, sizeof(pdu)); 1491 1874 1492 - len = create_instance_adv_data(hdev, instance, pdu.data); 1875 + len = eir_create_adv_data(hdev, instance, pdu.data); 1493 1876 1494 1877 /* There's nothing to do if the data hasn't changed */ 1495 1878 if (hdev->adv_data_len == len && ··· 1511 1894 1512 1895 memset(&cp, 0, sizeof(cp)); 1513 1896 1514 - len = create_instance_adv_data(hdev, instance, cp.data); 1897 + len = eir_create_adv_data(hdev, instance, cp.data); 1515 1898 1516 1899 /* There's nothing to do if the data hasn't changed */ 1517 1900 if (hdev->adv_data_len == len && ··· 1800 2183 adv_instance = NULL; 1801 2184 } 1802 2185 1803 - flags = get_adv_instance_flags(hdev, instance); 2186 + flags = hci_adv_instance_flags(hdev, instance); 1804 2187 1805 2188 /* If the "connectable" instance flag was not set, then choose between 1806 2189 
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting. ··· 1840 2223 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); 1841 2224 else 1842 2225 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); 1843 - } else if (adv_instance_is_scannable(hdev, instance) || 2226 + } else if (hci_adv_instance_is_scannable(hdev, instance) || 1844 2227 (flags & MGMT_ADV_PARAM_SCAN_RSP)) { 1845 2228 if (secondary_adv) 1846 2229 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); ··· 2942 3325 } 2943 3326 2944 3327 return ret; 3328 + } 3329 + 3330 + static void config_data_path_complete(struct hci_dev *hdev, u8 status, 3331 + u16 opcode) 3332 + { 3333 + bt_dev_dbg(hdev, "status %u", status); 3334 + } 3335 + 3336 + int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec) 3337 + { 3338 + struct hci_request req; 3339 + int err; 3340 + __u8 vnd_len, *vnd_data = NULL; 3341 + struct hci_op_configure_data_path *cmd = NULL; 3342 + 3343 + hci_req_init(&req, hdev); 3344 + 3345 + err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, 3346 + &vnd_data); 3347 + if (err < 0) 3348 + goto error; 3349 + 3350 + cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); 3351 + if (!cmd) { 3352 + err = -ENOMEM; 3353 + goto error; 3354 + } 3355 + 3356 + err = hdev->get_data_path_id(hdev, &cmd->data_path_id); 3357 + if (err < 0) 3358 + goto error; 3359 + 3360 + cmd->vnd_len = vnd_len; 3361 + memcpy(cmd->vnd_data, vnd_data, vnd_len); 3362 + 3363 + cmd->direction = 0x00; 3364 + hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); 3365 + 3366 + cmd->direction = 0x01; 3367 + hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); 3368 + 3369 + err = hci_req_run(&req, config_data_path_complete); 3370 + error: 3371 + 3372 + kfree(cmd); 3373 + kfree(vnd_data); 3374 + return err; 2945 3375 } 2946 3376 2947 3377 static int stop_discovery(struct hci_request *req, unsigned long opt)
+2 -23
net/bluetooth/hci_request.h
··· 101 101 /* Returns true if HCI commands were queued */ 102 102 bool hci_req_stop_discovery(struct hci_request *req); 103 103 104 + int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec); 105 + 104 106 static inline void hci_req_update_scan(struct hci_dev *hdev) 105 107 { 106 108 queue_work(hdev->req_workqueue, &hdev->scan_update); ··· 124 122 125 123 void hci_request_setup(struct hci_dev *hdev); 126 124 void hci_request_cancel_all(struct hci_dev *hdev); 127 - 128 - u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len); 129 - 130 - static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, 131 - u8 *data, u8 data_len) 132 - { 133 - eir[eir_len++] = sizeof(type) + data_len; 134 - eir[eir_len++] = type; 135 - memcpy(&eir[eir_len], data, data_len); 136 - eir_len += data_len; 137 - 138 - return eir_len; 139 - } 140 - 141 - static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) 142 - { 143 - eir[eir_len++] = sizeof(type) + sizeof(data); 144 - eir[eir_len++] = type; 145 - put_unaligned_le16(data, &eir[eir_len]); 146 - eir_len += sizeof(data); 147 - 148 - return eir_len; 149 - }
+130 -84
net/bluetooth/hci_sock.c
··· 57 57 unsigned long flags; 58 58 __u32 cookie; 59 59 char comm[TASK_COMM_LEN]; 60 + __u16 mtu; 60 61 }; 61 62 62 63 static struct hci_dev *hci_hdev_from_sock(struct sock *sk) ··· 1375 1374 break; 1376 1375 } 1377 1376 1377 + /* Default MTU to HCI_MAX_FRAME_SIZE if not set */ 1378 + if (!hci_pi(sk)->mtu) 1379 + hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE; 1380 + 1378 1381 sk->sk_state = BT_BOUND; 1379 1382 1380 1383 done: ··· 1511 1506 } 1512 1507 1513 1508 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, 1514 - struct msghdr *msg, size_t msglen) 1509 + struct sk_buff *skb) 1515 1510 { 1516 - void *buf; 1517 1511 u8 *cp; 1518 1512 struct mgmt_hdr *hdr; 1519 1513 u16 opcode, index, len; ··· 1521 1517 bool var_len, no_hdev; 1522 1518 int err; 1523 1519 1524 - BT_DBG("got %zu bytes", msglen); 1520 + BT_DBG("got %d bytes", skb->len); 1525 1521 1526 - if (msglen < sizeof(*hdr)) 1522 + if (skb->len < sizeof(*hdr)) 1527 1523 return -EINVAL; 1528 1524 1529 - buf = kmalloc(msglen, GFP_KERNEL); 1530 - if (!buf) 1531 - return -ENOMEM; 1532 - 1533 - if (memcpy_from_msg(buf, msg, msglen)) { 1534 - err = -EFAULT; 1535 - goto done; 1536 - } 1537 - 1538 - hdr = buf; 1525 + hdr = (void *)skb->data; 1539 1526 opcode = __le16_to_cpu(hdr->opcode); 1540 1527 index = __le16_to_cpu(hdr->index); 1541 1528 len = __le16_to_cpu(hdr->len); 1542 1529 1543 - if (len != msglen - sizeof(*hdr)) { 1530 + if (len != skb->len - sizeof(*hdr)) { 1544 1531 err = -EINVAL; 1545 1532 goto done; 1546 1533 } 1547 1534 1548 1535 if (chan->channel == HCI_CHANNEL_CONTROL) { 1549 - struct sk_buff *skb; 1536 + struct sk_buff *cmd; 1550 1537 1551 1538 /* Send event to monitor */ 1552 - skb = create_monitor_ctrl_command(sk, index, opcode, len, 1553 - buf + sizeof(*hdr)); 1554 - if (skb) { 1555 - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, 1539 + cmd = create_monitor_ctrl_command(sk, index, opcode, len, 1540 + skb->data + sizeof(*hdr)); 1541 + if (cmd) { 1542 + 
hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd, 1556 1543 HCI_SOCK_TRUSTED, NULL); 1557 - kfree_skb(skb); 1544 + kfree_skb(cmd); 1558 1545 } 1559 1546 } 1560 1547 ··· 1610 1615 if (hdev && chan->hdev_init) 1611 1616 chan->hdev_init(sk, hdev); 1612 1617 1613 - cp = buf + sizeof(*hdr); 1618 + cp = skb->data + sizeof(*hdr); 1614 1619 1615 1620 err = handler->func(sk, hdev, cp, len); 1616 1621 if (err < 0) 1617 1622 goto done; 1618 1623 1619 - err = msglen; 1624 + err = skb->len; 1620 1625 1621 1626 done: 1622 1627 if (hdev) 1623 1628 hci_dev_put(hdev); 1624 1629 1625 - kfree(buf); 1626 1630 return err; 1627 1631 } 1628 1632 1629 - static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len) 1633 + static int hci_logging_frame(struct sock *sk, struct sk_buff *skb, 1634 + unsigned int flags) 1630 1635 { 1631 1636 struct hci_mon_hdr *hdr; 1632 - struct sk_buff *skb; 1633 1637 struct hci_dev *hdev; 1634 1638 u16 index; 1635 1639 int err; ··· 1637 1643 * the priority byte, the ident length byte and at least one string 1638 1644 * terminator NUL byte. Anything shorter are invalid packets. 1639 1645 */ 1640 - if (len < sizeof(*hdr) + 3) 1646 + if (skb->len < sizeof(*hdr) + 3) 1641 1647 return -EINVAL; 1642 - 1643 - skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); 1644 - if (!skb) 1645 - return err; 1646 - 1647 - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 1648 - err = -EFAULT; 1649 - goto drop; 1650 - } 1651 1648 1652 1649 hdr = (void *)skb->data; 1653 1650 1654 - if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) { 1655 - err = -EINVAL; 1656 - goto drop; 1657 - } 1651 + if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr)) 1652 + return -EINVAL; 1658 1653 1659 1654 if (__le16_to_cpu(hdr->opcode) == 0x0000) { 1660 1655 __u8 priority = skb->data[sizeof(*hdr)]; ··· 1662 1679 * The message follows the ident string (if present) and 1663 1680 * must be NUL terminated. Otherwise it is not a valid packet. 
1664 1681 */ 1665 - if (priority > 7 || skb->data[len - 1] != 0x00 || 1666 - ident_len > len - sizeof(*hdr) - 3 || 1667 - skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) { 1668 - err = -EINVAL; 1669 - goto drop; 1670 - } 1682 + if (priority > 7 || skb->data[skb->len - 1] != 0x00 || 1683 + ident_len > skb->len - sizeof(*hdr) - 3 || 1684 + skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) 1685 + return -EINVAL; 1671 1686 } else { 1672 - err = -EINVAL; 1673 - goto drop; 1687 + return -EINVAL; 1674 1688 } 1675 1689 1676 1690 index = __le16_to_cpu(hdr->index); 1677 1691 1678 1692 if (index != MGMT_INDEX_NONE) { 1679 1693 hdev = hci_dev_get(index); 1680 - if (!hdev) { 1681 - err = -ENODEV; 1682 - goto drop; 1683 - } 1694 + if (!hdev) 1695 + return -ENODEV; 1684 1696 } else { 1685 1697 hdev = NULL; 1686 1698 } ··· 1683 1705 hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING); 1684 1706 1685 1707 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); 1686 - err = len; 1708 + err = skb->len; 1687 1709 1688 1710 if (hdev) 1689 1711 hci_dev_put(hdev); 1690 1712 1691 - drop: 1692 - kfree_skb(skb); 1693 1713 return err; 1694 1714 } 1695 1715 ··· 1699 1723 struct hci_dev *hdev; 1700 1724 struct sk_buff *skb; 1701 1725 int err; 1726 + const unsigned int flags = msg->msg_flags; 1702 1727 1703 1728 BT_DBG("sock %p sk %p", sock, sk); 1704 1729 1705 - if (msg->msg_flags & MSG_OOB) 1730 + if (flags & MSG_OOB) 1706 1731 return -EOPNOTSUPP; 1707 1732 1708 - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE| 1709 - MSG_CMSG_COMPAT)) 1733 + if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT)) 1710 1734 return -EINVAL; 1711 1735 1712 - if (len < 4 || len > HCI_MAX_FRAME_SIZE) 1736 + if (len < 4 || len > hci_pi(sk)->mtu) 1713 1737 return -EINVAL; 1738 + 1739 + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); 1740 + if (IS_ERR(skb)) 1741 + return PTR_ERR(skb); 1714 1742 1715 1743 lock_sock(sk); 1716 1744 ··· 1724 1744 break; 1725 1745 case 
HCI_CHANNEL_MONITOR: 1726 1746 err = -EOPNOTSUPP; 1727 - goto done; 1747 + goto drop; 1728 1748 case HCI_CHANNEL_LOGGING: 1729 - err = hci_logging_frame(sk, msg, len); 1730 - goto done; 1749 + err = hci_logging_frame(sk, skb, flags); 1750 + goto drop; 1731 1751 default: 1732 1752 mutex_lock(&mgmt_chan_list_lock); 1733 1753 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); 1734 1754 if (chan) 1735 - err = hci_mgmt_cmd(chan, sk, msg, len); 1755 + err = hci_mgmt_cmd(chan, sk, skb); 1736 1756 else 1737 1757 err = -EINVAL; 1738 1758 1739 1759 mutex_unlock(&mgmt_chan_list_lock); 1740 - goto done; 1760 + goto drop; 1741 1761 } 1742 1762 1743 1763 hdev = hci_hdev_from_sock(sk); 1744 1764 if (IS_ERR(hdev)) { 1745 1765 err = PTR_ERR(hdev); 1746 - goto done; 1766 + goto drop; 1747 1767 } 1748 1768 1749 1769 if (!test_bit(HCI_UP, &hdev->flags)) { 1750 1770 err = -ENETDOWN; 1751 - goto done; 1752 - } 1753 - 1754 - skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); 1755 - if (!skb) 1756 - goto done; 1757 - 1758 - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 1759 - err = -EFAULT; 1760 1771 goto drop; 1761 1772 } 1762 1773 ··· 1828 1857 goto done; 1829 1858 } 1830 1859 1831 - static int hci_sock_setsockopt(struct socket *sock, int level, int optname, 1832 - sockptr_t optval, unsigned int len) 1860 + static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, 1861 + sockptr_t optval, unsigned int len) 1833 1862 { 1834 1863 struct hci_ufilter uf = { .opcode = 0 }; 1835 1864 struct sock *sk = sock->sk; 1836 1865 int err = 0, opt = 0; 1837 1866 1838 1867 BT_DBG("sk %p, opt %d", sk, optname); 1839 - 1840 - if (level != SOL_HCI) 1841 - return -ENOPROTOOPT; 1842 1868 1843 1869 lock_sock(sk); 1844 1870 ··· 1911 1943 return err; 1912 1944 } 1913 1945 1914 - static int hci_sock_getsockopt(struct socket *sock, int level, int optname, 1915 - char __user *optval, int __user *optlen) 1946 + static int hci_sock_setsockopt(struct socket *sock, int 
level, int optname, 1947 + sockptr_t optval, unsigned int len) 1948 + { 1949 + struct sock *sk = sock->sk; 1950 + int err = 0, opt = 0; 1951 + 1952 + BT_DBG("sk %p, opt %d", sk, optname); 1953 + 1954 + if (level == SOL_HCI) 1955 + return hci_sock_setsockopt_old(sock, level, optname, optval, 1956 + len); 1957 + 1958 + if (level != SOL_BLUETOOTH) 1959 + return -ENOPROTOOPT; 1960 + 1961 + lock_sock(sk); 1962 + 1963 + switch (optname) { 1964 + case BT_SNDMTU: 1965 + case BT_RCVMTU: 1966 + switch (hci_pi(sk)->channel) { 1967 + /* Don't allow changing MTU for channels that are meant for HCI 1968 + * traffic only. 1969 + */ 1970 + case HCI_CHANNEL_RAW: 1971 + case HCI_CHANNEL_USER: 1972 + err = -ENOPROTOOPT; 1973 + goto done; 1974 + } 1975 + 1976 + if (copy_from_sockptr(&opt, optval, sizeof(u16))) { 1977 + err = -EFAULT; 1978 + break; 1979 + } 1980 + 1981 + hci_pi(sk)->mtu = opt; 1982 + break; 1983 + 1984 + default: 1985 + err = -ENOPROTOOPT; 1986 + break; 1987 + } 1988 + 1989 + done: 1990 + release_sock(sk); 1991 + return err; 1992 + } 1993 + 1994 + static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname, 1995 + char __user *optval, int __user *optlen) 1916 1996 { 1917 1997 struct hci_ufilter uf; 1918 1998 struct sock *sk = sock->sk; 1919 1999 int len, opt, err = 0; 1920 2000 1921 2001 BT_DBG("sk %p, opt %d", sk, optname); 1922 - 1923 - if (level != SOL_HCI) 1924 - return -ENOPROTOOPT; 1925 2002 1926 2003 if (get_user(len, optlen)) 1927 2004 return -EFAULT; ··· 2021 2008 } 2022 2009 2023 2010 done: 2011 + release_sock(sk); 2012 + return err; 2013 + } 2014 + 2015 + static int hci_sock_getsockopt(struct socket *sock, int level, int optname, 2016 + char __user *optval, int __user *optlen) 2017 + { 2018 + struct sock *sk = sock->sk; 2019 + int err = 0; 2020 + 2021 + BT_DBG("sk %p, opt %d", sk, optname); 2022 + 2023 + if (level == SOL_HCI) 2024 + return hci_sock_getsockopt_old(sock, level, optname, optval, 2025 + optlen); 2026 + 2027 + if (level != 
SOL_BLUETOOTH) 2028 + return -ENOPROTOOPT; 2029 + 2030 + lock_sock(sk); 2031 + 2032 + switch (optname) { 2033 + case BT_SNDMTU: 2034 + case BT_RCVMTU: 2035 + if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval)) 2036 + err = -EFAULT; 2037 + break; 2038 + 2039 + default: 2040 + err = -ENOPROTOOPT; 2041 + break; 2042 + } 2043 + 2024 2044 release_sock(sk); 2025 2045 return err; 2026 2046 }
+1 -1
net/bluetooth/l2cap_core.c
··· 7902 7902 dst_type = ADDR_LE_DEV_RANDOM; 7903 7903 7904 7904 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 7905 - hcon = hci_connect_le(hdev, dst, dst_type, 7905 + hcon = hci_connect_le(hdev, dst, dst_type, false, 7906 7906 chan->sec_level, 7907 7907 HCI_LE_CONN_TIMEOUT, 7908 7908 HCI_ROLE_SLAVE, NULL);
+9 -1
net/bluetooth/l2cap_sock.c
··· 1508 1508 { 1509 1509 struct sock *sk = chan->data; 1510 1510 1511 + if (!sk) 1512 + return; 1513 + 1511 1514 l2cap_sock_kill(sk); 1512 1515 } 1513 1516 ··· 1518 1515 { 1519 1516 struct sock *sk = chan->data; 1520 1517 struct sock *parent; 1518 + 1519 + if (!sk) 1520 + return; 1521 1521 1522 1522 BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); 1523 1523 ··· 1713 1707 { 1714 1708 BT_DBG("sk %p", sk); 1715 1709 1716 - if (l2cap_pi(sk)->chan) 1710 + if (l2cap_pi(sk)->chan) { 1711 + l2cap_pi(sk)->chan->data = NULL; 1717 1712 l2cap_chan_put(l2cap_pi(sk)->chan); 1713 + } 1718 1714 1719 1715 if (l2cap_pi(sk)->rx_busy_skb) { 1720 1716 kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+371 -140
net/bluetooth/mgmt.c
··· 38 38 #include "mgmt_util.h" 39 39 #include "mgmt_config.h" 40 40 #include "msft.h" 41 + #include "eir.h" 41 42 42 43 #define MGMT_VERSION 1 43 44 #define MGMT_REVISION 21 ··· 3792 3791 }; 3793 3792 #endif 3794 3793 3794 + /* 330859bc-7506-492d-9370-9a6f0614037f */ 3795 + static const u8 quality_report_uuid[16] = { 3796 + 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93, 3797 + 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33, 3798 + }; 3799 + 3800 + /* a6695ace-ee7f-4fb9-881a-5fac66c629af */ 3801 + static const u8 offload_codecs_uuid[16] = { 3802 + 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88, 3803 + 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6, 3804 + }; 3805 + 3795 3806 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ 3796 3807 static const u8 simult_central_periph_uuid[16] = { 3797 3808 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, ··· 3819 3806 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, 3820 3807 void *data, u16 data_len) 3821 3808 { 3822 - char buf[62]; /* Enough space for 3 features */ 3809 + char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */ 3823 3810 struct mgmt_rp_read_exp_features_info *rp = (void *)buf; 3824 3811 u16 idx = 0; 3825 3812 u32 flags; ··· 3859 3846 flags = BIT(1); 3860 3847 3861 3848 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); 3849 + rp->features[idx].flags = cpu_to_le32(flags); 3850 + idx++; 3851 + } 3852 + 3853 + if (hdev && hdev->set_quality_report) { 3854 + if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) 3855 + flags = BIT(0); 3856 + else 3857 + flags = 0; 3858 + 3859 + memcpy(rp->features[idx].uuid, quality_report_uuid, 16); 3860 + rp->features[idx].flags = cpu_to_le32(flags); 3861 + idx++; 3862 + } 3863 + 3864 + if (hdev && hdev->get_data_path_id) { 3865 + if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) 3866 + flags = BIT(0); 3867 + else 3868 + flags = 0; 3869 + 3870 + memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16); 3862 3871 rp->features[idx].flags = 
cpu_to_le32(flags); 3863 3872 idx++; 3864 3873 } ··· 3927 3892 } 3928 3893 #endif 3929 3894 3895 + static int exp_quality_report_feature_changed(bool enabled, struct sock *skip) 3896 + { 3897 + struct mgmt_ev_exp_feature_changed ev; 3898 + 3899 + memset(&ev, 0, sizeof(ev)); 3900 + memcpy(ev.uuid, quality_report_uuid, 16); 3901 + ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); 3902 + 3903 + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, 3904 + &ev, sizeof(ev), 3905 + HCI_MGMT_EXP_FEATURE_EVENTS, skip); 3906 + } 3907 + 3908 + #define EXP_FEAT(_uuid, _set_func) \ 3909 + { \ 3910 + .uuid = _uuid, \ 3911 + .set_func = _set_func, \ 3912 + } 3913 + 3914 + /* The zero key uuid is special. Multiple exp features are set through it. */ 3915 + static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, 3916 + struct mgmt_cp_set_exp_feature *cp, u16 data_len) 3917 + { 3918 + struct mgmt_rp_set_exp_feature rp; 3919 + 3920 + memset(rp.uuid, 0, 16); 3921 + rp.flags = cpu_to_le32(0); 3922 + 3923 + #ifdef CONFIG_BT_FEATURE_DEBUG 3924 + if (!hdev) { 3925 + bool changed = bt_dbg_get(); 3926 + 3927 + bt_dbg_set(false); 3928 + 3929 + if (changed) 3930 + exp_debug_feature_changed(false, sk); 3931 + } 3932 + #endif 3933 + 3934 + if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { 3935 + bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); 3936 + 3937 + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); 3938 + 3939 + if (changed) 3940 + exp_ll_privacy_feature_changed(false, hdev, sk); 3941 + } 3942 + 3943 + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 3944 + 3945 + return mgmt_cmd_complete(sk, hdev ? 
hdev->id : MGMT_INDEX_NONE, 3946 + MGMT_OP_SET_EXP_FEATURE, 0, 3947 + &rp, sizeof(rp)); 3948 + } 3949 + 3950 + #ifdef CONFIG_BT_FEATURE_DEBUG 3951 + static int set_debug_func(struct sock *sk, struct hci_dev *hdev, 3952 + struct mgmt_cp_set_exp_feature *cp, u16 data_len) 3953 + { 3954 + struct mgmt_rp_set_exp_feature rp; 3955 + 3956 + bool val, changed; 3957 + int err; 3958 + 3959 + /* Command requires to use the non-controller index */ 3960 + if (hdev) 3961 + return mgmt_cmd_status(sk, hdev->id, 3962 + MGMT_OP_SET_EXP_FEATURE, 3963 + MGMT_STATUS_INVALID_INDEX); 3964 + 3965 + /* Parameters are limited to a single octet */ 3966 + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) 3967 + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 3968 + MGMT_OP_SET_EXP_FEATURE, 3969 + MGMT_STATUS_INVALID_PARAMS); 3970 + 3971 + /* Only boolean on/off is supported */ 3972 + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) 3973 + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 3974 + MGMT_OP_SET_EXP_FEATURE, 3975 + MGMT_STATUS_INVALID_PARAMS); 3976 + 3977 + val = !!cp->param[0]; 3978 + changed = val ? !bt_dbg_get() : bt_dbg_get(); 3979 + bt_dbg_set(val); 3980 + 3981 + memcpy(rp.uuid, debug_uuid, 16); 3982 + rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); 3983 + 3984 + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 3985 + 3986 + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, 3987 + MGMT_OP_SET_EXP_FEATURE, 0, 3988 + &rp, sizeof(rp)); 3989 + 3990 + if (changed) 3991 + exp_debug_feature_changed(val, sk); 3992 + 3993 + return err; 3994 + } 3995 + #endif 3996 + 3997 + static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, 3998 + struct mgmt_cp_set_exp_feature *cp, 3999 + u16 data_len) 4000 + { 4001 + struct mgmt_rp_set_exp_feature rp; 4002 + bool val, changed; 4003 + int err; 4004 + u32 flags; 4005 + 4006 + /* Command requires to use the controller index */ 4007 + if (!hdev) 4008 + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 4009 + MGMT_OP_SET_EXP_FEATURE, 4010 + MGMT_STATUS_INVALID_INDEX); 4011 + 4012 + /* Changes can only be made when controller is powered down */ 4013 + if (hdev_is_powered(hdev)) 4014 + return mgmt_cmd_status(sk, hdev->id, 4015 + MGMT_OP_SET_EXP_FEATURE, 4016 + MGMT_STATUS_REJECTED); 4017 + 4018 + /* Parameters are limited to a single octet */ 4019 + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) 4020 + return mgmt_cmd_status(sk, hdev->id, 4021 + MGMT_OP_SET_EXP_FEATURE, 4022 + MGMT_STATUS_INVALID_PARAMS); 4023 + 4024 + /* Only boolean on/off is supported */ 4025 + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) 4026 + return mgmt_cmd_status(sk, hdev->id, 4027 + MGMT_OP_SET_EXP_FEATURE, 4028 + MGMT_STATUS_INVALID_PARAMS); 4029 + 4030 + val = !!cp->param[0]; 4031 + 4032 + if (val) { 4033 + changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); 4034 + hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); 4035 + hci_dev_clear_flag(hdev, HCI_ADVERTISING); 4036 + 4037 + /* Enable LL privacy + supported settings changed */ 4038 + flags = BIT(0) | BIT(1); 4039 + } else { 4040 + changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); 4041 + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); 4042 + 4043 + /* Disable LL privacy + supported settings changed */ 4044 + flags = 
BIT(1); 4045 + } 4046 + 4047 + memcpy(rp.uuid, rpa_resolution_uuid, 16); 4048 + rp.flags = cpu_to_le32(flags); 4049 + 4050 + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 4051 + 4052 + err = mgmt_cmd_complete(sk, hdev->id, 4053 + MGMT_OP_SET_EXP_FEATURE, 0, 4054 + &rp, sizeof(rp)); 4055 + 4056 + if (changed) 4057 + exp_ll_privacy_feature_changed(val, hdev, sk); 4058 + 4059 + return err; 4060 + } 4061 + 4062 + static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, 4063 + struct mgmt_cp_set_exp_feature *cp, 4064 + u16 data_len) 4065 + { 4066 + struct mgmt_rp_set_exp_feature rp; 4067 + bool val, changed; 4068 + int err; 4069 + 4070 + /* Command requires to use a valid controller index */ 4071 + if (!hdev) 4072 + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 4073 + MGMT_OP_SET_EXP_FEATURE, 4074 + MGMT_STATUS_INVALID_INDEX); 4075 + 4076 + /* Parameters are limited to a single octet */ 4077 + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) 4078 + return mgmt_cmd_status(sk, hdev->id, 4079 + MGMT_OP_SET_EXP_FEATURE, 4080 + MGMT_STATUS_INVALID_PARAMS); 4081 + 4082 + /* Only boolean on/off is supported */ 4083 + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) 4084 + return mgmt_cmd_status(sk, hdev->id, 4085 + MGMT_OP_SET_EXP_FEATURE, 4086 + MGMT_STATUS_INVALID_PARAMS); 4087 + 4088 + hci_req_sync_lock(hdev); 4089 + 4090 + val = !!cp->param[0]; 4091 + changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)); 4092 + 4093 + if (!hdev->set_quality_report) { 4094 + err = mgmt_cmd_status(sk, hdev->id, 4095 + MGMT_OP_SET_EXP_FEATURE, 4096 + MGMT_STATUS_NOT_SUPPORTED); 4097 + goto unlock_quality_report; 4098 + } 4099 + 4100 + if (changed) { 4101 + err = hdev->set_quality_report(hdev, val); 4102 + if (err) { 4103 + err = mgmt_cmd_status(sk, hdev->id, 4104 + MGMT_OP_SET_EXP_FEATURE, 4105 + MGMT_STATUS_FAILED); 4106 + goto unlock_quality_report; 4107 + } 4108 + if (val) 4109 + hci_dev_set_flag(hdev, HCI_QUALITY_REPORT); 4110 + else 4111 + 
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); 4112 + } 4113 + 4114 + bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed); 4115 + 4116 + memcpy(rp.uuid, quality_report_uuid, 16); 4117 + rp.flags = cpu_to_le32(val ? BIT(0) : 0); 4118 + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 4119 + err = mgmt_cmd_complete(sk, hdev->id, 4120 + MGMT_OP_SET_EXP_FEATURE, 0, 4121 + &rp, sizeof(rp)); 4122 + 4123 + if (changed) 4124 + exp_quality_report_feature_changed(val, sk); 4125 + 4126 + unlock_quality_report: 4127 + hci_req_sync_unlock(hdev); 4128 + return err; 4129 + } 4130 + 4131 + static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip) 4132 + { 4133 + struct mgmt_ev_exp_feature_changed ev; 4134 + 4135 + memset(&ev, 0, sizeof(ev)); 4136 + memcpy(ev.uuid, offload_codecs_uuid, 16); 4137 + ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); 4138 + 4139 + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, 4140 + &ev, sizeof(ev), 4141 + HCI_MGMT_EXP_FEATURE_EVENTS, skip); 4142 + } 4143 + 4144 + static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, 4145 + struct mgmt_cp_set_exp_feature *cp, 4146 + u16 data_len) 4147 + { 4148 + bool val, changed; 4149 + int err; 4150 + struct mgmt_rp_set_exp_feature rp; 4151 + 4152 + /* Command requires to use a valid controller index */ 4153 + if (!hdev) 4154 + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 4155 + MGMT_OP_SET_EXP_FEATURE, 4156 + MGMT_STATUS_INVALID_INDEX); 4157 + 4158 + /* Parameters are limited to a single octet */ 4159 + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) 4160 + return mgmt_cmd_status(sk, hdev->id, 4161 + MGMT_OP_SET_EXP_FEATURE, 4162 + MGMT_STATUS_INVALID_PARAMS); 4163 + 4164 + /* Only boolean on/off is supported */ 4165 + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) 4166 + return mgmt_cmd_status(sk, hdev->id, 4167 + MGMT_OP_SET_EXP_FEATURE, 4168 + MGMT_STATUS_INVALID_PARAMS); 4169 + 4170 + val = !!cp->param[0]; 4171 + changed = (val != 
hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)); 4172 + 4173 + if (!hdev->get_data_path_id) { 4174 + return mgmt_cmd_status(sk, hdev->id, 4175 + MGMT_OP_SET_EXP_FEATURE, 4176 + MGMT_STATUS_NOT_SUPPORTED); 4177 + } 4178 + 4179 + if (changed) { 4180 + if (val) 4181 + hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); 4182 + else 4183 + hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); 4184 + } 4185 + 4186 + bt_dev_info(hdev, "offload codecs enable %d changed %d", 4187 + val, changed); 4188 + 4189 + memcpy(rp.uuid, offload_codecs_uuid, 16); 4190 + rp.flags = cpu_to_le32(val ? BIT(0) : 0); 4191 + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 4192 + err = mgmt_cmd_complete(sk, hdev->id, 4193 + MGMT_OP_SET_EXP_FEATURE, 0, 4194 + &rp, sizeof(rp)); 4195 + 4196 + if (changed) 4197 + exp_offload_codec_feature_changed(val, sk); 4198 + 4199 + return err; 4200 + } 4201 + 4202 + static const struct mgmt_exp_feature { 4203 + const u8 *uuid; 4204 + int (*set_func)(struct sock *sk, struct hci_dev *hdev, 4205 + struct mgmt_cp_set_exp_feature *cp, u16 data_len); 4206 + } exp_features[] = { 4207 + EXP_FEAT(ZERO_KEY, set_zero_key_func), 4208 + #ifdef CONFIG_BT_FEATURE_DEBUG 4209 + EXP_FEAT(debug_uuid, set_debug_func), 4210 + #endif 4211 + EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), 4212 + EXP_FEAT(quality_report_uuid, set_quality_report_func), 4213 + EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), 4214 + 4215 + /* end with a null feature */ 4216 + EXP_FEAT(NULL, NULL) 4217 + }; 4218 + 3930 4219 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, 3931 4220 void *data, u16 data_len) 3932 4221 { 3933 4222 struct mgmt_cp_set_exp_feature *cp = data; 3934 - struct mgmt_rp_set_exp_feature rp; 4223 + size_t i = 0; 3935 4224 3936 4225 bt_dev_dbg(hdev, "sock %p", sk); 3937 4226 3938 - if (!memcmp(cp->uuid, ZERO_KEY, 16)) { 3939 - memset(rp.uuid, 0, 16); 3940 - rp.flags = cpu_to_le32(0); 3941 - 3942 - #ifdef CONFIG_BT_FEATURE_DEBUG 3943 - if 
(!hdev) { 3944 - bool changed = bt_dbg_get(); 3945 - 3946 - bt_dbg_set(false); 3947 - 3948 - if (changed) 3949 - exp_debug_feature_changed(false, sk); 3950 - } 3951 - #endif 3952 - 3953 - if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { 3954 - bool changed = hci_dev_test_flag(hdev, 3955 - HCI_ENABLE_LL_PRIVACY); 3956 - 3957 - hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); 3958 - 3959 - if (changed) 3960 - exp_ll_privacy_feature_changed(false, hdev, sk); 3961 - } 3962 - 3963 - hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 3964 - 3965 - return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, 3966 - MGMT_OP_SET_EXP_FEATURE, 0, 3967 - &rp, sizeof(rp)); 3968 - } 3969 - 3970 - #ifdef CONFIG_BT_FEATURE_DEBUG 3971 - if (!memcmp(cp->uuid, debug_uuid, 16)) { 3972 - bool val, changed; 3973 - int err; 3974 - 3975 - /* Command requires to use the non-controller index */ 3976 - if (hdev) 3977 - return mgmt_cmd_status(sk, hdev->id, 3978 - MGMT_OP_SET_EXP_FEATURE, 3979 - MGMT_STATUS_INVALID_INDEX); 3980 - 3981 - /* Parameters are limited to a single octet */ 3982 - if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) 3983 - return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 3984 - MGMT_OP_SET_EXP_FEATURE, 3985 - MGMT_STATUS_INVALID_PARAMS); 3986 - 3987 - /* Only boolean on/off is supported */ 3988 - if (cp->param[0] != 0x00 && cp->param[0] != 0x01) 3989 - return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 3990 - MGMT_OP_SET_EXP_FEATURE, 3991 - MGMT_STATUS_INVALID_PARAMS); 3992 - 3993 - val = !!cp->param[0]; 3994 - changed = val ? !bt_dbg_get() : bt_dbg_get(); 3995 - bt_dbg_set(val); 3996 - 3997 - memcpy(rp.uuid, debug_uuid, 16); 3998 - rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); 3999 - 4000 - hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 4001 - 4002 - err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, 4003 - MGMT_OP_SET_EXP_FEATURE, 0, 4004 - &rp, sizeof(rp)); 4005 - 4006 - if (changed) 4007 - exp_debug_feature_changed(val, sk); 4008 - 4009 - return err; 4010 - } 4011 - #endif 4012 - 4013 - if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) { 4014 - bool val, changed; 4015 - int err; 4016 - u32 flags; 4017 - 4018 - /* Command requires to use the controller index */ 4019 - if (!hdev) 4020 - return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 4021 - MGMT_OP_SET_EXP_FEATURE, 4022 - MGMT_STATUS_INVALID_INDEX); 4023 - 4024 - /* Changes can only be made when controller is powered down */ 4025 - if (hdev_is_powered(hdev)) 4026 - return mgmt_cmd_status(sk, hdev->id, 4027 - MGMT_OP_SET_EXP_FEATURE, 4028 - MGMT_STATUS_REJECTED); 4029 - 4030 - /* Parameters are limited to a single octet */ 4031 - if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) 4032 - return mgmt_cmd_status(sk, hdev->id, 4033 - MGMT_OP_SET_EXP_FEATURE, 4034 - MGMT_STATUS_INVALID_PARAMS); 4035 - 4036 - /* Only boolean on/off is supported */ 4037 - if (cp->param[0] != 0x00 && cp->param[0] != 0x01) 4038 - return mgmt_cmd_status(sk, hdev->id, 4039 - MGMT_OP_SET_EXP_FEATURE, 4040 - MGMT_STATUS_INVALID_PARAMS); 4041 - 4042 - val = !!cp->param[0]; 4043 - 4044 - if (val) { 4045 - changed = !hci_dev_test_flag(hdev, 4046 - HCI_ENABLE_LL_PRIVACY); 4047 - hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); 4048 - hci_dev_clear_flag(hdev, HCI_ADVERTISING); 4049 - 4050 - /* Enable LL privacy + supported settings changed */ 4051 - flags = BIT(0) | BIT(1); 4052 - } else { 4053 - changed = hci_dev_test_flag(hdev, 4054 - HCI_ENABLE_LL_PRIVACY); 4055 - hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); 4056 - 4057 - /* Disable LL privacy + supported settings changed */ 4058 - flags = BIT(1); 4059 - } 4060 - 4061 - memcpy(rp.uuid, rpa_resolution_uuid, 16); 4062 - rp.flags = cpu_to_le32(flags); 4063 - 4064 - 
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); 4065 - 4066 - err = mgmt_cmd_complete(sk, hdev->id, 4067 - MGMT_OP_SET_EXP_FEATURE, 0, 4068 - &rp, sizeof(rp)); 4069 - 4070 - if (changed) 4071 - exp_ll_privacy_feature_changed(val, hdev, sk); 4072 - 4073 - return err; 4227 + for (i = 0; exp_features[i].uuid; i++) { 4228 + if (!memcmp(cp->uuid, exp_features[i].uuid, 16)) 4229 + return exp_features[i].set_func(sk, hdev, cp, data_len); 4074 4230 } 4075 4231 4076 4232 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, ··· 7541 7315 if (!rp) 7542 7316 return -ENOMEM; 7543 7317 7318 + if (!status && !lmp_ssp_capable(hdev)) { 7319 + status = MGMT_STATUS_NOT_SUPPORTED; 7320 + eir_len = 0; 7321 + } 7322 + 7544 7323 if (status) 7545 7324 goto complete; 7546 7325 ··· 7757 7526 { 7758 7527 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3]; 7759 7528 7760 - return append_local_name(hdev, buf, 0); 7529 + return eir_append_local_name(hdev, buf, 0); 7761 7530 } 7762 7531 7763 7532 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, ··· 8453 8222 * advertising. 8454 8223 */ 8455 8224 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) 8456 - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, 8225 + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, 8457 8226 MGMT_STATUS_NOT_SUPPORTED); 8458 8227 8459 8228 hci_dev_lock(hdev);
+148 -24
net/bluetooth/msft.c
··· 94 94 __u16 pending_add_handle; 95 95 __u16 pending_remove_handle; 96 96 __u8 reregistering; 97 + __u8 suspending; 97 98 __u8 filter_enabled; 98 99 }; 99 100 100 101 static int __msft_add_monitor_pattern(struct hci_dev *hdev, 101 102 struct adv_monitor *monitor); 103 + static int __msft_remove_monitor(struct hci_dev *hdev, 104 + struct adv_monitor *monitor, u16 handle); 102 105 103 106 bool msft_monitor_supported(struct hci_dev *hdev) 104 107 { ··· 157 154 } 158 155 159 156 /* This function requires the caller holds hdev->lock */ 160 - static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle) 157 + static void reregister_monitor(struct hci_dev *hdev, int handle) 161 158 { 162 159 struct adv_monitor *monitor; 163 160 struct msft_data *msft = hdev->msft_data; ··· 185 182 } 186 183 } 187 184 185 + /* This function requires the caller holds hdev->lock */ 186 + static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle) 187 + { 188 + struct adv_monitor *monitor; 189 + struct msft_data *msft = hdev->msft_data; 190 + int err; 191 + 192 + while (1) { 193 + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); 194 + if (!monitor) { 195 + /* All monitors have been removed */ 196 + msft->suspending = false; 197 + hci_update_background_scan(hdev); 198 + return; 199 + } 200 + 201 + msft->pending_remove_handle = (u16)handle; 202 + err = __msft_remove_monitor(hdev, monitor, handle); 203 + 204 + /* If success, return and wait for monitor removed callback */ 205 + if (!err) 206 + return; 207 + 208 + /* Otherwise free the monitor and keep removing */ 209 + hci_free_adv_monitor(hdev, monitor); 210 + handle++; 211 + } 212 + } 213 + 214 + /* This function requires the caller holds hdev->lock */ 215 + void msft_suspend(struct hci_dev *hdev) 216 + { 217 + struct msft_data *msft = hdev->msft_data; 218 + 219 + if (!msft) 220 + return; 221 + 222 + if (msft_monitor_supported(hdev)) { 223 + msft->suspending = true; 224 + /* Quitely remove all monitors on 
suspend to avoid waking up 225 + * the system. 226 + */ 227 + remove_monitor_on_suspend(hdev, 0); 228 + } 229 + } 230 + 231 + /* This function requires the caller holds hdev->lock */ 232 + void msft_resume(struct hci_dev *hdev) 233 + { 234 + struct msft_data *msft = hdev->msft_data; 235 + 236 + if (!msft) 237 + return; 238 + 239 + if (msft_monitor_supported(hdev)) { 240 + msft->reregistering = true; 241 + /* Monitors are removed on suspend, so we need to add all 242 + * monitors on resume. 243 + */ 244 + reregister_monitor(hdev, 0); 245 + } 246 + } 247 + 188 248 void msft_do_open(struct hci_dev *hdev) 189 249 { 190 - struct msft_data *msft; 250 + struct msft_data *msft = hdev->msft_data; 191 251 192 252 if (hdev->msft_opcode == HCI_OP_NOP) 193 253 return; 194 254 255 + if (!msft) { 256 + bt_dev_err(hdev, "MSFT extension not registered"); 257 + return; 258 + } 259 + 195 260 bt_dev_dbg(hdev, "Initialize MSFT extension"); 196 261 197 - msft = kzalloc(sizeof(*msft), GFP_KERNEL); 198 - if (!msft) 199 - return; 262 + /* Reset existing MSFT data before re-reading */ 263 + kfree(msft->evt_prefix); 264 + msft->evt_prefix = NULL; 265 + msft->evt_prefix_len = 0; 266 + msft->features = 0; 200 267 201 268 if (!read_supported_features(hdev, msft)) { 269 + hdev->msft_data = NULL; 202 270 kfree(msft); 203 271 return; 204 272 } 205 273 206 - INIT_LIST_HEAD(&msft->handle_map); 207 - hdev->msft_data = msft; 208 - 209 274 if (msft_monitor_supported(hdev)) { 210 275 msft->reregistering = true; 211 276 msft_set_filter_enable(hdev, true); 212 - reregister_monitor_on_restart(hdev, 0); 277 + /* Monitors get removed on power off, so we need to explicitly 278 + * tell the controller to re-monitor. 279 + */ 280 + reregister_monitor(hdev, 0); 213 281 } 214 282 } 215 283 ··· 295 221 296 222 bt_dev_dbg(hdev, "Cleanup of MSFT extension"); 297 223 298 - hdev->msft_data = NULL; 299 - 224 + /* The controller will silently remove all monitors on power off. 
225 + * Therefore, remove handle_data mapping and reset monitor state. 226 + */ 300 227 list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) { 301 228 monitor = idr_find(&hdev->adv_monitors_idr, 302 229 handle_data->mgmt_handle); ··· 308 233 list_del(&handle_data->list); 309 234 kfree(handle_data); 310 235 } 236 + } 237 + 238 + void msft_register(struct hci_dev *hdev) 239 + { 240 + struct msft_data *msft = NULL; 241 + 242 + bt_dev_dbg(hdev, "Register MSFT extension"); 243 + 244 + msft = kzalloc(sizeof(*msft), GFP_KERNEL); 245 + if (!msft) { 246 + bt_dev_err(hdev, "Failed to register MSFT extension"); 247 + return; 248 + } 249 + 250 + INIT_LIST_HEAD(&msft->handle_map); 251 + hdev->msft_data = msft; 252 + } 253 + 254 + void msft_unregister(struct hci_dev *hdev) 255 + { 256 + struct msft_data *msft = hdev->msft_data; 257 + 258 + if (!msft) 259 + return; 260 + 261 + bt_dev_dbg(hdev, "Unregister MSFT extension"); 262 + 263 + hdev->msft_data = NULL; 311 264 312 265 kfree(msft->evt_prefix); 313 266 kfree(msft); ··· 448 345 449 346 /* If in restart/reregister sequence, keep registering. */ 450 347 if (msft->reregistering) 451 - reregister_monitor_on_restart(hdev, 452 - msft->pending_add_handle + 1); 348 + reregister_monitor(hdev, msft->pending_add_handle + 1); 453 349 454 350 hci_dev_unlock(hdev); 455 351 ··· 485 383 if (handle_data) { 486 384 monitor = idr_find(&hdev->adv_monitors_idr, 487 385 handle_data->mgmt_handle); 488 - if (monitor) 386 + 387 + if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) 388 + monitor->state = ADV_MONITOR_STATE_REGISTERED; 389 + 390 + /* Do not free the monitor if it is being removed due to 391 + * suspend. It will be re-monitored on resume. 392 + */ 393 + if (monitor && !msft->suspending) 489 394 hci_free_adv_monitor(hdev, monitor); 490 395 491 396 list_del(&handle_data->list); 492 397 kfree(handle_data); 493 398 } 399 + 400 + /* If in suspend/remove sequence, keep removing. 
*/ 401 + if (msft->suspending) 402 + remove_monitor_on_suspend(hdev, 403 + msft->pending_remove_handle + 1); 494 404 495 405 /* If remove all monitors is required, we need to continue the process 496 406 * here because the earlier it was paused when waiting for the ··· 522 408 hci_dev_unlock(hdev); 523 409 524 410 done: 525 - hci_remove_adv_monitor_complete(hdev, status); 411 + if (!msft->suspending) 412 + hci_remove_adv_monitor_complete(hdev, status); 526 413 } 527 414 528 415 static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, ··· 656 541 if (!msft) 657 542 return -EOPNOTSUPP; 658 543 659 - if (msft->reregistering) 544 + if (msft->reregistering || msft->suspending) 660 545 return -EBUSY; 661 546 662 547 return __msft_add_monitor_pattern(hdev, monitor); 663 548 } 664 549 665 550 /* This function requires the caller holds hdev->lock */ 666 - int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 667 - u16 handle) 551 + static int __msft_remove_monitor(struct hci_dev *hdev, 552 + struct adv_monitor *monitor, u16 handle) 668 553 { 669 554 struct msft_cp_le_cancel_monitor_advertisement cp; 670 555 struct msft_monitor_advertisement_handle_data *handle_data; 671 556 struct hci_request req; 672 557 struct msft_data *msft = hdev->msft_data; 673 558 int err = 0; 674 - 675 - if (!msft) 676 - return -EOPNOTSUPP; 677 - 678 - if (msft->reregistering) 679 - return -EBUSY; 680 559 681 560 handle_data = msft_find_handle_data(hdev, monitor->handle, true); 682 561 ··· 689 580 msft->pending_remove_handle = handle; 690 581 691 582 return err; 583 + } 584 + 585 + /* This function requires the caller holds hdev->lock */ 586 + int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 587 + u16 handle) 588 + { 589 + struct msft_data *msft = hdev->msft_data; 590 + 591 + if (!msft) 592 + return -EOPNOTSUPP; 593 + 594 + if (msft->reregistering || msft->suspending) 595 + return -EBUSY; 596 + 597 + return 
__msft_remove_monitor(hdev, monitor, handle); 692 598 } 693 599 694 600 void msft_req_add_set_filter_enable(struct hci_request *req, bool enable)
+9
net/bluetooth/msft.h
··· 13 13 #if IS_ENABLED(CONFIG_BT_MSFTEXT) 14 14 15 15 bool msft_monitor_supported(struct hci_dev *hdev); 16 + void msft_register(struct hci_dev *hdev); 17 + void msft_unregister(struct hci_dev *hdev); 16 18 void msft_do_open(struct hci_dev *hdev); 17 19 void msft_do_close(struct hci_dev *hdev); 18 20 void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); ··· 24 22 u16 handle); 25 23 void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); 26 24 int msft_set_filter_enable(struct hci_dev *hdev, bool enable); 25 + void msft_suspend(struct hci_dev *hdev); 26 + void msft_resume(struct hci_dev *hdev); 27 27 bool msft_curve_validity(struct hci_dev *hdev); 28 28 29 29 #else ··· 35 31 return false; 36 32 } 37 33 34 + static inline void msft_register(struct hci_dev *hdev) {} 35 + static inline void msft_unregister(struct hci_dev *hdev) {} 38 36 static inline void msft_do_open(struct hci_dev *hdev) {} 39 37 static inline void msft_do_close(struct hci_dev *hdev) {} 40 38 static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} ··· 60 54 { 61 55 return -EOPNOTSUPP; 62 56 } 57 + 58 + static inline void msft_suspend(struct hci_dev *hdev) {} 59 + static inline void msft_resume(struct hci_dev *hdev) {} 63 60 64 61 static inline bool msft_curve_validity(struct hci_dev *hdev) 65 62 {
+44 -8
net/bluetooth/rfcomm/core.c
··· 549 549 return dlc; 550 550 } 551 551 552 - int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) 552 + static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag) 553 553 { 554 - int len = skb->len; 555 - 556 - if (d->state != BT_CONNECTED) 557 - return -ENOTCONN; 554 + int len = frag->len; 558 555 559 556 BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); 560 557 561 558 if (len > d->mtu) 562 559 return -EINVAL; 563 560 564 - rfcomm_make_uih(skb, d->addr); 565 - skb_queue_tail(&d->tx_queue, skb); 561 + rfcomm_make_uih(frag, d->addr); 562 + __skb_queue_tail(&d->tx_queue, frag); 566 563 567 - if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags)) 564 + return len; 565 + } 566 + 567 + int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) 568 + { 569 + unsigned long flags; 570 + struct sk_buff *frag, *next; 571 + int len; 572 + 573 + if (d->state != BT_CONNECTED) 574 + return -ENOTCONN; 575 + 576 + frag = skb_shinfo(skb)->frag_list; 577 + skb_shinfo(skb)->frag_list = NULL; 578 + 579 + /* Queue all fragments atomically. */ 580 + spin_lock_irqsave(&d->tx_queue.lock, flags); 581 + 582 + len = rfcomm_dlc_send_frag(d, skb); 583 + if (len < 0 || !frag) 584 + goto unlock; 585 + 586 + for (; frag; frag = next) { 587 + int ret; 588 + 589 + next = frag->next; 590 + 591 + ret = rfcomm_dlc_send_frag(d, frag); 592 + if (ret < 0) { 593 + kfree_skb(frag); 594 + goto unlock; 595 + } 596 + 597 + len += ret; 598 + } 599 + 600 + unlock: 601 + spin_unlock_irqrestore(&d->tx_queue.lock, flags); 602 + 603 + if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) 568 604 rfcomm_schedule(); 569 605 return len; 570 606 }
+12 -38
net/bluetooth/rfcomm/sock.c
··· 575 575 lock_sock(sk); 576 576 577 577 sent = bt_sock_wait_ready(sk, msg->msg_flags); 578 - if (sent) 579 - goto done; 580 578 581 - while (len) { 582 - size_t size = min_t(size_t, len, d->mtu); 583 - int err; 584 - 585 - skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, 586 - msg->msg_flags & MSG_DONTWAIT, &err); 587 - if (!skb) { 588 - if (sent == 0) 589 - sent = err; 590 - break; 591 - } 592 - skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); 593 - 594 - err = memcpy_from_msg(skb_put(skb, size), msg, size); 595 - if (err) { 596 - kfree_skb(skb); 597 - if (sent == 0) 598 - sent = err; 599 - break; 600 - } 601 - 602 - skb->priority = sk->sk_priority; 603 - 604 - err = rfcomm_dlc_send(d, skb); 605 - if (err < 0) { 606 - kfree_skb(skb); 607 - if (sent == 0) 608 - sent = err; 609 - break; 610 - } 611 - 612 - sent += size; 613 - len -= size; 614 - } 615 - 616 - done: 617 579 release_sock(sk); 580 + 581 + if (sent) 582 + return sent; 583 + 584 + skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, 585 + RFCOMM_SKB_TAIL_RESERVE); 586 + if (IS_ERR(skb)) 587 + return PTR_ERR(skb); 588 + 589 + sent = rfcomm_dlc_send(d, skb); 590 + if (sent < 0) 591 + kfree_skb(skb); 618 592 619 593 return sent; 620 594 }
+189 -20
net/bluetooth/sco.c
··· 69 69 __u32 flags; 70 70 __u16 setting; 71 71 __u8 cmsg_mask; 72 + struct bt_codec codec; 72 73 struct sco_conn *conn; 73 74 }; 74 75 ··· 134 133 return NULL; 135 134 136 135 spin_lock_init(&conn->lock); 136 + INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); 137 137 138 138 hcon->sco_data = conn; 139 139 conn->hcon = hcon; ··· 189 187 /* Kill socket */ 190 188 sco_conn_lock(conn); 191 189 sk = conn->sk; 190 + if (sk) 191 + sock_hold(sk); 192 192 sco_conn_unlock(conn); 193 193 194 194 if (sk) { 195 - sock_hold(sk); 196 195 lock_sock(sk); 197 196 sco_sock_clear_timer(sk); 198 197 sco_chan_del(sk, err); 199 198 release_sock(sk); 200 199 sock_put(sk); 201 - 202 - /* Ensure no more work items will run before freeing conn. */ 203 - cancel_delayed_work_sync(&conn->timeout_work); 204 200 } 201 + 202 + /* Ensure no more work items will run before freeing conn. */ 203 + cancel_delayed_work_sync(&conn->timeout_work); 205 204 206 205 hcon->sco_data = NULL; 207 206 kfree(conn); ··· 215 212 216 213 sco_pi(sk)->conn = conn; 217 214 conn->sk = sk; 218 - 219 - INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); 220 215 221 216 if (parent) 222 217 bt_accept_enqueue(parent, sk, true); ··· 253 252 return -EOPNOTSUPP; 254 253 255 254 hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, 256 - sco_pi(sk)->setting); 255 + sco_pi(sk)->setting, &sco_pi(sk)->codec); 257 256 if (IS_ERR(hcon)) 258 257 return PTR_ERR(hcon); 259 258 ··· 281 280 return err; 282 281 } 283 282 284 - static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) 283 + static int sco_send_frame(struct sock *sk, struct sk_buff *skb) 285 284 { 286 285 struct sco_conn *conn = sco_pi(sk)->conn; 287 - struct sk_buff *skb; 288 - int err; 286 + int len = skb->len; 289 287 290 288 /* Check outgoing MTU */ 291 289 if (len > conn->mtu) 292 290 return -EINVAL; 293 291 294 292 BT_DBG("sk %p len %d", sk, len); 295 - 296 - skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); 297 - if 
(!skb) 298 - return err; 299 - 300 - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 301 - kfree_skb(skb); 302 - return -EFAULT; 303 - } 304 293 305 294 hci_send_sco(conn->hcon, skb); 306 295 ··· 435 444 sock_set_flag(sk, SOCK_ZAPPED); 436 445 break; 437 446 } 447 + 438 448 } 439 449 440 450 /* Must be called on unlocked socket. */ ··· 496 504 sk->sk_state = BT_OPEN; 497 505 498 506 sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; 507 + sco_pi(sk)->codec.id = BT_CODEC_CVSD; 508 + sco_pi(sk)->codec.cid = 0xffff; 509 + sco_pi(sk)->codec.vid = 0xffff; 510 + sco_pi(sk)->codec.data_path = 0x00; 499 511 500 512 bt_sock_link(&sco_sk_list, sk); 501 513 return sk; ··· 721 725 size_t len) 722 726 { 723 727 struct sock *sk = sock->sk; 728 + struct sk_buff *skb; 724 729 int err; 725 730 726 731 BT_DBG("sock %p, sk %p", sock, sk); ··· 733 736 if (msg->msg_flags & MSG_OOB) 734 737 return -EOPNOTSUPP; 735 738 739 + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); 740 + if (IS_ERR(skb)) 741 + return PTR_ERR(skb); 742 + 736 743 lock_sock(sk); 737 744 738 745 if (sk->sk_state == BT_CONNECTED) 739 - err = sco_send_frame(sk, msg, len); 746 + err = sco_send_frame(sk, skb); 740 747 else 741 748 err = -ENOTCONN; 742 749 743 750 release_sock(sk); 751 + 752 + if (err < 0) 753 + kfree_skb(skb); 744 754 return err; 745 755 } 746 756 ··· 829 825 int len, err = 0; 830 826 struct bt_voice voice; 831 827 u32 opt; 828 + struct bt_codecs *codecs; 829 + struct hci_dev *hdev; 830 + __u8 buffer[255]; 832 831 833 832 BT_DBG("sk %p", sk); 834 833 ··· 879 872 } 880 873 881 874 sco_pi(sk)->setting = voice.setting; 875 + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, 876 + BDADDR_BREDR); 877 + if (!hdev) { 878 + err = -EBADFD; 879 + break; 880 + } 881 + if (enhanced_sco_capable(hdev) && 882 + voice.setting == BT_VOICE_TRANSPARENT) 883 + sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; 884 + hci_dev_put(hdev); 882 885 break; 883 886 884 887 case BT_PKT_STATUS: ··· 901 884 sco_pi(sk)->cmsg_mask |= 
SCO_CMSG_PKT_STATUS; 902 885 else 903 886 sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; 887 + break; 888 + 889 + case BT_CODEC: 890 + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && 891 + sk->sk_state != BT_CONNECT2) { 892 + err = -EINVAL; 893 + break; 894 + } 895 + 896 + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, 897 + BDADDR_BREDR); 898 + if (!hdev) { 899 + err = -EBADFD; 900 + break; 901 + } 902 + 903 + if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { 904 + hci_dev_put(hdev); 905 + err = -EOPNOTSUPP; 906 + break; 907 + } 908 + 909 + if (!hdev->get_data_path_id) { 910 + hci_dev_put(hdev); 911 + err = -EOPNOTSUPP; 912 + break; 913 + } 914 + 915 + if (optlen < sizeof(struct bt_codecs) || 916 + optlen > sizeof(buffer)) { 917 + hci_dev_put(hdev); 918 + err = -EINVAL; 919 + break; 920 + } 921 + 922 + if (copy_from_sockptr(buffer, optval, optlen)) { 923 + hci_dev_put(hdev); 924 + err = -EFAULT; 925 + break; 926 + } 927 + 928 + codecs = (void *)buffer; 929 + 930 + if (codecs->num_codecs > 1) { 931 + hci_dev_put(hdev); 932 + err = -EINVAL; 933 + break; 934 + } 935 + 936 + sco_pi(sk)->codec = codecs->codecs[0]; 937 + hci_dev_put(hdev); 904 938 break; 905 939 906 940 default: ··· 1032 964 struct bt_voice voice; 1033 965 u32 phys; 1034 966 int pkt_status; 967 + int buf_len; 968 + struct codec_list *c; 969 + u8 num_codecs, i, __user *ptr; 970 + struct hci_dev *hdev; 971 + struct hci_codec_caps *caps; 972 + struct bt_codec codec; 1035 973 1036 974 BT_DBG("sk %p", sk); 1037 975 ··· 1100 1026 1101 1027 if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) 1102 1028 err = -EFAULT; 1029 + break; 1030 + 1031 + case BT_CODEC: 1032 + num_codecs = 0; 1033 + buf_len = 0; 1034 + 1035 + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); 1036 + if (!hdev) { 1037 + err = -EBADFD; 1038 + break; 1039 + } 1040 + 1041 + if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { 1042 + hci_dev_put(hdev); 1043 + err = -EOPNOTSUPP; 
1044 + break; 1045 + } 1046 + 1047 + if (!hdev->get_data_path_id) { 1048 + hci_dev_put(hdev); 1049 + err = -EOPNOTSUPP; 1050 + break; 1051 + } 1052 + 1053 + /* find total buffer size required to copy codec + caps */ 1054 + hci_dev_lock(hdev); 1055 + list_for_each_entry(c, &hdev->local_codecs, list) { 1056 + if (c->transport != HCI_TRANSPORT_SCO_ESCO) 1057 + continue; 1058 + num_codecs++; 1059 + for (i = 0, caps = c->caps; i < c->num_caps; i++) { 1060 + buf_len += 1 + caps->len; 1061 + caps = (void *)&caps->data[caps->len]; 1062 + } 1063 + buf_len += sizeof(struct bt_codec); 1064 + } 1065 + hci_dev_unlock(hdev); 1066 + 1067 + buf_len += sizeof(struct bt_codecs); 1068 + if (buf_len > len) { 1069 + hci_dev_put(hdev); 1070 + err = -ENOBUFS; 1071 + break; 1072 + } 1073 + ptr = optval; 1074 + 1075 + if (put_user(num_codecs, ptr)) { 1076 + hci_dev_put(hdev); 1077 + err = -EFAULT; 1078 + break; 1079 + } 1080 + ptr += sizeof(num_codecs); 1081 + 1082 + /* Iterate all the codecs supported over SCO and populate 1083 + * codec data 1084 + */ 1085 + hci_dev_lock(hdev); 1086 + list_for_each_entry(c, &hdev->local_codecs, list) { 1087 + if (c->transport != HCI_TRANSPORT_SCO_ESCO) 1088 + continue; 1089 + 1090 + codec.id = c->id; 1091 + codec.cid = c->cid; 1092 + codec.vid = c->vid; 1093 + err = hdev->get_data_path_id(hdev, &codec.data_path); 1094 + if (err < 0) 1095 + break; 1096 + codec.num_caps = c->num_caps; 1097 + if (copy_to_user(ptr, &codec, sizeof(codec))) { 1098 + err = -EFAULT; 1099 + break; 1100 + } 1101 + ptr += sizeof(codec); 1102 + 1103 + /* find codec capabilities data length */ 1104 + len = 0; 1105 + for (i = 0, caps = c->caps; i < c->num_caps; i++) { 1106 + len += 1 + caps->len; 1107 + caps = (void *)&caps->data[caps->len]; 1108 + } 1109 + 1110 + /* copy codec capabilities data */ 1111 + if (len && copy_to_user(ptr, c->caps, len)) { 1112 + err = -EFAULT; 1113 + break; 1114 + } 1115 + ptr += len; 1116 + } 1117 + 1118 + if (!err && put_user(buf_len, optlen)) 1119 + err 
= -EFAULT; 1120 + 1121 + hci_dev_unlock(hdev); 1122 + hci_dev_put(hdev); 1123 + 1103 1124 break; 1104 1125 1105 1126 default: