Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2021-02-11

Here's the main bluetooth-next pull request for 5.12:

- Add support for advertising monitor offloading using Microsoft
vendor extensions
- Add firmware download support for MediaTek MT7921U USB devices
- Suspend-related fixes for Qualcomm devices
- Add support for Intel GarfieldPeak controller
- Various other smaller fixes & cleanups

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1723 -372
+1 -1
Documentation/devicetree/bindings/net/btusb.txt
··· 38 38 compatible = "usb1286,204e"; 39 39 reg = <1>; 40 40 interrupt-parent = <&gpio0>; 41 - interrupt-name = "wakeup"; 41 + interrupt-names = "wakeup"; 42 42 interrupts = <3 IRQ_TYPE_LEVEL_LOW>; 43 43 }; 44 44 };
+7 -14
drivers/bluetooth/btintel.c
··· 437 437 tlv = (struct intel_tlv *)skb->data; 438 438 switch (tlv->type) { 439 439 case INTEL_TLV_CNVI_TOP: 440 - version->cnvi_top = 441 - __le32_to_cpu(get_unaligned_le32(tlv->val)); 440 + version->cnvi_top = get_unaligned_le32(tlv->val); 442 441 break; 443 442 case INTEL_TLV_CNVR_TOP: 444 - version->cnvr_top = 445 - __le32_to_cpu(get_unaligned_le32(tlv->val)); 443 + version->cnvr_top = get_unaligned_le32(tlv->val); 446 444 break; 447 445 case INTEL_TLV_CNVI_BT: 448 - version->cnvi_bt = 449 - __le32_to_cpu(get_unaligned_le32(tlv->val)); 446 + version->cnvi_bt = get_unaligned_le32(tlv->val); 450 447 break; 451 448 case INTEL_TLV_CNVR_BT: 452 - version->cnvr_bt = 453 - __le32_to_cpu(get_unaligned_le32(tlv->val)); 449 + version->cnvr_bt = get_unaligned_le32(tlv->val); 454 450 break; 455 451 case INTEL_TLV_DEV_REV_ID: 456 - version->dev_rev_id = 457 - __le16_to_cpu(get_unaligned_le16(tlv->val)); 452 + version->dev_rev_id = get_unaligned_le16(tlv->val); 458 453 break; 459 454 case INTEL_TLV_IMAGE_TYPE: 460 455 version->img_type = tlv->val[0]; 461 456 break; 462 457 case INTEL_TLV_TIME_STAMP: 463 - version->timestamp = 464 - __le16_to_cpu(get_unaligned_le16(tlv->val)); 458 + version->timestamp = get_unaligned_le16(tlv->val); 465 459 break; 466 460 case INTEL_TLV_BUILD_TYPE: 467 461 version->build_type = tlv->val[0]; 468 462 break; 469 463 case INTEL_TLV_BUILD_NUM: 470 - version->build_num = 471 - __le32_to_cpu(get_unaligned_le32(tlv->val)); 464 + version->build_num = get_unaligned_le32(tlv->val); 472 465 break; 473 466 case INTEL_TLV_SECURE_BOOT: 474 467 version->secure_boot = tlv->val[0];
+8 -8
drivers/bluetooth/btmtksdio.c
··· 442 442 } 443 443 444 444 switch ((&pkts[i])->lsize) { 445 - case 1: 446 - dlen = skb->data[(&pkts[i])->loff]; 447 - break; 448 - case 2: 449 - dlen = get_unaligned_le16(skb->data + 445 + case 1: 446 + dlen = skb->data[(&pkts[i])->loff]; 447 + break; 448 + case 2: 449 + dlen = get_unaligned_le16(skb->data + 450 450 (&pkts[i])->loff); 451 - break; 452 - default: 453 - goto err_kfree_skb; 451 + break; 452 + default: 453 + goto err_kfree_skb; 454 454 } 455 455 456 456 pad_size = skb->len - (&pkts[i])->hlen - dlen;
+67
drivers/bluetooth/btqca.c
··· 94 94 } 95 95 EXPORT_SYMBOL_GPL(qca_read_soc_version); 96 96 97 + static int qca_read_fw_build_info(struct hci_dev *hdev) 98 + { 99 + struct sk_buff *skb; 100 + struct edl_event_hdr *edl; 101 + char cmd, build_label[QCA_FW_BUILD_VER_LEN]; 102 + int build_lbl_len, err = 0; 103 + 104 + bt_dev_dbg(hdev, "QCA read fw build info"); 105 + 106 + cmd = EDL_GET_BUILD_INFO_CMD; 107 + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, 108 + &cmd, 0, HCI_INIT_TIMEOUT); 109 + if (IS_ERR(skb)) { 110 + err = PTR_ERR(skb); 111 + bt_dev_err(hdev, "Reading QCA fw build info failed (%d)", 112 + err); 113 + return err; 114 + } 115 + 116 + edl = (struct edl_event_hdr *)(skb->data); 117 + if (!edl) { 118 + bt_dev_err(hdev, "QCA read fw build info with no header"); 119 + err = -EILSEQ; 120 + goto out; 121 + } 122 + 123 + if (edl->cresp != EDL_CMD_REQ_RES_EVT || 124 + edl->rtype != EDL_GET_BUILD_INFO_CMD) { 125 + bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, 126 + edl->rtype); 127 + err = -EIO; 128 + goto out; 129 + } 130 + 131 + build_lbl_len = edl->data[0]; 132 + if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) { 133 + memcpy(build_label, edl->data + 1, build_lbl_len); 134 + *(build_label + build_lbl_len) = '\0'; 135 + } 136 + 137 + hci_set_fw_info(hdev, "%s", build_label); 138 + 139 + out: 140 + kfree_skb(skb); 141 + return err; 142 + } 143 + 97 144 static int qca_send_reset(struct hci_dev *hdev) 98 145 { 99 146 struct sk_buff *skb; ··· 564 517 return err; 565 518 } 566 519 520 + /* WCN399x supports the Microsoft vendor extension with 0xFD70 as the 521 + * VsMsftOpCode. 
522 + */ 523 + switch (soc_type) { 524 + case QCA_WCN3990: 525 + case QCA_WCN3991: 526 + case QCA_WCN3998: 527 + hci_set_msft_opcode(hdev, 0xFD70); 528 + break; 529 + default: 530 + break; 531 + } 532 + 567 533 /* Perform HCI reset */ 568 534 err = qca_send_reset(hdev); 569 535 if (err < 0) { 570 536 bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err); 571 537 return err; 538 + } 539 + 540 + if (soc_type == QCA_WCN3991) { 541 + /* get fw build info */ 542 + err = qca_read_fw_build_info(hdev); 543 + if (err < 0) 544 + return err; 572 545 } 573 546 574 547 bt_dev_info(hdev, "QCA setup on UART is completed");
+1
drivers/bluetooth/btqca.h
··· 11 11 #define EDL_PATCH_CMD_LEN (1) 12 12 #define EDL_PATCH_VER_REQ_CMD (0x19) 13 13 #define EDL_PATCH_TLV_REQ_CMD (0x1E) 14 + #define EDL_GET_BUILD_INFO_CMD (0x20) 14 15 #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) 15 16 #define MAX_SIZE_PER_TLV_SEGMENT (243) 16 17 #define QCA_PRE_SHUTDOWN_CMD (0xFC08)
+19 -8
drivers/bluetooth/btqcomsmd.c
··· 142 142 143 143 btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", 144 144 btqcomsmd_cmd_callback, btq); 145 - if (IS_ERR(btq->cmd_channel)) 146 - return PTR_ERR(btq->cmd_channel); 145 + if (IS_ERR(btq->cmd_channel)) { 146 + ret = PTR_ERR(btq->cmd_channel); 147 + goto destroy_acl_channel; 148 + } 147 149 148 150 hdev = hci_alloc_dev(); 149 - if (!hdev) 150 - return -ENOMEM; 151 + if (!hdev) { 152 + ret = -ENOMEM; 153 + goto destroy_cmd_channel; 154 + } 151 155 152 156 hci_set_drvdata(hdev, btq); 153 157 btq->hdev = hdev; ··· 165 161 hdev->set_bdaddr = qca_set_bdaddr_rome; 166 162 167 163 ret = hci_register_dev(hdev); 168 - if (ret < 0) { 169 - hci_free_dev(hdev); 170 - return ret; 171 - } 164 + if (ret < 0) 165 + goto hci_free_dev; 172 166 173 167 platform_set_drvdata(pdev, btq); 174 168 175 169 return 0; 170 + 171 + hci_free_dev: 172 + hci_free_dev(hdev); 173 + destroy_cmd_channel: 174 + rpmsg_destroy_ept(btq->cmd_channel); 175 + destroy_acl_channel: 176 + rpmsg_destroy_ept(btq->acl_channel); 177 + 178 + return ret; 176 179 } 177 180 178 181 static int btqcomsmd_remove(struct platform_device *pdev)
+40 -3
drivers/bluetooth/btrtl.c
··· 38 38 .hci_ver = (hciv), \ 39 39 .hci_bus = (bus) 40 40 41 + enum btrtl_chip_id { 42 + CHIP_ID_8723A, 43 + CHIP_ID_8723B, 44 + CHIP_ID_8821A, 45 + CHIP_ID_8761A, 46 + CHIP_ID_8822B = 8, 47 + CHIP_ID_8723D, 48 + CHIP_ID_8821C, 49 + CHIP_ID_8822C = 13, 50 + CHIP_ID_8761B, 51 + CHIP_ID_8852A = 18, 52 + }; 53 + 41 54 struct id_table { 42 55 __u16 match_flags; 43 56 __u16 lmp_subver; ··· 71 58 u8 *cfg_data; 72 59 int cfg_len; 73 60 bool drop_fw; 61 + int project_id; 74 62 }; 75 63 76 64 static const struct id_table ic_id_table[] = { ··· 321 307 322 308 /* Find project_id in table */ 323 309 for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) { 324 - if (project_id == project_id_to_lmp_subver[i].id) 310 + if (project_id == project_id_to_lmp_subver[i].id) { 311 + btrtl_dev->project_id = project_id; 325 312 break; 313 + } 326 314 } 327 315 328 316 if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { ··· 674 658 } 675 659 } 676 660 661 + /* RTL8822CE supports the Microsoft vendor extension and uses 0xFCF0 662 + * for VsMsftOpCode. 663 + */ 664 + if (lmp_subver == RTL_ROM_LMP_8822B) 665 + hci_set_msft_opcode(hdev, 0xFCF0); 666 + 677 667 return btrtl_dev; 678 668 679 669 err_free: ··· 730 708 731 709 ret = btrtl_download_firmware(hdev, btrtl_dev); 732 710 733 - btrtl_free(btrtl_dev); 734 - 735 711 /* Enable controller to do both LE scan and BR/EDR inquiry 736 712 * simultaneously. 737 713 */ 738 714 set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); 739 715 716 + /* Enable central-peripheral role (able to create new connections with 717 + * an existing connection in slave role). 718 + */ 719 + /* Enable WBS supported for the specific Realtek devices. 
*/ 720 + switch (btrtl_dev->project_id) { 721 + case CHIP_ID_8822C: 722 + case CHIP_ID_8852A: 723 + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); 724 + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); 725 + break; 726 + default: 727 + rtl_dev_dbg(hdev, "Central-peripheral role not enabled."); 728 + rtl_dev_dbg(hdev, "WBS supported not enabled."); 729 + break; 730 + } 731 + 732 + btrtl_free(btrtl_dev); 740 733 return ret; 741 734 } 742 735 EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+269 -44
drivers/bluetooth/btusb.c
··· 368 368 BTUSB_WIDEBAND_SPEECH }, 369 369 { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEWGEN | 370 370 BTUSB_WIDEBAND_SPEECH}, 371 + { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_NEWGEN | 372 + BTUSB_WIDEBAND_SPEECH}, 371 373 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, 372 374 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, 373 375 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, ··· 508 506 #define BTUSB_HW_RESET_ACTIVE 12 509 507 #define BTUSB_TX_WAIT_VND_EVT 13 510 508 #define BTUSB_WAKEUP_DISABLE 14 511 - #define BTUSB_USE_ALT1_FOR_WBS 15 512 509 513 510 struct btusb_data { 514 511 struct hci_dev *hdev; ··· 1737 1736 new_alts = data->sco_num; 1738 1737 } 1739 1738 } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) { 1740 - /* Check if Alt 6 is supported for Transparent audio */ 1741 - if (btusb_find_altsetting(data, 6)) { 1742 - data->usb_alt6_packet_flow = true; 1743 - new_alts = 6; 1744 - } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) { 1745 - new_alts = 1; 1746 - } else { 1747 - bt_dev_err(hdev, "Device does not support ALT setting 6"); 1748 - } 1739 + /* Bluetooth USB spec recommends alt 6 (63 bytes), but 1740 + * many adapters do not support it. Alt 1 appears to 1741 + * work for all adapters that do not have alt 6, and 1742 + * which work with WBS at all. 1743 + */ 1744 + new_alts = btusb_find_altsetting(data, 6) ? 
6 : 1; 1749 1745 } 1750 1746 1751 1747 if (btusb_switch_alt_setting(hdev, new_alts) < 0) ··· 1901 1903 le16_to_cpu(rp->lmp_subver) == 0x1012 && 1902 1904 le16_to_cpu(rp->hci_rev) == 0x0810 && 1903 1905 le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) { 1904 - bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues\n"); 1906 + bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues"); 1905 1907 1906 1908 pm_runtime_allow(&data->udev->dev); 1907 1909 ··· 1909 1911 if (ret >= 0) 1910 1912 msleep(200); 1911 1913 else 1912 - bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround\n"); 1914 + bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround"); 1913 1915 1914 1916 pm_runtime_forbid(&data->udev->dev); 1915 1917 ··· 2922 2924 * extension are using 0xFC1E for VsMsftOpCode. 2923 2925 */ 2924 2926 switch (ver.hw_variant) { 2927 + case 0x11: /* JfP */ 2925 2928 case 0x12: /* ThP */ 2929 + case 0x13: /* HrP */ 2930 + case 0x14: /* CcP */ 2926 2931 hci_set_msft_opcode(hdev, 0xFC1E); 2927 2932 break; 2928 2933 } ··· 3128 3127 #define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin" 3129 3128 3130 3129 #define HCI_WMT_MAX_EVENT_SIZE 64 3130 + /* It is for mt79xx download rom patch*/ 3131 + #define MTK_FW_ROM_PATCH_HEADER_SIZE 32 3132 + #define MTK_FW_ROM_PATCH_GD_SIZE 64 3133 + #define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64 3134 + #define MTK_SEC_MAP_COMMON_SIZE 12 3135 + #define MTK_SEC_MAP_NEED_SEND_SIZE 52 3131 3136 3132 3137 enum { 3133 3138 BTMTK_WMT_PATCH_DWNLD = 0x1, ··· 3145 3138 enum { 3146 3139 BTMTK_WMT_INVALID, 3147 3140 BTMTK_WMT_PATCH_UNDONE, 3141 + BTMTK_WMT_PATCH_PROGRESS, 3148 3142 BTMTK_WMT_PATCH_DONE, 3149 3143 BTMTK_WMT_ON_UNDONE, 3150 3144 BTMTK_WMT_ON_DONE, ··· 3161 3153 3162 3154 struct btmtk_hci_wmt_cmd { 3163 3155 struct btmtk_wmt_hdr hdr; 3164 - u8 data[256]; 
3156 + u8 data[]; 3165 3157 } __packed; 3166 3158 3167 3159 struct btmtk_hci_wmt_evt { ··· 3190 3182 u32 *status; 3191 3183 }; 3192 3184 3185 + struct btmtk_patch_header { 3186 + u8 datetime[16]; 3187 + u8 platform[4]; 3188 + __le16 hwver; 3189 + __le16 swver; 3190 + __le32 magicnum; 3191 + } __packed; 3192 + 3193 + struct btmtk_global_desc { 3194 + __le32 patch_ver; 3195 + __le32 sub_sys; 3196 + __le32 feature_opt; 3197 + __le32 section_num; 3198 + } __packed; 3199 + 3200 + struct btmtk_section_map { 3201 + __le32 sectype; 3202 + __le32 secoffset; 3203 + __le32 secsize; 3204 + union { 3205 + __le32 u4SecSpec[13]; 3206 + struct { 3207 + __le32 dlAddr; 3208 + __le32 dlsize; 3209 + __le32 seckeyidx; 3210 + __le32 alignlen; 3211 + __le32 sectype; 3212 + __le32 dlmodecrctype; 3213 + __le32 crc; 3214 + __le32 reserved[6]; 3215 + } bin_info_spec; 3216 + }; 3217 + } __packed; 3218 + 3193 3219 static void btusb_mtk_wmt_recv(struct urb *urb) 3194 3220 { 3195 3221 struct hci_dev *hdev = urb->context; ··· 3241 3199 skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); 3242 3200 if (!skb) { 3243 3201 hdev->stat.err_rx++; 3244 - goto err_out; 3202 + return; 3245 3203 } 3246 3204 3247 3205 hci_skb_pkt_type(skb) = HCI_EVENT_PKT; ··· 3259 3217 */ 3260 3218 if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { 3261 3219 data->evt_skb = skb_clone(skb, GFP_ATOMIC); 3262 - if (!data->evt_skb) 3263 - goto err_out; 3220 + if (!data->evt_skb) { 3221 + kfree_skb(skb); 3222 + return; 3223 + } 3264 3224 } 3265 3225 3266 3226 err = hci_recv_frame(hdev, skb); 3267 - if (err < 0) 3268 - goto err_free_skb; 3227 + if (err < 0) { 3228 + kfree_skb(data->evt_skb); 3229 + data->evt_skb = NULL; 3230 + return; 3231 + } 3269 3232 3270 3233 if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT, 3271 3234 &data->flags)) { ··· 3279 3232 wake_up_bit(&data->flags, 3280 3233 BTUSB_TX_WAIT_VND_EVT); 3281 3234 } 3282 - err_out: 3283 - return; 3284 - err_free_skb: 3285 - kfree_skb(data->evt_skb); 3286 - data->evt_skb = 
NULL; 3287 3235 return; 3288 3236 } else if (urb->status == -ENOENT) { 3289 3237 /* Avoid suspend failed when usb_kill_urb */ ··· 3294 3252 * to generate the event. Otherwise, the WMT event cannot return from 3295 3253 * the device successfully. 3296 3254 */ 3297 - udelay(100); 3255 + udelay(500); 3298 3256 3299 3257 usb_anchor_urb(urb, &data->ctrl_anchor); 3300 3258 err = usb_submit_urb(urb, GFP_ATOMIC); ··· 3369 3327 struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; 3370 3328 u32 hlen, status = BTMTK_WMT_INVALID; 3371 3329 struct btmtk_hci_wmt_evt *wmt_evt; 3372 - struct btmtk_hci_wmt_cmd wc; 3330 + struct btmtk_hci_wmt_cmd *wc; 3373 3331 struct btmtk_wmt_hdr *hdr; 3374 3332 int err; 3375 3333 ··· 3383 3341 if (hlen > 255) 3384 3342 return -EINVAL; 3385 3343 3386 - hdr = (struct btmtk_wmt_hdr *)&wc; 3344 + wc = kzalloc(hlen, GFP_KERNEL); 3345 + if (!wc) 3346 + return -ENOMEM; 3347 + 3348 + hdr = &wc->hdr; 3387 3349 hdr->dir = 1; 3388 3350 hdr->op = wmt_params->op; 3389 3351 hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); 3390 3352 hdr->flag = wmt_params->flag; 3391 - memcpy(wc.data, wmt_params->data, wmt_params->dlen); 3353 + memcpy(wc->data, wmt_params->data, wmt_params->dlen); 3392 3354 3393 3355 set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 3394 3356 3395 - err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); 3357 + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); 3396 3358 3397 3359 if (err < 0) { 3398 3360 clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 3399 - return err; 3361 + goto err_free_wc; 3400 3362 } 3401 3363 3402 3364 /* The vendor specific WMT commands are all answered by a vendor ··· 3417 3371 if (err == -EINTR) { 3418 3372 bt_dev_err(hdev, "Execution of wmt command interrupted"); 3419 3373 clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 3420 - return err; 3374 + goto err_free_wc; 3421 3375 } 3422 3376 3423 3377 if (err) { 3424 3378 bt_dev_err(hdev, "Execution of wmt command timed out"); 3425 3379 clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 3426 - return 
-ETIMEDOUT; 3380 + err = -ETIMEDOUT; 3381 + goto err_free_wc; 3427 3382 } 3428 3383 3429 3384 /* Parse and handle the return WMT event */ ··· 3452 3405 else 3453 3406 status = BTMTK_WMT_ON_UNDONE; 3454 3407 break; 3408 + case BTMTK_WMT_PATCH_DWNLD: 3409 + if (wmt_evt->whdr.flag == 2) 3410 + status = BTMTK_WMT_PATCH_DONE; 3411 + else if (wmt_evt->whdr.flag == 1) 3412 + status = BTMTK_WMT_PATCH_PROGRESS; 3413 + else 3414 + status = BTMTK_WMT_PATCH_UNDONE; 3415 + break; 3455 3416 } 3456 3417 3457 3418 if (wmt_params->status) ··· 3468 3413 err_free_skb: 3469 3414 kfree_skb(data->evt_skb); 3470 3415 data->evt_skb = NULL; 3416 + err_free_wc: 3417 + kfree(wc); 3418 + return err; 3419 + } 3420 + 3421 + static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname) 3422 + { 3423 + struct btmtk_hci_wmt_params wmt_params; 3424 + struct btmtk_global_desc *globaldesc = NULL; 3425 + struct btmtk_section_map *sectionmap; 3426 + const struct firmware *fw; 3427 + const u8 *fw_ptr; 3428 + const u8 *fw_bin_ptr; 3429 + int err, dlen, i, status; 3430 + u8 flag, first_block, retry; 3431 + u32 section_num, dl_size, section_offset; 3432 + u8 cmd[64]; 3433 + 3434 + err = request_firmware(&fw, fwname, &hdev->dev); 3435 + if (err < 0) { 3436 + bt_dev_err(hdev, "Failed to load firmware file (%d)", err); 3437 + return err; 3438 + } 3439 + 3440 + fw_ptr = fw->data; 3441 + fw_bin_ptr = fw_ptr; 3442 + globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE); 3443 + section_num = globaldesc->section_num; 3444 + 3445 + for (i = 0; i < section_num; i++) { 3446 + first_block = 1; 3447 + fw_ptr = fw_bin_ptr; 3448 + sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + 3449 + MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i); 3450 + 3451 + section_offset = sectionmap->secoffset; 3452 + dl_size = sectionmap->bin_info_spec.dlsize; 3453 + 3454 + if (dl_size > 0) { 3455 + retry = 20; 3456 + while (retry > 0) { 3457 + cmd[0] 
= 0; /* 0 means legacy dl mode. */ 3458 + memcpy(cmd + 1, 3459 + fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + 3460 + MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i + 3461 + MTK_SEC_MAP_COMMON_SIZE, 3462 + MTK_SEC_MAP_NEED_SEND_SIZE + 1); 3463 + 3464 + wmt_params.op = BTMTK_WMT_PATCH_DWNLD; 3465 + wmt_params.status = &status; 3466 + wmt_params.flag = 0; 3467 + wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1; 3468 + wmt_params.data = &cmd; 3469 + 3470 + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 3471 + if (err < 0) { 3472 + bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", 3473 + err); 3474 + goto err_release_fw; 3475 + } 3476 + 3477 + if (status == BTMTK_WMT_PATCH_UNDONE) { 3478 + break; 3479 + } else if (status == BTMTK_WMT_PATCH_PROGRESS) { 3480 + msleep(100); 3481 + retry--; 3482 + } else if (status == BTMTK_WMT_PATCH_DONE) { 3483 + goto next_section; 3484 + } else { 3485 + bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)", 3486 + status); 3487 + goto err_release_fw; 3488 + } 3489 + } 3490 + 3491 + fw_ptr += section_offset; 3492 + wmt_params.op = BTMTK_WMT_PATCH_DWNLD; 3493 + wmt_params.status = NULL; 3494 + 3495 + while (dl_size > 0) { 3496 + dlen = min_t(int, 250, dl_size); 3497 + if (first_block == 1) { 3498 + flag = 1; 3499 + first_block = 0; 3500 + } else if (dl_size - dlen <= 0) { 3501 + flag = 3; 3502 + } else { 3503 + flag = 2; 3504 + } 3505 + 3506 + wmt_params.flag = flag; 3507 + wmt_params.dlen = dlen; 3508 + wmt_params.data = fw_ptr; 3509 + 3510 + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 3511 + if (err < 0) { 3512 + bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", 3513 + err); 3514 + goto err_release_fw; 3515 + } 3516 + 3517 + dl_size -= dlen; 3518 + fw_ptr += dlen; 3519 + } 3520 + } 3521 + next_section: 3522 + continue; 3523 + } 3524 + /* Wait a few moments for firmware activation done */ 3525 + usleep_range(100000, 120000); 3526 + 3527 + err_release_fw: 3528 + release_firmware(fw); 3471 3529 3472 3530 return 
err; 3473 3531 } ··· 3633 3465 while (fw_size > 0) { 3634 3466 dlen = min_t(int, 250, fw_size); 3635 3467 3636 - /* Tell deivice the position in sequence */ 3468 + /* Tell device the position in sequence */ 3637 3469 if (fw_size - dlen <= 0) 3638 3470 flag = 3; 3639 3471 else if (fw_size < fw->size - 30) ··· 3723 3555 return err; 3724 3556 } 3725 3557 3726 - static int btusb_mtk_id_get(struct btusb_data *data, u32 *id) 3558 + static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) 3727 3559 { 3728 - return btusb_mtk_reg_read(data, 0x80000008, id); 3560 + return btusb_mtk_reg_read(data, reg, id); 3729 3561 } 3730 3562 3731 3563 static int btusb_mtk_setup(struct hci_dev *hdev) ··· 3739 3571 const char *fwname; 3740 3572 int err, status; 3741 3573 u32 dev_id; 3574 + char fw_bin_name[64]; 3575 + u32 fw_version; 3742 3576 u8 param; 3743 3577 3744 3578 calltime = ktime_get(); 3745 3579 3746 - err = btusb_mtk_id_get(data, &dev_id); 3580 + err = btusb_mtk_id_get(data, 0x80000008, &dev_id); 3747 3581 if (err < 0) { 3748 3582 bt_dev_err(hdev, "Failed to get device id (%d)", err); 3749 3583 return err; 3584 + } 3585 + 3586 + if (!dev_id) { 3587 + err = btusb_mtk_id_get(data, 0x70010200, &dev_id); 3588 + if (err < 0) { 3589 + bt_dev_err(hdev, "Failed to get device id (%d)", err); 3590 + return err; 3591 + } 3592 + err = btusb_mtk_id_get(data, 0x80021004, &fw_version); 3593 + if (err < 0) { 3594 + bt_dev_err(hdev, "Failed to get fw version (%d)", err); 3595 + return err; 3596 + } 3750 3597 } 3751 3598 3752 3599 switch (dev_id) { ··· 3771 3588 case 0x7668: 3772 3589 fwname = FIRMWARE_MT7668; 3773 3590 break; 3591 + case 0x7961: 3592 + snprintf(fw_bin_name, sizeof(fw_bin_name), 3593 + "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", 3594 + dev_id & 0xffff, (fw_version & 0xff) + 1); 3595 + err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name); 3596 + 3597 + /* Enable Bluetooth protocol */ 3598 + param = 1; 3599 + wmt_params.op = BTMTK_WMT_FUNC_CTRL; 3600 + 
wmt_params.flag = 0; 3601 + wmt_params.dlen = sizeof(param); 3602 + wmt_params.data = &param; 3603 + wmt_params.status = NULL; 3604 + 3605 + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 3606 + if (err < 0) { 3607 + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 3608 + return err; 3609 + } 3610 + goto done; 3774 3611 default: 3775 - bt_dev_err(hdev, "Unsupported support hardware variant (%08x)", 3612 + bt_dev_err(hdev, "Unsupported hardware variant (%08x)", 3776 3613 dev_id); 3777 3614 return -ENODEV; 3778 3615 } ··· 3868 3665 } 3869 3666 kfree_skb(skb); 3870 3667 3668 + done: 3871 3669 rettime = ktime_get(); 3872 3670 delta = ktime_sub(rettime, calltime); 3873 3671 duration = (unsigned long long)ktime_to_ns(delta) >> 10; ··· 3929 3725 3930 3726 skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); 3931 3727 if (!skb) { 3932 - bt_dev_err(hdev, "%s: No memory\n", __func__); 3728 + bt_dev_err(hdev, "%s: No memory", __func__); 3933 3729 return -ENOMEM; 3934 3730 } 3935 3731 ··· 3938 3734 3939 3735 ret = btusb_send_frame(hdev, skb); 3940 3736 if (ret) { 3941 - bt_dev_err(hdev, "%s: configuration failed\n", __func__); 3737 + bt_dev_err(hdev, "%s: configuration failed", __func__); 3942 3738 kfree_skb(skb); 3943 3739 return ret; 3944 3740 } ··· 4273 4069 info = &qca_devices_table[i]; 4274 4070 } 4275 4071 if (!info) { 4072 + /* If the rom_version is not matched in the qca_devices_table 4073 + * and the high ROM version is not zero, we assume this chip no 4074 + * need to load the rampatch and nvm. 
4075 + */ 4076 + if (ver_rom & ~0xffffU) 4077 + return 0; 4078 + 4276 4079 bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom); 4277 4080 return -ENODEV; 4278 4081 } ··· 4473 4262 return true; 4474 4263 4475 4264 return !device_may_wakeup(&data->udev->dev); 4265 + } 4266 + 4267 + static int btusb_shutdown_qca(struct hci_dev *hdev) 4268 + { 4269 + struct sk_buff *skb; 4270 + 4271 + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); 4272 + if (IS_ERR(skb)) { 4273 + bt_dev_err(hdev, "HCI reset during shutdown failed"); 4274 + return PTR_ERR(skb); 4275 + } 4276 + kfree_skb(skb); 4277 + 4278 + return 0; 4476 4279 } 4477 4280 4478 4281 static int btusb_probe(struct usb_interface *intf, ··· 4748 4523 4749 4524 if (id->driver_info & BTUSB_QCA_WCN6855) { 4750 4525 data->setup_on_usb = btusb_setup_qca; 4526 + hdev->shutdown = btusb_shutdown_qca; 4751 4527 hdev->set_bdaddr = btusb_set_bdaddr_wcn6855; 4752 4528 hdev->cmd_timeout = btusb_qca_cmd_timeout; 4753 4529 set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); ··· 4774 4548 * (DEVICE_REMOTE_WAKEUP) 4775 4549 */ 4776 4550 set_bit(BTUSB_WAKEUP_DISABLE, &data->flags); 4777 - if (btusb_find_altsetting(data, 1)) 4778 - set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags); 4779 - else 4780 - bt_dev_err(hdev, "Device does not support ALT setting 1"); 4781 4551 } 4782 4552 4783 4553 if (!reset) ··· 4849 4627 data->diag = NULL; 4850 4628 } 4851 4629 4852 - if (enable_autosuspend) 4853 - usb_enable_autosuspend(data->udev); 4630 + if (!enable_autosuspend) 4631 + usb_disable_autosuspend(data->udev); 4854 4632 4855 4633 err = hci_register_dev(hdev); 4856 4634 if (err < 0) ··· 4910 4688 gpiod_put(data->reset_gpio); 4911 4689 4912 4690 hci_free_dev(hdev); 4691 + 4692 + if (!enable_autosuspend) 4693 + usb_enable_autosuspend(data->udev); 4913 4694 } 4914 4695 4915 4696 #ifdef CONFIG_PM
+1
drivers/bluetooth/hci_bcm.c
··· 654 654 { H4_RECV_ACL, .recv = hci_recv_frame }, 655 655 { H4_RECV_SCO, .recv = hci_recv_frame }, 656 656 { H4_RECV_EVENT, .recv = hci_recv_frame }, 657 + { H4_RECV_ISO, .recv = hci_recv_frame }, 657 658 { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, 658 659 { BCM_RECV_NULL, .recv = hci_recv_diag }, 659 660 { BCM_RECV_TYPE49, .recv = hci_recv_diag },
+7
drivers/bluetooth/hci_h5.c
··· 906 906 /* Give the device some time before the hci-core sends it a reset */ 907 907 usleep_range(10000, 20000); 908 908 909 + /* Enable controller to do both LE scan and BR/EDR inquiry 910 + * simultaneously. 911 + */ 912 + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks); 913 + 909 914 out_free: 910 915 btrtl_free(btrtl_dev); 911 916 ··· 1026 1021 { .compatible = "realtek,rtl8822cs-bt", 1027 1022 .data = (const void *)&rtl_vnd }, 1028 1023 { .compatible = "realtek,rtl8723bs-bt", 1024 + .data = (const void *)&rtl_vnd }, 1025 + { .compatible = "realtek,rtl8723ds-bt", 1029 1026 .data = (const void *)&rtl_vnd }, 1030 1027 #endif 1031 1028 { },
+3 -4
drivers/bluetooth/hci_ldisc.c
··· 127 127 if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) 128 128 goto no_schedule; 129 129 130 - if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) { 131 - set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); 130 + set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); 131 + if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) 132 132 goto no_schedule; 133 - } 134 133 135 134 BT_DBG(""); 136 135 ··· 173 174 kfree_skb(skb); 174 175 } 175 176 177 + clear_bit(HCI_UART_SENDING, &hu->tx_state); 176 178 if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)) 177 179 goto restart; 178 180 179 - clear_bit(HCI_UART_SENDING, &hu->tx_state); 180 181 wake_up_bit(&hu->tx_state, HCI_UART_SENDING); 181 182 } 182 183
+24 -9
drivers/bluetooth/hci_qca.c
··· 50 50 #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 51 51 #define CMD_TRANS_TIMEOUT_MS 100 52 52 #define MEMDUMP_TIMEOUT_MS 8000 53 - #define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000) 53 + #define IBS_DISABLE_SSR_TIMEOUT_MS \ 54 + (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS) 54 55 #define FW_DOWNLOAD_TIMEOUT_MS 3000 55 56 56 57 /* susclk rate */ ··· 77 76 QCA_MEMDUMP_COLLECTION, 78 77 QCA_HW_ERROR_EVENT, 79 78 QCA_SSR_TRIGGERED, 80 - QCA_BT_OFF 79 + QCA_BT_OFF, 80 + QCA_ROM_FW 81 81 }; 82 82 83 83 enum qca_capabilities { ··· 1026 1024 dump_size = __le32_to_cpu(dump->dump_size); 1027 1025 if (!(dump_size)) { 1028 1026 bt_dev_err(hu->hdev, "Rx invalid memdump size"); 1027 + kfree(qca_memdump); 1029 1028 kfree_skb(skb); 1029 + qca->qca_memdump = NULL; 1030 1030 mutex_unlock(&qca->hci_memdump_lock); 1031 1031 return; 1032 1032 } ··· 1665 1661 if (ret) 1666 1662 return ret; 1667 1663 1664 + clear_bit(QCA_ROM_FW, &qca->flags); 1668 1665 /* Patch downloading has to be done without IBS mode */ 1669 1666 set_bit(QCA_IBS_DISABLED, &qca->flags); 1670 1667 ··· 1723 1718 hu->hdev->cmd_timeout = qca_cmd_timeout; 1724 1719 } else if (ret == -ENOENT) { 1725 1720 /* No patch/nvm-config found, run with original fw/config */ 1721 + set_bit(QCA_ROM_FW, &qca->flags); 1726 1722 ret = 0; 1727 1723 } else if (ret == -EAGAIN) { 1728 1724 /* 1729 1725 * Userspace firmware loader will return -EAGAIN in case no 1730 1726 * patch/nvm-config is found, so run with original fw/config. 
1731 1727 */ 1728 + set_bit(QCA_ROM_FW, &qca->flags); 1732 1729 ret = 0; 1733 1730 } 1734 1731 ··· 2107 2100 2108 2101 set_bit(QCA_SUSPENDING, &qca->flags); 2109 2102 2110 - if (test_bit(QCA_BT_OFF, &qca->flags)) 2103 + /* if BT SoC is running with default firmware then it does not 2104 + * support in-band sleep 2105 + */ 2106 + if (test_bit(QCA_ROM_FW, &qca->flags)) 2111 2107 return 0; 2112 2108 2113 - if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { 2109 + /* During SSR after memory dump collection, controller will be 2110 + * powered off and then powered on.If controller is powered off 2111 + * during SSR then we should wait until SSR is completed. 2112 + */ 2113 + if (test_bit(QCA_BT_OFF, &qca->flags) && 2114 + !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) 2115 + return 0; 2116 + 2117 + if (test_bit(QCA_IBS_DISABLED, &qca->flags) || 2118 + test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { 2114 2119 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? 2115 2120 IBS_DISABLE_SSR_TIMEOUT_MS : 2116 2121 FW_DOWNLOAD_TIMEOUT_MS; 2117 2122 2118 2123 /* QCA_IBS_DISABLED flag is set to true, During FW download 2119 2124 * and during memory dump collection. It is reset to false, 2120 - * After FW download complete and after memory dump collections. 2125 + * After FW download complete. 2121 2126 */ 2122 2127 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, 2123 2128 TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout)); ··· 2140 2121 goto error; 2141 2122 } 2142 2123 } 2143 - 2144 - /* After memory dump collection, Controller is powered off.*/ 2145 - if (test_bit(QCA_BT_OFF, &qca->flags)) 2146 - return 0; 2147 2124 2148 2125 cancel_work_sync(&qca->ws_awake_device); 2149 2126 cancel_work_sync(&qca->ws_awake_rx);
+2 -2
drivers/bluetooth/hci_serdev.c
··· 83 83 hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); 84 84 kfree_skb(skb); 85 85 } 86 - } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); 87 86 88 - clear_bit(HCI_UART_SENDING, &hu->tx_state); 87 + clear_bit(HCI_UART_SENDING, &hu->tx_state); 88 + } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); 89 89 } 90 90 91 91 /* ------- Interface to HCI layer ------ */
+8
include/net/bluetooth/hci.h
··· 238 238 * during the hdev->setup vendor callback. 239 239 */ 240 240 HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, 241 + 242 + /* 243 + * When this quirk is set, then the hci_suspend_notifier is not 244 + * registered. This is intended for devices which drop completely 245 + * from the bus on system-suspend and which will show up as a new 246 + * HCI after resume. 247 + */ 248 + HCI_QUIRK_NO_SUSPEND_NOTIFIER, 241 249 }; 242 250 243 251 /* HCI device flags */
+32 -5
include/net/bluetooth/hci_core.h
··· 105 105 SUSPEND_POWERING_DOWN, 106 106 107 107 SUSPEND_PREPARE_NOTIFIER, 108 + 109 + SUSPEND_SET_ADV_FILTER, 108 110 __SUSPEND_NUM_TASKS 109 111 }; 110 112 ··· 252 250 __u8 value[HCI_MAX_AD_LENGTH]; 253 251 }; 254 252 253 + struct adv_rssi_thresholds { 254 + __s8 low_threshold; 255 + __s8 high_threshold; 256 + __u16 low_threshold_timeout; 257 + __u16 high_threshold_timeout; 258 + __u8 sampling_period; 259 + }; 260 + 255 261 struct adv_monitor { 256 262 struct list_head patterns; 257 - bool active; 263 + struct adv_rssi_thresholds rssi; 258 264 __u16 handle; 265 + 266 + enum { 267 + ADV_MONITOR_STATE_NOT_REGISTERED, 268 + ADV_MONITOR_STATE_REGISTERED, 269 + ADV_MONITOR_STATE_OFFLOADED 270 + } state; 259 271 }; 260 272 261 273 #define HCI_MIN_ADV_MONITOR_HANDLE 1 262 - #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 274 + #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 263 275 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 276 + #define HCI_ADV_MONITOR_EXT_NONE 1 277 + #define HCI_ADV_MONITOR_EXT_MSFT 2 264 278 265 279 #define HCI_MAX_SHORT_NAME_LENGTH 10 266 280 ··· 1334 1316 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); 1335 1317 1336 1318 void hci_adv_monitors_clear(struct hci_dev *hdev); 1337 - void hci_free_adv_monitor(struct adv_monitor *monitor); 1338 - int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); 1339 - int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); 1319 + void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); 1320 + int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); 1321 + int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); 1322 + bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 1323 + int *err); 1324 + bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err); 1325 + bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err); 1340 1326 bool hci_is_adv_monitoring(struct 
hci_dev *hdev); 1327 + int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); 1341 1328 1342 1329 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1343 1330 ··· 1365 1342 #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) 1366 1343 #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) 1367 1344 #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) 1345 + #define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M) 1368 1346 #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) 1369 1347 #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) 1370 1348 #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) ··· 1818 1794 u8 instance); 1819 1795 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, 1820 1796 u8 instance); 1797 + void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); 1821 1798 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); 1799 + int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); 1800 + int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); 1822 1801 1823 1802 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, 1824 1803 u16 to_multiplier);
+1
include/net/bluetooth/l2cap.h
··· 207 207 __le16 len; 208 208 __le16 cid; 209 209 } __packed; 210 + #define L2CAP_LEN_SIZE 2 210 211 #define L2CAP_HDR_SIZE 4 211 212 #define L2CAP_ENH_HDR_SIZE 6 212 213 #define L2CAP_EXT_HDR_SIZE 8
+16
include/net/bluetooth/mgmt.h
··· 821 821 __u8 instance; 822 822 } __packed; 823 823 824 + struct mgmt_adv_rssi_thresholds { 825 + __s8 high_threshold; 826 + __le16 high_threshold_timeout; 827 + __s8 low_threshold; 828 + __le16 low_threshold_timeout; 829 + __u8 sampling_period; 830 + } __packed; 831 + 832 + #define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056 833 + struct mgmt_cp_add_adv_patterns_monitor_rssi { 834 + struct mgmt_adv_rssi_thresholds rssi; 835 + __u8 pattern_count; 836 + struct mgmt_adv_pattern patterns[]; 837 + } __packed; 838 + #define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8 839 + 824 840 #define MGMT_EV_CMD_COMPLETE 0x0001 825 841 struct mgmt_ev_cmd_complete { 826 842 __le16 opcode;
+2 -1
net/bluetooth/a2mp.c
··· 381 381 hdev = hci_dev_get(req->id); 382 382 if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { 383 383 struct a2mp_amp_assoc_rsp rsp; 384 - rsp.id = req->id; 385 384 386 385 memset(&rsp, 0, sizeof(rsp)); 386 + rsp.id = req->id; 387 387 388 388 if (tmp) { 389 389 rsp.status = A2MP_STATUS_COLLISION_OCCURED; ··· 512 512 assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); 513 513 if (!assoc) { 514 514 amp_ctrl_put(ctrl); 515 + hci_dev_put(hdev); 515 516 return -ENOMEM; 516 517 } 517 518
+11 -11
net/bluetooth/af_bluetooth.c
··· 72 72 BUG_ON(!sock_allow_reclassification(sk)); 73 73 74 74 sock_lock_init_class_and_name(sk, 75 - bt_slock_key_strings[proto], &bt_slock_key[proto], 76 - bt_key_strings[proto], &bt_lock_key[proto]); 75 + bt_slock_key_strings[proto], &bt_slock_key[proto], 76 + bt_key_strings[proto], &bt_lock_key[proto]); 77 77 } 78 78 EXPORT_SYMBOL(bt_sock_reclassify_lock); 79 79 ··· 451 451 } 452 452 453 453 __poll_t bt_sock_poll(struct file *file, struct socket *sock, 454 - poll_table *wait) 454 + poll_table *wait) 455 455 { 456 456 struct sock *sk = sock->sk; 457 457 __poll_t mask = 0; ··· 478 478 mask |= EPOLLHUP; 479 479 480 480 if (sk->sk_state == BT_CONNECT || 481 - sk->sk_state == BT_CONNECT2 || 482 - sk->sk_state == BT_CONFIG) 481 + sk->sk_state == BT_CONNECT2 || 482 + sk->sk_state == BT_CONFIG) 483 483 return mask; 484 484 485 485 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) ··· 508 508 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 509 509 if (amount < 0) 510 510 amount = 0; 511 - err = put_user(amount, (int __user *) arg); 511 + err = put_user(amount, (int __user *)arg); 512 512 break; 513 513 514 514 case TIOCINQ: ··· 519 519 skb = skb_peek(&sk->sk_receive_queue); 520 520 amount = skb ? skb->len : 0; 521 521 release_sock(sk); 522 - err = put_user(amount, (int __user *) arg); 522 + err = put_user(amount, (int __user *)arg); 523 523 break; 524 524 525 525 default: ··· 637 637 struct bt_sock_list *l = PDE_DATA(file_inode(seq->file)); 638 638 639 639 if (v == SEQ_START_TOKEN) { 640 - seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Parent"); 640 + seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent"); 641 641 642 642 if (l->custom_seq_show) { 643 643 seq_putc(seq, ' '); ··· 657 657 sk_wmem_alloc_get(sk), 658 658 from_kuid(seq_user_ns(seq), sock_i_uid(sk)), 659 659 sock_i_ino(sk), 660 - bt->parent? sock_i_ino(bt->parent): 0LU); 660 + bt->parent ? 
sock_i_ino(bt->parent) : 0LU); 661 661 662 662 if (l->custom_seq_show) { 663 663 seq_putc(seq, ' '); ··· 678 678 679 679 int bt_procfs_init(struct net *net, const char *name, 680 680 struct bt_sock_list *sk_list, 681 - int (* seq_show)(struct seq_file *, void *)) 681 + int (*seq_show)(struct seq_file *, void *)) 682 682 { 683 683 sk_list->custom_seq_show = seq_show; 684 684 ··· 694 694 #else 695 695 int bt_procfs_init(struct net *net, const char *name, 696 696 struct bt_sock_list *sk_list, 697 - int (* seq_show)(struct seq_file *, void *)) 697 + int (*seq_show)(struct seq_file *, void *)) 698 698 { 699 699 return 0; 700 700 }
+3
net/bluetooth/amp.c
··· 297 297 struct hci_request req; 298 298 int err; 299 299 300 + if (!mgr) 301 + return; 302 + 300 303 cp.phy_handle = hcon->handle; 301 304 cp.len_so_far = cpu_to_le16(0); 302 305 cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+35 -2
net/bluetooth/hci_conn.c
··· 203 203 204 204 BT_DBG("hcon %p", conn); 205 205 206 + /* Many controllers disallow HCI Create Connection while it is doing 207 + * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create 208 + * Connection. This may cause the MGMT discovering state to become false 209 + * without user space's request but it is okay since the MGMT Discovery 210 + * APIs do not promise that discovery should be done forever. Instead, 211 + * the user space monitors the status of MGMT discovering and it may 212 + * request for discovery again when this flag becomes false. 213 + */ 214 + if (test_bit(HCI_INQUIRY, &hdev->flags)) { 215 + /* Put this connection to "pending" state so that it will be 216 + * executed after the inquiry cancel command complete event. 217 + */ 218 + conn->state = BT_CONNECT2; 219 + hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); 220 + return; 221 + } 222 + 206 223 conn->state = BT_CONNECT; 207 224 conn->out = true; 208 225 conn->role = HCI_ROLE_MASTER; ··· 293 276 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); 294 277 } 295 278 279 + static bool find_next_esco_param(struct hci_conn *conn, 280 + const struct sco_param *esco_param, int size) 281 + { 282 + for (; conn->attempt <= size; conn->attempt++) { 283 + if (lmp_esco_2m_capable(conn->link) || 284 + (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) 285 + break; 286 + BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported", 287 + conn, conn->attempt); 288 + } 289 + 290 + return conn->attempt <= size; 291 + } 292 + 296 293 bool hci_setup_sync(struct hci_conn *conn, __u16 handle) 297 294 { 298 295 struct hci_dev *hdev = conn->hdev; ··· 328 297 329 298 switch (conn->setting & SCO_AIRMODE_MASK) { 330 299 case SCO_AIRMODE_TRANSP: 331 - if (conn->attempt > ARRAY_SIZE(esco_param_msbc)) 300 + if (!find_next_esco_param(conn, esco_param_msbc, 301 + ARRAY_SIZE(esco_param_msbc))) 332 302 return false; 333 303 param = &esco_param_msbc[conn->attempt - 1]; 334 304 break; 335 305 case 
SCO_AIRMODE_CVSD: 336 306 if (lmp_esco_capable(conn->link)) { 337 - if (conn->attempt > ARRAY_SIZE(esco_param_cvsd)) 307 + if (!find_next_esco_param(conn, esco_param_cvsd, 308 + ARRAY_SIZE(esco_param_cvsd))) 338 309 return false; 339 310 param = &esco_param_cvsd[conn->attempt - 1]; 340 311 } else {
+175 -58
net/bluetooth/hci_core.c
··· 1362 1362 * cleared). If it is interrupted by a signal, return -EINTR. 1363 1363 */ 1364 1364 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, 1365 - TASK_INTERRUPTIBLE)) 1366 - return -EINTR; 1365 + TASK_INTERRUPTIBLE)) { 1366 + err = -EINTR; 1367 + goto done; 1368 + } 1367 1369 } 1368 1370 1369 1371 /* for unlimited number of responses we will use buffer with ··· 3053 3051 int handle; 3054 3052 3055 3053 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) 3056 - hci_free_adv_monitor(monitor); 3054 + hci_free_adv_monitor(hdev, monitor); 3057 3055 3058 3056 idr_destroy(&hdev->adv_monitors_idr); 3059 3057 } 3060 3058 3061 - void hci_free_adv_monitor(struct adv_monitor *monitor) 3059 + /* Frees the monitor structure and do some bookkeepings. 3060 + * This function requires the caller holds hdev->lock. 3061 + */ 3062 + void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 3062 3063 { 3063 3064 struct adv_pattern *pattern; 3064 3065 struct adv_pattern *tmp; ··· 3069 3064 if (!monitor) 3070 3065 return; 3071 3066 3072 - list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) 3067 + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { 3068 + list_del(&pattern->list); 3073 3069 kfree(pattern); 3070 + } 3071 + 3072 + if (monitor->handle) 3073 + idr_remove(&hdev->adv_monitors_idr, monitor->handle); 3074 + 3075 + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { 3076 + hdev->adv_monitors_cnt--; 3077 + mgmt_adv_monitor_removed(hdev, monitor->handle); 3078 + } 3074 3079 3075 3080 kfree(monitor); 3076 3081 } 3077 3082 3078 - /* This function requires the caller holds hdev->lock */ 3079 - int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 3083 + int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) 3084 + { 3085 + return mgmt_add_adv_patterns_monitor_complete(hdev, status); 3086 + } 3087 + 3088 + int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) 3089 + { 
3090 + return mgmt_remove_adv_monitor_complete(hdev, status); 3091 + } 3092 + 3093 + /* Assigns handle to a monitor, and if offloading is supported and power is on, 3094 + * also attempts to forward the request to the controller. 3095 + * Returns true if request is forwarded (result is pending), false otherwise. 3096 + * This function requires the caller holds hdev->lock. 3097 + */ 3098 + bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 3099 + int *err) 3080 3100 { 3081 3101 int min, max, handle; 3082 3102 3083 - if (!monitor) 3084 - return -EINVAL; 3103 + *err = 0; 3104 + 3105 + if (!monitor) { 3106 + *err = -EINVAL; 3107 + return false; 3108 + } 3085 3109 3086 3110 min = HCI_MIN_ADV_MONITOR_HANDLE; 3087 3111 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; 3088 3112 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, 3089 3113 GFP_KERNEL); 3090 - if (handle < 0) 3091 - return handle; 3092 - 3093 - hdev->adv_monitors_cnt++; 3094 - monitor->handle = handle; 3095 - 3096 - hci_update_background_scan(hdev); 3097 - 3098 - return 0; 3099 - } 3100 - 3101 - static int free_adv_monitor(int id, void *ptr, void *data) 3102 - { 3103 - struct hci_dev *hdev = data; 3104 - struct adv_monitor *monitor = ptr; 3105 - 3106 - idr_remove(&hdev->adv_monitors_idr, monitor->handle); 3107 - hci_free_adv_monitor(monitor); 3108 - hdev->adv_monitors_cnt--; 3109 - 3110 - return 0; 3111 - } 3112 - 3113 - /* This function requires the caller holds hdev->lock */ 3114 - int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) 3115 - { 3116 - struct adv_monitor *monitor; 3117 - 3118 - if (handle) { 3119 - monitor = idr_find(&hdev->adv_monitors_idr, handle); 3120 - if (!monitor) 3121 - return -ENOENT; 3122 - 3123 - idr_remove(&hdev->adv_monitors_idr, monitor->handle); 3124 - hci_free_adv_monitor(monitor); 3125 - hdev->adv_monitors_cnt--; 3126 - } else { 3127 - /* Remove all monitors if handle is 0. 
*/ 3128 - idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); 3114 + if (handle < 0) { 3115 + *err = handle; 3116 + return false; 3129 3117 } 3130 3118 3131 - hci_update_background_scan(hdev); 3119 + monitor->handle = handle; 3132 3120 3133 - return 0; 3121 + if (!hdev_is_powered(hdev)) 3122 + return false; 3123 + 3124 + switch (hci_get_adv_monitor_offload_ext(hdev)) { 3125 + case HCI_ADV_MONITOR_EXT_NONE: 3126 + hci_update_background_scan(hdev); 3127 + bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err); 3128 + /* Message was not forwarded to controller - not an error */ 3129 + return false; 3130 + case HCI_ADV_MONITOR_EXT_MSFT: 3131 + *err = msft_add_monitor_pattern(hdev, monitor); 3132 + bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name, 3133 + *err); 3134 + break; 3135 + } 3136 + 3137 + return (*err == 0); 3138 + } 3139 + 3140 + /* Attempts to tell the controller and free the monitor. If somehow the 3141 + * controller doesn't have a corresponding handle, remove anyway. 3142 + * Returns true if request is forwarded (result is pending), false otherwise. 3143 + * This function requires the caller holds hdev->lock. 
3144 + */ 3145 + static bool hci_remove_adv_monitor(struct hci_dev *hdev, 3146 + struct adv_monitor *monitor, 3147 + u16 handle, int *err) 3148 + { 3149 + *err = 0; 3150 + 3151 + switch (hci_get_adv_monitor_offload_ext(hdev)) { 3152 + case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ 3153 + goto free_monitor; 3154 + case HCI_ADV_MONITOR_EXT_MSFT: 3155 + *err = msft_remove_monitor(hdev, monitor, handle); 3156 + break; 3157 + } 3158 + 3159 + /* In case no matching handle registered, just free the monitor */ 3160 + if (*err == -ENOENT) 3161 + goto free_monitor; 3162 + 3163 + return (*err == 0); 3164 + 3165 + free_monitor: 3166 + if (*err == -ENOENT) 3167 + bt_dev_warn(hdev, "Removing monitor with no matching handle %d", 3168 + monitor->handle); 3169 + hci_free_adv_monitor(hdev, monitor); 3170 + 3171 + *err = 0; 3172 + return false; 3173 + } 3174 + 3175 + /* Returns true if request is forwarded (result is pending), false otherwise. 3176 + * This function requires the caller holds hdev->lock. 3177 + */ 3178 + bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err) 3179 + { 3180 + struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); 3181 + bool pending; 3182 + 3183 + if (!monitor) { 3184 + *err = -EINVAL; 3185 + return false; 3186 + } 3187 + 3188 + pending = hci_remove_adv_monitor(hdev, monitor, handle, err); 3189 + if (!*err && !pending) 3190 + hci_update_background_scan(hdev); 3191 + 3192 + bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending", 3193 + hdev->name, handle, *err, pending ? "" : "not "); 3194 + 3195 + return pending; 3196 + } 3197 + 3198 + /* Returns true if request is forwarded (result is pending), false otherwise. 3199 + * This function requires the caller holds hdev->lock. 
3200 + */ 3201 + bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err) 3202 + { 3203 + struct adv_monitor *monitor; 3204 + int idr_next_id = 0; 3205 + bool pending = false; 3206 + bool update = false; 3207 + 3208 + *err = 0; 3209 + 3210 + while (!*err && !pending) { 3211 + monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); 3212 + if (!monitor) 3213 + break; 3214 + 3215 + pending = hci_remove_adv_monitor(hdev, monitor, 0, err); 3216 + 3217 + if (!*err && !pending) 3218 + update = true; 3219 + } 3220 + 3221 + if (update) 3222 + hci_update_background_scan(hdev); 3223 + 3224 + bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending", 3225 + hdev->name, *err, pending ? "" : "not "); 3226 + 3227 + return pending; 3134 3228 } 3135 3229 3136 3230 /* This function requires the caller holds hdev->lock */ 3137 3231 bool hci_is_adv_monitoring(struct hci_dev *hdev) 3138 3232 { 3139 3233 return !idr_is_empty(&hdev->adv_monitors_idr); 3234 + } 3235 + 3236 + int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) 3237 + { 3238 + if (msft_monitor_supported(hdev)) 3239 + return HCI_ADV_MONITOR_EXT_MSFT; 3240 + 3241 + return HCI_ADV_MONITOR_EXT_NONE; 3140 3242 } 3141 3243 3142 3244 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, ··· 3678 3566 } 3679 3567 3680 3568 /* Suspend notifier should only act on events when powered. 
*/ 3681 - if (!hdev_is_powered(hdev)) 3569 + if (!hdev_is_powered(hdev) || 3570 + hci_dev_test_flag(hdev, HCI_UNREGISTER)) 3682 3571 goto done; 3683 3572 3684 3573 if (action == PM_SUSPEND_PREPARE) { ··· 3940 3827 hci_sock_dev_event(hdev, HCI_DEV_REG); 3941 3828 hci_dev_hold(hdev); 3942 3829 3943 - hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 3944 - error = register_pm_notifier(&hdev->suspend_notifier); 3945 - if (error) 3946 - goto err_wqueue; 3830 + if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 3831 + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 3832 + error = register_pm_notifier(&hdev->suspend_notifier); 3833 + if (error) 3834 + goto err_wqueue; 3835 + } 3947 3836 3948 3837 queue_work(hdev->req_workqueue, &hdev->power_on); 3949 3838 ··· 3980 3865 3981 3866 cancel_work_sync(&hdev->power_on); 3982 3867 3983 - hci_suspend_clear_tasks(hdev); 3984 - unregister_pm_notifier(&hdev->suspend_notifier); 3985 - cancel_work_sync(&hdev->suspend_prepare); 3868 + if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 3869 + hci_suspend_clear_tasks(hdev); 3870 + unregister_pm_notifier(&hdev->suspend_notifier); 3871 + cancel_work_sync(&hdev->suspend_prepare); 3872 + } 3986 3873 3987 3874 hci_dev_do_close(hdev); 3988 3875
+40 -40
net/bluetooth/hci_debugfs.c
··· 237 237 return 0; 238 238 } 239 239 240 - DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, 241 - conn_info_min_age_set, "%llu\n"); 240 + DEFINE_DEBUGFS_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, 241 + conn_info_min_age_set, "%llu\n"); 242 242 243 243 static int conn_info_max_age_set(void *data, u64 val) 244 244 { ··· 265 265 return 0; 266 266 } 267 267 268 - DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, 269 - conn_info_max_age_set, "%llu\n"); 268 + DEFINE_DEBUGFS_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, 269 + conn_info_max_age_set, "%llu\n"); 270 270 271 271 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf, 272 272 size_t count, loff_t *ppos) ··· 419 419 return 0; 420 420 } 421 421 422 - DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get, 423 - NULL, "0x%4.4llx\n"); 422 + DEFINE_DEBUGFS_ATTRIBUTE(voice_setting_fops, voice_setting_get, 423 + NULL, "0x%4.4llx\n"); 424 424 425 425 static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf, 426 426 size_t count, loff_t *ppos) ··· 476 476 return 0; 477 477 } 478 478 479 - DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops, 480 - min_encrypt_key_size_get, 481 - min_encrypt_key_size_set, "%llu\n"); 479 + DEFINE_DEBUGFS_ATTRIBUTE(min_encrypt_key_size_fops, 480 + min_encrypt_key_size_get, 481 + min_encrypt_key_size_set, "%llu\n"); 482 482 483 483 static int auto_accept_delay_get(void *data, u64 *val) 484 484 { ··· 491 491 return 0; 492 492 } 493 493 494 - DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 495 - auto_accept_delay_set, "%llu\n"); 494 + DEFINE_DEBUGFS_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 495 + auto_accept_delay_set, "%llu\n"); 496 496 497 497 static ssize_t force_bredr_smp_read(struct file *file, 498 498 char __user *user_buf, ··· 558 558 return 0; 559 559 } 560 560 561 - DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, 562 - 
idle_timeout_set, "%llu\n"); 561 + DEFINE_DEBUGFS_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, 562 + idle_timeout_set, "%llu\n"); 563 563 564 564 static int sniff_min_interval_set(void *data, u64 val) 565 565 { ··· 586 586 return 0; 587 587 } 588 588 589 - DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, 590 - sniff_min_interval_set, "%llu\n"); 589 + DEFINE_DEBUGFS_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, 590 + sniff_min_interval_set, "%llu\n"); 591 591 592 592 static int sniff_max_interval_set(void *data, u64 val) 593 593 { ··· 614 614 return 0; 615 615 } 616 616 617 - DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, 618 - sniff_max_interval_set, "%llu\n"); 617 + DEFINE_DEBUGFS_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, 618 + sniff_max_interval_set, "%llu\n"); 619 619 620 620 void hci_debugfs_create_bredr(struct hci_dev *hdev) 621 621 { ··· 706 706 return 0; 707 707 } 708 708 709 - DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, 710 - rpa_timeout_set, "%llu\n"); 709 + DEFINE_DEBUGFS_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, 710 + rpa_timeout_set, "%llu\n"); 711 711 712 712 static int random_address_show(struct seq_file *f, void *p) 713 713 { ··· 869 869 return 0; 870 870 } 871 871 872 - DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get, 873 - conn_min_interval_set, "%llu\n"); 872 + DEFINE_DEBUGFS_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get, 873 + conn_min_interval_set, "%llu\n"); 874 874 875 875 static int conn_max_interval_set(void *data, u64 val) 876 876 { ··· 897 897 return 0; 898 898 } 899 899 900 - DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, 901 - conn_max_interval_set, "%llu\n"); 900 + DEFINE_DEBUGFS_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, 901 + conn_max_interval_set, "%llu\n"); 902 902 903 903 static int conn_latency_set(void *data, u64 val) 904 904 { ··· 925 925 return 0; 926 926 } 927 
927 928 - DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get, 929 - conn_latency_set, "%llu\n"); 928 + DEFINE_DEBUGFS_ATTRIBUTE(conn_latency_fops, conn_latency_get, 929 + conn_latency_set, "%llu\n"); 930 930 931 931 static int supervision_timeout_set(void *data, u64 val) 932 932 { ··· 953 953 return 0; 954 954 } 955 955 956 - DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get, 957 - supervision_timeout_set, "%llu\n"); 956 + DEFINE_DEBUGFS_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get, 957 + supervision_timeout_set, "%llu\n"); 958 958 959 959 static int adv_channel_map_set(void *data, u64 val) 960 960 { ··· 981 981 return 0; 982 982 } 983 983 984 - DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, 985 - adv_channel_map_set, "%llu\n"); 984 + DEFINE_DEBUGFS_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, 985 + adv_channel_map_set, "%llu\n"); 986 986 987 987 static int adv_min_interval_set(void *data, u64 val) 988 988 { ··· 1009 1009 return 0; 1010 1010 } 1011 1011 1012 - DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get, 1013 - adv_min_interval_set, "%llu\n"); 1012 + DEFINE_DEBUGFS_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get, 1013 + adv_min_interval_set, "%llu\n"); 1014 1014 1015 1015 static int adv_max_interval_set(void *data, u64 val) 1016 1016 { ··· 1037 1037 return 0; 1038 1038 } 1039 1039 1040 - DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get, 1041 - adv_max_interval_set, "%llu\n"); 1040 + DEFINE_DEBUGFS_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get, 1041 + adv_max_interval_set, "%llu\n"); 1042 1042 1043 1043 static int min_key_size_set(void *data, u64 val) 1044 1044 { ··· 1065 1065 return 0; 1066 1066 } 1067 1067 1068 - DEFINE_SIMPLE_ATTRIBUTE(min_key_size_fops, min_key_size_get, 1069 - min_key_size_set, "%llu\n"); 1068 + DEFINE_DEBUGFS_ATTRIBUTE(min_key_size_fops, min_key_size_get, 1069 + min_key_size_set, "%llu\n"); 1070 1070 1071 1071 
static int max_key_size_set(void *data, u64 val) 1072 1072 { ··· 1093 1093 return 0; 1094 1094 } 1095 1095 1096 - DEFINE_SIMPLE_ATTRIBUTE(max_key_size_fops, max_key_size_get, 1097 - max_key_size_set, "%llu\n"); 1096 + DEFINE_DEBUGFS_ATTRIBUTE(max_key_size_fops, max_key_size_get, 1097 + max_key_size_set, "%llu\n"); 1098 1098 1099 1099 static int auth_payload_timeout_set(void *data, u64 val) 1100 1100 { ··· 1121 1121 return 0; 1122 1122 } 1123 1123 1124 - DEFINE_SIMPLE_ATTRIBUTE(auth_payload_timeout_fops, 1125 - auth_payload_timeout_get, 1126 - auth_payload_timeout_set, "%llu\n"); 1124 + DEFINE_DEBUGFS_ATTRIBUTE(auth_payload_timeout_fops, 1125 + auth_payload_timeout_get, 1126 + auth_payload_timeout_set, "%llu\n"); 1127 1127 1128 1128 static ssize_t force_no_mitm_read(struct file *file, 1129 1129 char __user *user_buf,
+48 -26
net/bluetooth/hci_request.c
··· 29 29 30 30 #include "smp.h" 31 31 #include "hci_request.h" 32 + #include "msft.h" 32 33 33 34 #define HCI_REQ_DONE 0 34 35 #define HCI_REQ_PEND 1 ··· 405 404 */ 406 405 static bool __hci_update_interleaved_scan(struct hci_dev *hdev) 407 406 { 408 - /* If there is at least one ADV monitors and one pending LE connection 409 - * or one device to be scanned for, we should alternate between 410 - * allowlist scan and one without any filters to save power. 407 + /* Do interleaved scan only if all of the following are true: 408 + * - There is at least one ADV monitor 409 + * - At least one pending LE connection or one device to be scanned for 410 + * - Monitor offloading is not supported 411 + * If so, we should alternate between allowlist scan and one without 412 + * any filters to save power. 411 413 */ 412 414 bool use_interleaving = hci_is_adv_monitoring(hdev) && 413 415 !(list_empty(&hdev->pend_le_conns) && 414 - list_empty(&hdev->pend_le_reports)); 416 + list_empty(&hdev->pend_le_reports)) && 417 + hci_get_adv_monitor_offload_ext(hdev) == 418 + HCI_ADV_MONITOR_EXT_NONE; 415 419 bool is_interleaving = is_interleave_scanning(hdev); 416 420 417 421 if (use_interleaving && !is_interleaving) { ··· 905 899 906 900 /* Use the allowlist unless the following conditions are all true: 907 901 * - We are not currently suspending 908 - * - There are 1 or more ADV monitors registered 902 + * - There are 1 or more ADV monitors registered and it's not offloaded 909 903 * - Interleaved scanning is not currently using the allowlist 910 - * 911 - * Once the controller offloading of advertisement monitor is in place, 912 - * the above condition should include the support of MSFT extension 913 - * support. 
914 904 */ 915 905 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && 906 + hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && 916 907 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) 917 908 return 0x00; 918 909 ··· 1090 1087 if (hdev->suspended) { 1091 1088 window = hdev->le_scan_window_suspend; 1092 1089 interval = hdev->le_scan_int_suspend; 1090 + 1091 + set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); 1093 1092 } else if (hci_is_le_conn_scanning(hdev)) { 1094 1093 window = hdev->le_scan_window_connect; 1095 1094 interval = hdev->le_scan_int_connect; ··· 1175 1170 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1176 1171 } 1177 1172 1178 - static void hci_req_config_le_suspend_scan(struct hci_request *req) 1179 - { 1180 - /* Before changing params disable scan if enabled */ 1181 - if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN)) 1182 - hci_req_add_le_scan_disable(req, false); 1183 - 1184 - /* Configure params and enable scanning */ 1185 - hci_req_add_le_passive_scan(req); 1186 - 1187 - /* Block suspend notifier on response */ 1188 - set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks); 1189 - } 1190 - 1191 1173 static void cancel_adv_timeout(struct hci_dev *hdev) 1192 1174 { 1193 1175 if (hdev->adv_instance_timeout) { ··· 1237 1245 { 1238 1246 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, 1239 1247 status); 1240 - if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || 1241 - test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { 1248 + if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || 1249 + test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { 1250 + clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); 1251 + clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); 1242 1252 wake_up(&hdev->suspend_wait_q); 1243 1253 } 1254 + 1255 + if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) { 1256 + clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); 1257 
+ wake_up(&hdev->suspend_wait_q); 1258 + } 1259 + } 1260 + 1261 + static void hci_req_add_set_adv_filter_enable(struct hci_request *req, 1262 + bool enable) 1263 + { 1264 + struct hci_dev *hdev = req->hdev; 1265 + 1266 + switch (hci_get_adv_monitor_offload_ext(hdev)) { 1267 + case HCI_ADV_MONITOR_EXT_MSFT: 1268 + msft_req_add_set_filter_enable(req, enable); 1269 + break; 1270 + default: 1271 + return; 1272 + } 1273 + 1274 + /* No need to block when enabling since it's on resume path */ 1275 + if (hdev->suspended && !enable) 1276 + set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); 1244 1277 } 1245 1278 1246 1279 /* Call with hci_dev_lock */ ··· 1325 1308 hci_req_add_le_scan_disable(&req, false); 1326 1309 } 1327 1310 1311 + /* Disable advertisement filters */ 1312 + hci_req_add_set_adv_filter_enable(&req, false); 1313 + 1328 1314 /* Mark task needing completion */ 1329 1315 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); 1330 1316 ··· 1356 1336 /* Enable event filter for paired devices */ 1357 1337 hci_req_set_event_filter(&req); 1358 1338 /* Enable passive scan at lower duty cycle */ 1359 - hci_req_config_le_suspend_scan(&req); 1339 + __hci_update_background_scan(&req); 1360 1340 /* Pause scan changes again. */ 1361 1341 hdev->scanning_paused = true; 1362 1342 hci_req_run(&req, suspend_req_complete); ··· 1366 1346 1367 1347 hci_req_clear_event_filter(&req); 1368 1348 /* Reset passive/background scanning to normal */ 1369 - hci_req_config_le_suspend_scan(&req); 1349 + __hci_update_background_scan(&req); 1350 + /* Enable all of the advertisement filters */ 1351 + hci_req_add_set_adv_filter_enable(&req, true); 1370 1352 1371 1353 /* Unpause directed advertising */ 1372 1354 hdev->advertising_paused = false;
+94 -25
net/bluetooth/l2cap_core.c
··· 4519 4519 } 4520 4520 goto done; 4521 4521 4522 + case L2CAP_CONF_UNKNOWN: 4522 4523 case L2CAP_CONF_UNACCEPT: 4523 4524 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 4524 4525 char req[64]; ··· 8277 8276 mutex_unlock(&conn->chan_lock); 8278 8277 } 8279 8278 8279 + /* Append fragment into frame respecting the maximum len of rx_skb */ 8280 + static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, 8281 + u16 len) 8282 + { 8283 + if (!conn->rx_skb) { 8284 + /* Allocate skb for the complete frame (with header) */ 8285 + conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); 8286 + if (!conn->rx_skb) 8287 + return -ENOMEM; 8288 + /* Init rx_len */ 8289 + conn->rx_len = len; 8290 + } 8291 + 8292 + /* Copy as much as the rx_skb can hold */ 8293 + len = min_t(u16, len, skb->len); 8294 + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len); 8295 + skb_pull(skb, len); 8296 + conn->rx_len -= len; 8297 + 8298 + return len; 8299 + } 8300 + 8301 + static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb) 8302 + { 8303 + struct sk_buff *rx_skb; 8304 + int len; 8305 + 8306 + /* Append just enough to complete the header */ 8307 + len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len); 8308 + 8309 + /* If header could not be read just continue */ 8310 + if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE) 8311 + return len; 8312 + 8313 + rx_skb = conn->rx_skb; 8314 + len = get_unaligned_le16(rx_skb->data); 8315 + 8316 + /* Check if rx_skb has enough space to received all fragments */ 8317 + if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) { 8318 + /* Update expected len */ 8319 + conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE); 8320 + return L2CAP_LEN_SIZE; 8321 + } 8322 + 8323 + /* Reset conn->rx_skb since it will need to be reallocated in order to 8324 + * fit all fragments. 
8325 + */ 8326 + conn->rx_skb = NULL; 8327 + 8328 + /* Reallocates rx_skb using the exact expected length */ 8329 + len = l2cap_recv_frag(conn, rx_skb, 8330 + len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE)); 8331 + kfree_skb(rx_skb); 8332 + 8333 + return len; 8334 + } 8335 + 8336 + static void l2cap_recv_reset(struct l2cap_conn *conn) 8337 + { 8338 + kfree_skb(conn->rx_skb); 8339 + conn->rx_skb = NULL; 8340 + conn->rx_len = 0; 8341 + } 8342 + 8280 8343 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) 8281 8344 { 8282 8345 struct l2cap_conn *conn = hcon->l2cap_data; 8283 - struct l2cap_hdr *hdr; 8284 8346 int len; 8285 8347 8286 8348 /* For AMP controller do not create l2cap conn */ ··· 8362 8298 case ACL_START: 8363 8299 case ACL_START_NO_FLUSH: 8364 8300 case ACL_COMPLETE: 8365 - if (conn->rx_len) { 8301 + if (conn->rx_skb) { 8366 8302 BT_ERR("Unexpected start frame (len %d)", skb->len); 8367 - kfree_skb(conn->rx_skb); 8368 - conn->rx_skb = NULL; 8369 - conn->rx_len = 0; 8303 + l2cap_recv_reset(conn); 8370 8304 l2cap_conn_unreliable(conn, ECOMM); 8371 8305 } 8372 8306 8373 - /* Start fragment always begin with Basic L2CAP header */ 8374 - if (skb->len < L2CAP_HDR_SIZE) { 8375 - BT_ERR("Frame is too short (len %d)", skb->len); 8376 - l2cap_conn_unreliable(conn, ECOMM); 8377 - goto drop; 8307 + /* Start fragment may not contain the L2CAP length so just 8308 + * copy the initial byte when that happens and use conn->mtu as 8309 + * expected length. 
8310 + */ 8311 + if (skb->len < L2CAP_LEN_SIZE) { 8312 + if (l2cap_recv_frag(conn, skb, conn->mtu) < 0) 8313 + goto drop; 8314 + return; 8378 8315 } 8379 8316 8380 - hdr = (struct l2cap_hdr *) skb->data; 8381 - len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; 8317 + len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; 8382 8318 8383 8319 if (len == skb->len) { 8384 8320 /* Complete frame received */ ··· 8395 8331 goto drop; 8396 8332 } 8397 8333 8398 - /* Allocate skb for the complete frame (with header) */ 8399 - conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); 8400 - if (!conn->rx_skb) 8334 + /* Append fragment into frame (with header) */ 8335 + if (l2cap_recv_frag(conn, skb, len) < 0) 8401 8336 goto drop; 8402 8337 8403 - skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 8404 - skb->len); 8405 - conn->rx_len = len - skb->len; 8406 8338 break; 8407 8339 8408 8340 case ACL_CONT: 8409 8341 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); 8410 8342 8411 - if (!conn->rx_len) { 8343 + if (!conn->rx_skb) { 8412 8344 BT_ERR("Unexpected continuation frame (len %d)", skb->len); 8413 8345 l2cap_conn_unreliable(conn, ECOMM); 8414 8346 goto drop; 8415 8347 } 8416 8348 8349 + /* Complete the L2CAP length if it has not been read */ 8350 + if (conn->rx_skb->len < L2CAP_LEN_SIZE) { 8351 + if (l2cap_recv_len(conn, skb) < 0) { 8352 + l2cap_conn_unreliable(conn, ECOMM); 8353 + goto drop; 8354 + } 8355 + 8356 + /* Header still could not be read just continue */ 8357 + if (conn->rx_skb->len < L2CAP_LEN_SIZE) 8358 + return; 8359 + } 8360 + 8417 8361 if (skb->len > conn->rx_len) { 8418 8362 BT_ERR("Fragment is too long (len %d, expected %d)", 8419 8363 skb->len, conn->rx_len); 8420 - kfree_skb(conn->rx_skb); 8421 - conn->rx_skb = NULL; 8422 - conn->rx_len = 0; 8364 + l2cap_recv_reset(conn); 8423 8365 l2cap_conn_unreliable(conn, ECOMM); 8424 8366 goto drop; 8425 8367 } 8426 8368 8427 - skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 8428 - 
skb->len); 8429 - conn->rx_len -= skb->len; 8369 + /* Append fragment into frame (with header) */ 8370 + l2cap_recv_frag(conn, skb, skb->len); 8430 8371 8431 8372 if (!conn->rx_len) { 8432 8373 /* Complete frame received. l2cap_recv_frame
+318 -107
net/bluetooth/mgmt.c
··· 124 124 MGMT_OP_REMOVE_ADV_MONITOR, 125 125 MGMT_OP_ADD_EXT_ADV_PARAMS, 126 126 MGMT_OP_ADD_EXT_ADV_DATA, 127 + MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, 127 128 }; 128 129 129 130 static const u16 mgmt_events[] = { ··· 4167 4166 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); 4168 4167 } 4169 4168 4170 - static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev, 4171 - u16 handle) 4169 + void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) 4172 4170 { 4173 - struct mgmt_ev_adv_monitor_added ev; 4171 + struct mgmt_ev_adv_monitor_removed ev; 4172 + struct mgmt_pending_cmd *cmd; 4173 + struct sock *sk_skip = NULL; 4174 + struct mgmt_cp_remove_adv_monitor *cp; 4175 + 4176 + cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); 4177 + if (cmd) { 4178 + cp = cmd->param; 4179 + 4180 + if (cp->monitor_handle) 4181 + sk_skip = cmd->sk; 4182 + } 4174 4183 4175 4184 ev.monitor_handle = cpu_to_le16(handle); 4176 4185 4177 - mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk); 4186 + mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); 4178 4187 } 4179 4188 4180 4189 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, ··· 4195 4184 int handle, err; 4196 4185 size_t rp_size = 0; 4197 4186 __u32 supported = 0; 4187 + __u32 enabled = 0; 4198 4188 __u16 num_handles = 0; 4199 4189 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; 4200 4190 ··· 4203 4191 4204 4192 hci_dev_lock(hdev); 4205 4193 4206 - if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR) 4194 + if (msft_monitor_supported(hdev)) 4207 4195 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; 4208 4196 4209 - idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) { 4197 + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) 4210 4198 handles[num_handles++] = monitor->handle; 4211 - } 4212 4199 4213 4200 hci_dev_unlock(hdev); 4214 4201 ··· 4216 4205 if (!rp) 4217 4206 return -ENOMEM; 4218 4207 
4219 - /* Once controller-based monitoring is in place, the enabled_features 4220 - * should reflect the use. 4221 - */ 4208 + /* All supported features are currently enabled */ 4209 + enabled = supported; 4210 + 4222 4211 rp->supported_features = cpu_to_le32(supported); 4223 - rp->enabled_features = 0; 4212 + rp->enabled_features = cpu_to_le32(enabled); 4224 4213 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); 4225 4214 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; 4226 4215 rp->num_handles = cpu_to_le16(num_handles); ··· 4236 4225 return err; 4237 4226 } 4238 4227 4239 - static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, 4240 - void *data, u16 len) 4228 + int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) 4241 4229 { 4242 - struct mgmt_cp_add_adv_patterns_monitor *cp = data; 4243 4230 struct mgmt_rp_add_adv_patterns_monitor rp; 4244 - struct adv_monitor *m = NULL; 4231 + struct mgmt_pending_cmd *cmd; 4232 + struct adv_monitor *monitor; 4233 + int err = 0; 4234 + 4235 + hci_dev_lock(hdev); 4236 + 4237 + cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev); 4238 + if (!cmd) { 4239 + cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev); 4240 + if (!cmd) 4241 + goto done; 4242 + } 4243 + 4244 + monitor = cmd->user_data; 4245 + rp.monitor_handle = cpu_to_le16(monitor->handle); 4246 + 4247 + if (!status) { 4248 + mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle); 4249 + hdev->adv_monitors_cnt++; 4250 + if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED) 4251 + monitor->state = ADV_MONITOR_STATE_REGISTERED; 4252 + hci_update_background_scan(hdev); 4253 + } 4254 + 4255 + err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, 4256 + mgmt_status(status), &rp, sizeof(rp)); 4257 + mgmt_pending_remove(cmd); 4258 + 4259 + done: 4260 + hci_dev_unlock(hdev); 4261 + bt_dev_dbg(hdev, "add monitor %d complete, status %d", 4262 + rp.monitor_handle, status); 4263 + 4264 + 
return err; 4265 + } 4266 + 4267 + static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, 4268 + struct adv_monitor *m, u8 status, 4269 + void *data, u16 len, u16 op) 4270 + { 4271 + struct mgmt_rp_add_adv_patterns_monitor rp; 4272 + struct mgmt_pending_cmd *cmd; 4273 + int err; 4274 + bool pending; 4275 + 4276 + hci_dev_lock(hdev); 4277 + 4278 + if (status) 4279 + goto unlock; 4280 + 4281 + if (pending_find(MGMT_OP_SET_LE, hdev) || 4282 + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || 4283 + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) || 4284 + pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) { 4285 + status = MGMT_STATUS_BUSY; 4286 + goto unlock; 4287 + } 4288 + 4289 + cmd = mgmt_pending_add(sk, op, hdev, data, len); 4290 + if (!cmd) { 4291 + status = MGMT_STATUS_NO_RESOURCES; 4292 + goto unlock; 4293 + } 4294 + 4295 + cmd->user_data = m; 4296 + pending = hci_add_adv_monitor(hdev, m, &err); 4297 + if (err) { 4298 + if (err == -ENOSPC || err == -ENOMEM) 4299 + status = MGMT_STATUS_NO_RESOURCES; 4300 + else if (err == -EINVAL) 4301 + status = MGMT_STATUS_INVALID_PARAMS; 4302 + else 4303 + status = MGMT_STATUS_FAILED; 4304 + 4305 + mgmt_pending_remove(cmd); 4306 + goto unlock; 4307 + } 4308 + 4309 + if (!pending) { 4310 + mgmt_pending_remove(cmd); 4311 + rp.monitor_handle = cpu_to_le16(m->handle); 4312 + mgmt_adv_monitor_added(sk, hdev, m->handle); 4313 + m->state = ADV_MONITOR_STATE_REGISTERED; 4314 + hdev->adv_monitors_cnt++; 4315 + 4316 + hci_dev_unlock(hdev); 4317 + return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS, 4318 + &rp, sizeof(rp)); 4319 + } 4320 + 4321 + hci_dev_unlock(hdev); 4322 + 4323 + return 0; 4324 + 4325 + unlock: 4326 + hci_free_adv_monitor(hdev, m); 4327 + hci_dev_unlock(hdev); 4328 + return mgmt_cmd_status(sk, hdev->id, op, status); 4329 + } 4330 + 4331 + static void parse_adv_monitor_rssi(struct adv_monitor *m, 4332 + struct mgmt_adv_rssi_thresholds *rssi) 4333 + { 4334 + if (rssi) { 
4335 + m->rssi.low_threshold = rssi->low_threshold; 4336 + m->rssi.low_threshold_timeout = 4337 + __le16_to_cpu(rssi->low_threshold_timeout); 4338 + m->rssi.high_threshold = rssi->high_threshold; 4339 + m->rssi.high_threshold_timeout = 4340 + __le16_to_cpu(rssi->high_threshold_timeout); 4341 + m->rssi.sampling_period = rssi->sampling_period; 4342 + } else { 4343 + /* Default values. These numbers are the least constricting 4344 + * parameters for MSFT API to work, so it behaves as if there 4345 + * are no rssi parameter to consider. May need to be changed 4346 + * if other API are to be supported. 4347 + */ 4348 + m->rssi.low_threshold = -127; 4349 + m->rssi.low_threshold_timeout = 60; 4350 + m->rssi.high_threshold = -127; 4351 + m->rssi.high_threshold_timeout = 0; 4352 + m->rssi.sampling_period = 0; 4353 + } 4354 + } 4355 + 4356 + static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count, 4357 + struct mgmt_adv_pattern *patterns) 4358 + { 4359 + u8 offset = 0, length = 0; 4245 4360 struct adv_pattern *p = NULL; 4246 - unsigned int mp_cnt = 0, prev_adv_monitors_cnt; 4247 - __u8 cp_ofst = 0, cp_len = 0; 4248 - int err, i; 4361 + int i; 4249 4362 4250 - BT_DBG("request for %s", hdev->name); 4251 - 4252 - if (len <= sizeof(*cp) || cp->pattern_count == 0) { 4253 - err = mgmt_cmd_status(sk, hdev->id, 4254 - MGMT_OP_ADD_ADV_PATTERNS_MONITOR, 4255 - MGMT_STATUS_INVALID_PARAMS); 4256 - goto failed; 4257 - } 4258 - 4259 - m = kmalloc(sizeof(*m), GFP_KERNEL); 4260 - if (!m) { 4261 - err = -ENOMEM; 4262 - goto failed; 4263 - } 4264 - 4265 - INIT_LIST_HEAD(&m->patterns); 4266 - m->active = false; 4267 - 4268 - for (i = 0; i < cp->pattern_count; i++) { 4269 - if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) { 4270 - err = mgmt_cmd_status(sk, hdev->id, 4271 - MGMT_OP_ADD_ADV_PATTERNS_MONITOR, 4272 - MGMT_STATUS_INVALID_PARAMS); 4273 - goto failed; 4274 - } 4275 - 4276 - cp_ofst = cp->patterns[i].offset; 4277 - cp_len = cp->patterns[i].length; 4278 - if (cp_ofst 
>= HCI_MAX_AD_LENGTH || 4279 - cp_len > HCI_MAX_AD_LENGTH || 4280 - (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) { 4281 - err = mgmt_cmd_status(sk, hdev->id, 4282 - MGMT_OP_ADD_ADV_PATTERNS_MONITOR, 4283 - MGMT_STATUS_INVALID_PARAMS); 4284 - goto failed; 4285 - } 4363 + for (i = 0; i < pattern_count; i++) { 4364 + offset = patterns[i].offset; 4365 + length = patterns[i].length; 4366 + if (offset >= HCI_MAX_AD_LENGTH || 4367 + length > HCI_MAX_AD_LENGTH || 4368 + (offset + length) > HCI_MAX_AD_LENGTH) 4369 + return MGMT_STATUS_INVALID_PARAMS; 4286 4370 4287 4371 p = kmalloc(sizeof(*p), GFP_KERNEL); 4288 - if (!p) { 4289 - err = -ENOMEM; 4290 - goto failed; 4291 - } 4372 + if (!p) 4373 + return MGMT_STATUS_NO_RESOURCES; 4292 4374 4293 - p->ad_type = cp->patterns[i].ad_type; 4294 - p->offset = cp->patterns[i].offset; 4295 - p->length = cp->patterns[i].length; 4296 - memcpy(p->value, cp->patterns[i].value, p->length); 4375 + p->ad_type = patterns[i].ad_type; 4376 + p->offset = patterns[i].offset; 4377 + p->length = patterns[i].length; 4378 + memcpy(p->value, patterns[i].value, p->length); 4297 4379 4298 4380 INIT_LIST_HEAD(&p->list); 4299 4381 list_add(&p->list, &m->patterns); 4300 4382 } 4301 4383 4302 - if (mp_cnt != cp->pattern_count) { 4303 - err = mgmt_cmd_status(sk, hdev->id, 4304 - MGMT_OP_ADD_ADV_PATTERNS_MONITOR, 4305 - MGMT_STATUS_INVALID_PARAMS); 4306 - goto failed; 4384 + return MGMT_STATUS_SUCCESS; 4385 + } 4386 + 4387 + static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, 4388 + void *data, u16 len) 4389 + { 4390 + struct mgmt_cp_add_adv_patterns_monitor *cp = data; 4391 + struct adv_monitor *m = NULL; 4392 + u8 status = MGMT_STATUS_SUCCESS; 4393 + size_t expected_size = sizeof(*cp); 4394 + 4395 + BT_DBG("request for %s", hdev->name); 4396 + 4397 + if (len <= sizeof(*cp)) { 4398 + status = MGMT_STATUS_INVALID_PARAMS; 4399 + goto done; 4307 4400 } 4401 + 4402 + expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern); 4403 + if 
(len != expected_size) { 4404 + status = MGMT_STATUS_INVALID_PARAMS; 4405 + goto done; 4406 + } 4407 + 4408 + m = kzalloc(sizeof(*m), GFP_KERNEL); 4409 + if (!m) { 4410 + status = MGMT_STATUS_NO_RESOURCES; 4411 + goto done; 4412 + } 4413 + 4414 + INIT_LIST_HEAD(&m->patterns); 4415 + 4416 + parse_adv_monitor_rssi(m, NULL); 4417 + status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); 4418 + 4419 + done: 4420 + return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, 4421 + MGMT_OP_ADD_ADV_PATTERNS_MONITOR); 4422 + } 4423 + 4424 + static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev, 4425 + void *data, u16 len) 4426 + { 4427 + struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data; 4428 + struct adv_monitor *m = NULL; 4429 + u8 status = MGMT_STATUS_SUCCESS; 4430 + size_t expected_size = sizeof(*cp); 4431 + 4432 + BT_DBG("request for %s", hdev->name); 4433 + 4434 + if (len <= sizeof(*cp)) { 4435 + status = MGMT_STATUS_INVALID_PARAMS; 4436 + goto done; 4437 + } 4438 + 4439 + expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern); 4440 + if (len != expected_size) { 4441 + status = MGMT_STATUS_INVALID_PARAMS; 4442 + goto done; 4443 + } 4444 + 4445 + m = kzalloc(sizeof(*m), GFP_KERNEL); 4446 + if (!m) { 4447 + status = MGMT_STATUS_NO_RESOURCES; 4448 + goto done; 4449 + } 4450 + 4451 + INIT_LIST_HEAD(&m->patterns); 4452 + 4453 + parse_adv_monitor_rssi(m, &cp->rssi); 4454 + status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); 4455 + 4456 + done: 4457 + return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, 4458 + MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI); 4459 + } 4460 + 4461 + int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) 4462 + { 4463 + struct mgmt_rp_remove_adv_monitor rp; 4464 + struct mgmt_cp_remove_adv_monitor *cp; 4465 + struct mgmt_pending_cmd *cmd; 4466 + int err = 0; 4308 4467 4309 4468 hci_dev_lock(hdev); 4310 4469 4311 - prev_adv_monitors_cnt = 
hdev->adv_monitors_cnt; 4470 + cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); 4471 + if (!cmd) 4472 + goto done; 4312 4473 4313 - err = hci_add_adv_monitor(hdev, m); 4314 - if (err) { 4315 - if (err == -ENOSPC) { 4316 - mgmt_cmd_status(sk, hdev->id, 4317 - MGMT_OP_ADD_ADV_PATTERNS_MONITOR, 4318 - MGMT_STATUS_NO_RESOURCES); 4319 - } 4320 - goto unlock; 4321 - } 4474 + cp = cmd->param; 4475 + rp.monitor_handle = cp->monitor_handle; 4322 4476 4323 - if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt) 4324 - mgmt_adv_monitor_added(sk, hdev, m->handle); 4477 + if (!status) 4478 + hci_update_background_scan(hdev); 4325 4479 4480 + err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, 4481 + mgmt_status(status), &rp, sizeof(rp)); 4482 + mgmt_pending_remove(cmd); 4483 + 4484 + done: 4326 4485 hci_dev_unlock(hdev); 4486 + bt_dev_dbg(hdev, "remove monitor %d complete, status %d", 4487 + rp.monitor_handle, status); 4327 4488 4328 - rp.monitor_handle = cpu_to_le16(m->handle); 4329 - 4330 - return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, 4331 - MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); 4332 - 4333 - unlock: 4334 - hci_dev_unlock(hdev); 4335 - 4336 - failed: 4337 - hci_free_adv_monitor(m); 4338 4489 return err; 4339 4490 } 4340 4491 ··· 4505 4332 { 4506 4333 struct mgmt_cp_remove_adv_monitor *cp = data; 4507 4334 struct mgmt_rp_remove_adv_monitor rp; 4508 - unsigned int prev_adv_monitors_cnt; 4509 - u16 handle; 4510 - int err; 4335 + struct mgmt_pending_cmd *cmd; 4336 + u16 handle = __le16_to_cpu(cp->monitor_handle); 4337 + int err, status; 4338 + bool pending; 4511 4339 4512 4340 BT_DBG("request for %s", hdev->name); 4341 + rp.monitor_handle = cp->monitor_handle; 4513 4342 4514 4343 hci_dev_lock(hdev); 4515 4344 4516 - handle = __le16_to_cpu(cp->monitor_handle); 4517 - prev_adv_monitors_cnt = hdev->adv_monitors_cnt; 4518 - 4519 - err = hci_remove_adv_monitor(hdev, handle); 4520 - if (err == -ENOENT) { 4521 - err = mgmt_cmd_status(sk, hdev->id, 
MGMT_OP_REMOVE_ADV_MONITOR, 4522 - MGMT_STATUS_INVALID_INDEX); 4345 + if (pending_find(MGMT_OP_SET_LE, hdev) || 4346 + pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || 4347 + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || 4348 + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { 4349 + status = MGMT_STATUS_BUSY; 4523 4350 goto unlock; 4524 4351 } 4525 4352 4526 - if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt) 4527 - mgmt_adv_monitor_removed(sk, hdev, handle); 4353 + cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); 4354 + if (!cmd) { 4355 + status = MGMT_STATUS_NO_RESOURCES; 4356 + goto unlock; 4357 + } 4358 + 4359 + if (handle) 4360 + pending = hci_remove_single_adv_monitor(hdev, handle, &err); 4361 + else 4362 + pending = hci_remove_all_adv_monitor(hdev, &err); 4363 + 4364 + if (err) { 4365 + mgmt_pending_remove(cmd); 4366 + 4367 + if (err == -ENOENT) 4368 + status = MGMT_STATUS_INVALID_INDEX; 4369 + else 4370 + status = MGMT_STATUS_FAILED; 4371 + 4372 + goto unlock; 4373 + } 4374 + 4375 + /* monitor can be removed without forwarding request to controller */ 4376 + if (!pending) { 4377 + mgmt_pending_remove(cmd); 4378 + hci_dev_unlock(hdev); 4379 + 4380 + return mgmt_cmd_complete(sk, hdev->id, 4381 + MGMT_OP_REMOVE_ADV_MONITOR, 4382 + MGMT_STATUS_SUCCESS, 4383 + &rp, sizeof(rp)); 4384 + } 4528 4385 4529 4386 hci_dev_unlock(hdev); 4530 - 4531 - rp.monitor_handle = cp->monitor_handle; 4532 - 4533 - return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, 4534 - MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); 4387 + return 0; 4535 4388 4536 4389 unlock: 4537 4390 hci_dev_unlock(hdev); 4538 - return err; 4391 + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, 4392 + status); 4539 4393 } 4540 4394 4541 4395 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, ··· 4991 4791 4992 4792 if (hdev->discovery.state != DISCOVERY_STOPPED || 4993 4793 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 
{ 4794 + err = mgmt_cmd_complete(sk, hdev->id, 4795 + MGMT_OP_START_SERVICE_DISCOVERY, 4796 + MGMT_STATUS_BUSY, &cp->type, 4797 + sizeof(cp->type)); 4798 + goto failed; 4799 + } 4800 + 4801 + if (hdev->discovery_paused) { 4994 4802 err = mgmt_cmd_complete(sk, hdev->id, 4995 4803 MGMT_OP_START_SERVICE_DISCOVERY, 4996 4804 MGMT_STATUS_BUSY, &cp->type, ··· 8441 8233 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE, 8442 8234 HCI_MGMT_VAR_LEN }, 8443 8235 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE, 8236 + HCI_MGMT_VAR_LEN }, 8237 + { add_adv_patterns_monitor_rssi, 8238 + MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE, 8444 8239 HCI_MGMT_VAR_LEN }, 8445 8240 }; 8446 8241
+459 -1
net/bluetooth/msft.c
··· 5 5 6 6 #include <net/bluetooth/bluetooth.h> 7 7 #include <net/bluetooth/hci_core.h> 8 + #include <net/bluetooth/mgmt.h> 8 9 10 + #include "hci_request.h" 11 + #include "mgmt_util.h" 9 12 #include "msft.h" 13 + 14 + #define MSFT_RSSI_THRESHOLD_VALUE_MIN -127 15 + #define MSFT_RSSI_THRESHOLD_VALUE_MAX 20 16 + #define MSFT_RSSI_LOW_TIMEOUT_MAX 0x3C 10 17 11 18 #define MSFT_OP_READ_SUPPORTED_FEATURES 0x00 12 19 struct msft_cp_read_supported_features { ··· 28 21 __u8 evt_prefix[]; 29 22 } __packed; 30 23 24 + #define MSFT_OP_LE_MONITOR_ADVERTISEMENT 0x03 25 + #define MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN 0x01 26 + struct msft_le_monitor_advertisement_pattern { 27 + __u8 length; 28 + __u8 data_type; 29 + __u8 start_byte; 30 + __u8 pattern[0]; 31 + }; 32 + 33 + struct msft_le_monitor_advertisement_pattern_data { 34 + __u8 count; 35 + __u8 data[0]; 36 + }; 37 + 38 + struct msft_cp_le_monitor_advertisement { 39 + __u8 sub_opcode; 40 + __s8 rssi_high; 41 + __s8 rssi_low; 42 + __u8 rssi_low_interval; 43 + __u8 rssi_sampling_period; 44 + __u8 cond_type; 45 + __u8 data[0]; 46 + } __packed; 47 + 48 + struct msft_rp_le_monitor_advertisement { 49 + __u8 status; 50 + __u8 sub_opcode; 51 + __u8 handle; 52 + } __packed; 53 + 54 + #define MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT 0x04 55 + struct msft_cp_le_cancel_monitor_advertisement { 56 + __u8 sub_opcode; 57 + __u8 handle; 58 + } __packed; 59 + 60 + struct msft_rp_le_cancel_monitor_advertisement { 61 + __u8 status; 62 + __u8 sub_opcode; 63 + } __packed; 64 + 65 + #define MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE 0x05 66 + struct msft_cp_le_set_advertisement_filter_enable { 67 + __u8 sub_opcode; 68 + __u8 enable; 69 + } __packed; 70 + 71 + struct msft_rp_le_set_advertisement_filter_enable { 72 + __u8 status; 73 + __u8 sub_opcode; 74 + } __packed; 75 + 76 + struct msft_monitor_advertisement_handle_data { 77 + __u8 msft_handle; 78 + __u16 mgmt_handle; 79 + struct list_head list; 80 + }; 81 + 31 82 struct msft_data { 32 83 __u64 
features; 33 84 __u8 evt_prefix_len; 34 85 __u8 *evt_prefix; 86 + struct list_head handle_map; 87 + __u16 pending_add_handle; 88 + __u16 pending_remove_handle; 89 + __u8 reregistering; 90 + __u8 filter_enabled; 35 91 }; 92 + 93 + static int __msft_add_monitor_pattern(struct hci_dev *hdev, 94 + struct adv_monitor *monitor); 95 + 96 + bool msft_monitor_supported(struct hci_dev *hdev) 97 + { 98 + return !!(msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR); 99 + } 36 100 37 101 static bool read_supported_features(struct hci_dev *hdev, 38 102 struct msft_data *msft) ··· 150 72 return false; 151 73 } 152 74 75 + /* This function requires the caller holds hdev->lock */ 76 + static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle) 77 + { 78 + struct adv_monitor *monitor; 79 + struct msft_data *msft = hdev->msft_data; 80 + int err; 81 + 82 + while (1) { 83 + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); 84 + if (!monitor) { 85 + /* All monitors have been reregistered */ 86 + msft->reregistering = false; 87 + hci_update_background_scan(hdev); 88 + return; 89 + } 90 + 91 + msft->pending_add_handle = (u16)handle; 92 + err = __msft_add_monitor_pattern(hdev, monitor); 93 + 94 + /* If success, we return and wait for monitor added callback */ 95 + if (!err) 96 + return; 97 + 98 + /* Otherwise remove the monitor and keep registering */ 99 + hci_free_adv_monitor(hdev, monitor); 100 + handle++; 101 + } 102 + } 103 + 153 104 void msft_do_open(struct hci_dev *hdev) 154 105 { 155 106 struct msft_data *msft; ··· 197 90 return; 198 91 } 199 92 93 + INIT_LIST_HEAD(&msft->handle_map); 200 94 hdev->msft_data = msft; 95 + 96 + if (msft_monitor_supported(hdev)) { 97 + msft->reregistering = true; 98 + msft_set_filter_enable(hdev, true); 99 + reregister_monitor_on_restart(hdev, 0); 100 + } 201 101 } 202 102 203 103 void msft_do_close(struct hci_dev *hdev) 204 104 { 205 105 struct msft_data *msft = hdev->msft_data; 106 + struct 
msft_monitor_advertisement_handle_data *handle_data, *tmp; 107 + struct adv_monitor *monitor; 206 108 207 109 if (!msft) 208 110 return; ··· 219 103 bt_dev_dbg(hdev, "Cleanup of MSFT extension"); 220 104 221 105 hdev->msft_data = NULL; 106 + 107 + list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) { 108 + monitor = idr_find(&hdev->adv_monitors_idr, 109 + handle_data->mgmt_handle); 110 + 111 + if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) 112 + monitor->state = ADV_MONITOR_STATE_REGISTERED; 113 + 114 + list_del(&handle_data->list); 115 + kfree(handle_data); 116 + } 222 117 223 118 kfree(msft->evt_prefix); 224 119 kfree(msft); ··· 272 145 { 273 146 struct msft_data *msft = hdev->msft_data; 274 147 275 - return msft ? msft->features : 0; 148 + return msft ? msft->features : 0; 149 + } 150 + 151 + /* is_mgmt = true matches the handle exposed to userspace via mgmt. 152 + * is_mgmt = false matches the handle used by the msft controller. 153 + * This function requires the caller holds hdev->lock 154 + */ 155 + static struct msft_monitor_advertisement_handle_data *msft_find_handle_data 156 + (struct hci_dev *hdev, u16 handle, bool is_mgmt) 157 + { 158 + struct msft_monitor_advertisement_handle_data *entry; 159 + struct msft_data *msft = hdev->msft_data; 160 + 161 + list_for_each_entry(entry, &msft->handle_map, list) { 162 + if (is_mgmt && entry->mgmt_handle == handle) 163 + return entry; 164 + if (!is_mgmt && entry->msft_handle == handle) 165 + return entry; 166 + } 167 + 168 + return NULL; 169 + } 170 + 171 + static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, 172 + u8 status, u16 opcode, 173 + struct sk_buff *skb) 174 + { 175 + struct msft_rp_le_monitor_advertisement *rp; 176 + struct adv_monitor *monitor; 177 + struct msft_monitor_advertisement_handle_data *handle_data; 178 + struct msft_data *msft = hdev->msft_data; 179 + 180 + hci_dev_lock(hdev); 181 + 182 + monitor = idr_find(&hdev->adv_monitors_idr, 
msft->pending_add_handle); 183 + if (!monitor) { 184 + bt_dev_err(hdev, "msft add advmon: monitor %d is not found!", 185 + msft->pending_add_handle); 186 + status = HCI_ERROR_UNSPECIFIED; 187 + goto unlock; 188 + } 189 + 190 + if (status) 191 + goto unlock; 192 + 193 + rp = (struct msft_rp_le_monitor_advertisement *)skb->data; 194 + if (skb->len < sizeof(*rp)) { 195 + status = HCI_ERROR_UNSPECIFIED; 196 + goto unlock; 197 + } 198 + 199 + handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); 200 + if (!handle_data) { 201 + status = HCI_ERROR_UNSPECIFIED; 202 + goto unlock; 203 + } 204 + 205 + handle_data->mgmt_handle = monitor->handle; 206 + handle_data->msft_handle = rp->handle; 207 + INIT_LIST_HEAD(&handle_data->list); 208 + list_add(&handle_data->list, &msft->handle_map); 209 + 210 + monitor->state = ADV_MONITOR_STATE_OFFLOADED; 211 + 212 + unlock: 213 + if (status && monitor) 214 + hci_free_adv_monitor(hdev, monitor); 215 + 216 + /* If in restart/reregister sequence, keep registering. 
*/ 217 + if (msft->reregistering) 218 + reregister_monitor_on_restart(hdev, 219 + msft->pending_add_handle + 1); 220 + 221 + hci_dev_unlock(hdev); 222 + 223 + if (!msft->reregistering) 224 + hci_add_adv_patterns_monitor_complete(hdev, status); 225 + } 226 + 227 + static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, 228 + u8 status, u16 opcode, 229 + struct sk_buff *skb) 230 + { 231 + struct msft_cp_le_cancel_monitor_advertisement *cp; 232 + struct msft_rp_le_cancel_monitor_advertisement *rp; 233 + struct adv_monitor *monitor; 234 + struct msft_monitor_advertisement_handle_data *handle_data; 235 + struct msft_data *msft = hdev->msft_data; 236 + int err; 237 + bool pending; 238 + 239 + if (status) 240 + goto done; 241 + 242 + rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; 243 + if (skb->len < sizeof(*rp)) { 244 + status = HCI_ERROR_UNSPECIFIED; 245 + goto done; 246 + } 247 + 248 + hci_dev_lock(hdev); 249 + 250 + cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); 251 + handle_data = msft_find_handle_data(hdev, cp->handle, false); 252 + 253 + if (handle_data) { 254 + monitor = idr_find(&hdev->adv_monitors_idr, 255 + handle_data->mgmt_handle); 256 + if (monitor) 257 + hci_free_adv_monitor(hdev, monitor); 258 + 259 + list_del(&handle_data->list); 260 + kfree(handle_data); 261 + } 262 + 263 + /* If remove all monitors is required, we need to continue the process 264 + * here because the earlier it was paused when waiting for the 265 + * response from controller. 
266 + */ 267 + if (msft->pending_remove_handle == 0) { 268 + pending = hci_remove_all_adv_monitor(hdev, &err); 269 + if (pending) { 270 + hci_dev_unlock(hdev); 271 + return; 272 + } 273 + 274 + if (err) 275 + status = HCI_ERROR_UNSPECIFIED; 276 + } 277 + 278 + hci_dev_unlock(hdev); 279 + 280 + done: 281 + hci_remove_adv_monitor_complete(hdev, status); 282 + } 283 + 284 + static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, 285 + u8 status, u16 opcode, 286 + struct sk_buff *skb) 287 + { 288 + struct msft_cp_le_set_advertisement_filter_enable *cp; 289 + struct msft_rp_le_set_advertisement_filter_enable *rp; 290 + struct msft_data *msft = hdev->msft_data; 291 + 292 + rp = (struct msft_rp_le_set_advertisement_filter_enable *)skb->data; 293 + if (skb->len < sizeof(*rp)) 294 + return; 295 + 296 + /* Error 0x0C would be returned if the filter enabled status is 297 + * already set to whatever we were trying to set. 298 + * Although the default state should be disabled, some controller set 299 + * the initial value to enabled. Because there is no way to know the 300 + * actual initial value before sending this command, here we also treat 301 + * error 0x0C as success. 302 + */ 303 + if (status != 0x00 && status != 0x0C) 304 + return; 305 + 306 + hci_dev_lock(hdev); 307 + 308 + cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); 309 + msft->filter_enabled = cp->enable; 310 + 311 + if (status == 0x0C) 312 + bt_dev_warn(hdev, "MSFT filter_enable is already %s", 313 + cp->enable ? 
"on" : "off"); 314 + 315 + hci_dev_unlock(hdev); 316 + } 317 + 318 + static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) 319 + { 320 + struct adv_rssi_thresholds *r = &monitor->rssi; 321 + 322 + if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || 323 + r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX || 324 + r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || 325 + r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) 326 + return false; 327 + 328 + /* High_threshold_timeout is not supported, 329 + * once high_threshold is reached, events are immediately reported. 330 + */ 331 + if (r->high_threshold_timeout != 0) 332 + return false; 333 + 334 + if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) 335 + return false; 336 + 337 + /* Sampling period from 0x00 to 0xFF are all allowed */ 338 + return true; 339 + } 340 + 341 + static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) 342 + { 343 + return msft_monitor_rssi_valid(monitor); 344 + /* No additional check needed for pattern-based monitor */ 345 + } 346 + 347 + /* This function requires the caller holds hdev->lock */ 348 + static int __msft_add_monitor_pattern(struct hci_dev *hdev, 349 + struct adv_monitor *monitor) 350 + { 351 + struct msft_cp_le_monitor_advertisement *cp; 352 + struct msft_le_monitor_advertisement_pattern_data *pattern_data; 353 + struct msft_le_monitor_advertisement_pattern *pattern; 354 + struct adv_pattern *entry; 355 + struct hci_request req; 356 + struct msft_data *msft = hdev->msft_data; 357 + size_t total_size = sizeof(*cp) + sizeof(*pattern_data); 358 + ptrdiff_t offset = 0; 359 + u8 pattern_count = 0; 360 + int err = 0; 361 + 362 + if (!msft_monitor_pattern_valid(monitor)) 363 + return -EINVAL; 364 + 365 + list_for_each_entry(entry, &monitor->patterns, list) { 366 + pattern_count++; 367 + total_size += sizeof(*pattern) + entry->length; 368 + } 369 + 370 + cp = kmalloc(total_size, GFP_KERNEL); 371 + if (!cp) 372 + return -ENOMEM; 373 + 374 + 
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; 375 + cp->rssi_high = monitor->rssi.high_threshold; 376 + cp->rssi_low = monitor->rssi.low_threshold; 377 + cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout; 378 + cp->rssi_sampling_period = monitor->rssi.sampling_period; 379 + 380 + cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; 381 + 382 + pattern_data = (void *)cp->data; 383 + pattern_data->count = pattern_count; 384 + 385 + list_for_each_entry(entry, &monitor->patterns, list) { 386 + pattern = (void *)(pattern_data->data + offset); 387 + /* the length also includes data_type and offset */ 388 + pattern->length = entry->length + 2; 389 + pattern->data_type = entry->ad_type; 390 + pattern->start_byte = entry->offset; 391 + memcpy(pattern->pattern, entry->value, entry->length); 392 + offset += sizeof(*pattern) + entry->length; 393 + } 394 + 395 + hci_req_init(&req, hdev); 396 + hci_req_add(&req, hdev->msft_opcode, total_size, cp); 397 + err = hci_req_run_skb(&req, msft_le_monitor_advertisement_cb); 398 + kfree(cp); 399 + 400 + if (!err) 401 + msft->pending_add_handle = monitor->handle; 402 + 403 + return err; 404 + } 405 + 406 + /* This function requires the caller holds hdev->lock */ 407 + int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) 408 + { 409 + struct msft_data *msft = hdev->msft_data; 410 + 411 + if (!msft) 412 + return -EOPNOTSUPP; 413 + 414 + if (msft->reregistering) 415 + return -EBUSY; 416 + 417 + return __msft_add_monitor_pattern(hdev, monitor); 418 + } 419 + 420 + /* This function requires the caller holds hdev->lock */ 421 + int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 422 + u16 handle) 423 + { 424 + struct msft_cp_le_cancel_monitor_advertisement cp; 425 + struct msft_monitor_advertisement_handle_data *handle_data; 426 + struct hci_request req; 427 + struct msft_data *msft = hdev->msft_data; 428 + int err = 0; 429 + 430 + if (!msft) 431 + return -EOPNOTSUPP; 432 + 
433 + if (msft->reregistering) 434 + return -EBUSY; 435 + 436 + handle_data = msft_find_handle_data(hdev, monitor->handle, true); 437 + 438 + /* If no matched handle, just remove without telling controller */ 439 + if (!handle_data) 440 + return -ENOENT; 441 + 442 + cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; 443 + cp.handle = handle_data->msft_handle; 444 + 445 + hci_req_init(&req, hdev); 446 + hci_req_add(&req, hdev->msft_opcode, sizeof(cp), &cp); 447 + err = hci_req_run_skb(&req, msft_le_cancel_monitor_advertisement_cb); 448 + 449 + if (!err) 450 + msft->pending_remove_handle = handle; 451 + 452 + return err; 453 + } 454 + 455 + void msft_req_add_set_filter_enable(struct hci_request *req, bool enable) 456 + { 457 + struct hci_dev *hdev = req->hdev; 458 + struct msft_cp_le_set_advertisement_filter_enable cp; 459 + 460 + cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; 461 + cp.enable = enable; 462 + 463 + hci_req_add(req, hdev->msft_opcode, sizeof(cp), &cp); 464 + } 465 + 466 + int msft_set_filter_enable(struct hci_dev *hdev, bool enable) 467 + { 468 + struct hci_request req; 469 + struct msft_data *msft = hdev->msft_data; 470 + int err; 471 + 472 + if (!msft) 473 + return -EOPNOTSUPP; 474 + 475 + hci_req_init(&req, hdev); 476 + msft_req_add_set_filter_enable(&req, enable); 477 + err = hci_req_run_skb(&req, msft_le_set_advertisement_filter_enable_cb); 478 + 479 + return err; 276 480 }
+30
net/bluetooth/msft.h
··· 12 12 13 13 #if IS_ENABLED(CONFIG_BT_MSFTEXT) 14 14 15 + bool msft_monitor_supported(struct hci_dev *hdev); 15 16 void msft_do_open(struct hci_dev *hdev); 16 17 void msft_do_close(struct hci_dev *hdev); 17 18 void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); 18 19 __u64 msft_get_features(struct hci_dev *hdev); 20 + int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor); 21 + int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 22 + u16 handle); 23 + void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); 24 + int msft_set_filter_enable(struct hci_dev *hdev, bool enable); 19 25 20 26 #else 27 + 28 + static inline bool msft_monitor_supported(struct hci_dev *hdev) 29 + { 30 + return false; 31 + } 21 32 22 33 static inline void msft_do_open(struct hci_dev *hdev) {} 23 34 static inline void msft_do_close(struct hci_dev *hdev) {} 24 35 static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} 25 36 static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } 37 + static inline int msft_add_monitor_pattern(struct hci_dev *hdev, 38 + struct adv_monitor *monitor) 39 + { 40 + return -EOPNOTSUPP; 41 + } 42 + 43 + static inline int msft_remove_monitor(struct hci_dev *hdev, 44 + struct adv_monitor *monitor, 45 + u16 handle) 46 + { 47 + return -EOPNOTSUPP; 48 + } 49 + 50 + static inline void msft_req_add_set_filter_enable(struct hci_request *req, 51 + bool enable) {} 52 + static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable) 53 + { 54 + return -EOPNOTSUPP; 55 + } 26 56 27 57 #endif
+2 -3
net/bluetooth/smp.c
··· 25 25 #include <linux/crypto.h> 26 26 #include <crypto/aes.h> 27 27 #include <crypto/algapi.h> 28 - #include <crypto/b128ops.h> 29 28 #include <crypto/hash.h> 30 29 #include <crypto/kpp.h> 31 30 ··· 424 425 SMP_DBG("p1 %16phN", p1); 425 426 426 427 /* res = r XOR p1 */ 427 - u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); 428 + crypto_xor_cpy(res, r, p1, sizeof(p1)); 428 429 429 430 /* res = e(k, res) */ 430 431 err = smp_e(k, res); ··· 441 442 SMP_DBG("p2 %16phN", p2); 442 443 443 444 /* res = res XOR p2 */ 444 - u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); 445 + crypto_xor(res, p2, sizeof(p2)); 445 446 446 447 /* res = e(k, res) */ 447 448 err = smp_e(k, res);