Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-03-19

This won't be the last 4.1 bluetooth-next pull request, but we've piled up
enough patches in less than a week that I wanted to save you from a
single huge "last-minute" pull somewhere closer to the merge window.

The main changes are:

- Simultaneous LE & BR/EDR discovery support for HW that can do it
- Complete LE OOB pairing support
- More fine-grained mgmt-command access control (normal user can now do
harmless read-only operations).
- Added RF power amplifier support in cc2520 ieee802154 driver
- Some cleanups/fixes in ieee802154 code

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1497 -652
+4
Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
··· 13 13 - cca-gpio: GPIO spec for the CCA pin 14 14 - vreg-gpio: GPIO spec for the VREG pin 15 15 - reset-gpio: GPIO spec for the RESET pin 16 + Optional properties: 17 + - amplified: include if the CC2520 is connected to a CC2591 amplifier 18 + 16 19 Example: 17 20 cc2520@0 { 18 21 compatible = "ti,cc2520"; 19 22 reg = <0>; 20 23 spi-max-frequency = <4000000>; 24 + amplified; 21 25 pinctrl-names = "default"; 22 26 pinctrl-0 = <&cc2520_cape_pins>; 23 27 fifo-gpio = <&gpio1 18 0>;
+6 -2
drivers/bluetooth/btusb.c
··· 215 215 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, 216 216 217 217 /* QCA ROME chipset */ 218 - { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME}, 219 - { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME}, 218 + { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, 219 + { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, 220 220 221 221 /* Broadcom BCM2035 */ 222 222 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, ··· 3019 3019 hdev->shutdown = btusb_shutdown_intel; 3020 3020 hdev->set_bdaddr = btusb_set_bdaddr_intel; 3021 3021 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 3022 + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); 3022 3023 } 3023 3024 3024 3025 if (id->driver_info & BTUSB_INTEL_NEW) { ··· 3043 3042 3044 3043 if (id->driver_info & BTUSB_ATH3012) { 3045 3044 hdev->set_bdaddr = btusb_set_bdaddr_ath3012; 3045 + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); 3046 3046 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 3047 3047 } 3048 3048 ··· 3087 3085 /* Fake CSR devices with broken commands */ 3088 3086 if (bcdDevice <= 0x100) 3089 3087 hdev->setup = btusb_setup_csr; 3088 + 3089 + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); 3090 3090 } 3091 3091 3092 3092 if (id->driver_info & BTUSB_SNIFFER) {
+1 -1
drivers/net/ieee802154/at86rf230.c
··· 325 325 int rc; 326 326 327 327 rc = __at86rf230_read(lp, addr, data); 328 - if (rc > 0) 328 + if (!rc) 329 329 *data = (*data & mask) >> shift; 330 330 331 331 return rc;
+93 -57
drivers/net/ieee802154/cc2520.c
··· 714 714 return IRQ_HANDLED; 715 715 } 716 716 717 + static int cc2520_get_platform_data(struct spi_device *spi, 718 + struct cc2520_platform_data *pdata) 719 + { 720 + struct device_node *np = spi->dev.of_node; 721 + struct cc2520_private *priv = spi_get_drvdata(spi); 722 + 723 + if (!np) { 724 + struct cc2520_platform_data *spi_pdata = spi->dev.platform_data; 725 + if (!spi_pdata) 726 + return -ENOENT; 727 + *pdata = *spi_pdata; 728 + return 0; 729 + } 730 + 731 + pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0); 732 + priv->fifo_pin = pdata->fifo; 733 + 734 + pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0); 735 + 736 + pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0); 737 + pdata->cca = of_get_named_gpio(np, "cca-gpio", 0); 738 + pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0); 739 + pdata->reset = of_get_named_gpio(np, "reset-gpio", 0); 740 + 741 + pdata->amplified = of_property_read_bool(np, "amplified"); 742 + 743 + return 0; 744 + } 745 + 717 746 static int cc2520_hw_init(struct cc2520_private *priv) 718 747 { 719 748 u8 status = 0, state = 0xff; 720 749 int ret; 721 750 int timeout = 100; 751 + struct cc2520_platform_data pdata; 752 + 753 + ret = cc2520_get_platform_data(priv->spi, &pdata); 754 + if (ret) 755 + goto err_ret; 722 756 723 757 ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state); 724 758 if (ret) ··· 775 741 776 742 dev_vdbg(&priv->spi->dev, "oscillator brought up\n"); 777 743 778 - /* Registers default value: section 28.1 in Datasheet */ 779 - ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7); 780 - if (ret) 781 - goto err_ret; 744 + /* If the CC2520 is connected to a CC2591 amplifier, we must both 745 + * configure GPIOs on the CC2520 to correctly configure the CC2591 746 + * and change a couple settings of the CC2520 to work with the 747 + * amplifier. See section 8 page 17 of TI application note AN065. 
748 + * http://www.ti.com/lit/an/swra229a/swra229a.pdf 749 + */ 750 + if (pdata.amplified) { 751 + ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF9); 752 + if (ret) 753 + goto err_ret; 782 754 755 + ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x16); 756 + if (ret) 757 + goto err_ret; 758 + 759 + ret = cc2520_write_register(priv, CC2520_GPIOCTRL0, 0x46); 760 + if (ret) 761 + goto err_ret; 762 + 763 + ret = cc2520_write_register(priv, CC2520_GPIOCTRL5, 0x47); 764 + if (ret) 765 + goto err_ret; 766 + 767 + ret = cc2520_write_register(priv, CC2520_GPIOPOLARITY, 0x1e); 768 + if (ret) 769 + goto err_ret; 770 + 771 + ret = cc2520_write_register(priv, CC2520_TXCTRL, 0xc1); 772 + if (ret) 773 + goto err_ret; 774 + } else { 775 + ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7); 776 + if (ret) 777 + goto err_ret; 778 + 779 + ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11); 780 + if (ret) 781 + goto err_ret; 782 + } 783 + 784 + /* Registers default value: section 28.1 in Datasheet */ 783 785 ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A); 784 786 if (ret) 785 787 goto err_ret; ··· 837 767 goto err_ret; 838 768 839 769 ret = cc2520_write_register(priv, CC2520_FSCAL1, 0x2b); 840 - if (ret) 841 - goto err_ret; 842 - 843 - ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11); 844 770 if (ret) 845 771 goto err_ret; 846 772 ··· 874 808 return ret; 875 809 } 876 810 877 - static struct cc2520_platform_data * 878 - cc2520_get_platform_data(struct spi_device *spi) 879 - { 880 - struct cc2520_platform_data *pdata; 881 - struct device_node *np = spi->dev.of_node; 882 - struct cc2520_private *priv = spi_get_drvdata(spi); 883 - 884 - if (!np) 885 - return spi->dev.platform_data; 886 - 887 - pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL); 888 - if (!pdata) 889 - goto done; 890 - 891 - pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0); 892 - priv->fifo_pin = pdata->fifo; 893 - 894 - pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 
0); 895 - 896 - pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0); 897 - pdata->cca = of_get_named_gpio(np, "cca-gpio", 0); 898 - pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0); 899 - pdata->reset = of_get_named_gpio(np, "reset-gpio", 0); 900 - 901 - spi->dev.platform_data = pdata; 902 - 903 - done: 904 - return pdata; 905 - } 906 - 907 811 static int cc2520_probe(struct spi_device *spi) 908 812 { 909 813 struct cc2520_private *priv; 910 - struct cc2520_platform_data *pdata; 814 + struct cc2520_platform_data pdata; 911 815 int ret; 912 816 913 817 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); ··· 886 850 887 851 spi_set_drvdata(spi, priv); 888 852 889 - pdata = cc2520_get_platform_data(spi); 890 - if (!pdata) { 853 + ret = cc2520_get_platform_data(spi, &pdata); 854 + if (ret < 0) { 891 855 dev_err(&spi->dev, "no platform data\n"); 892 856 return -EINVAL; 893 857 } ··· 905 869 init_completion(&priv->tx_complete); 906 870 907 871 /* Request all the gpio's */ 908 - if (!gpio_is_valid(pdata->fifo)) { 872 + if (!gpio_is_valid(pdata.fifo)) { 909 873 dev_err(&spi->dev, "fifo gpio is not valid\n"); 910 874 ret = -EINVAL; 911 875 goto err_hw_init; 912 876 } 913 877 914 - ret = devm_gpio_request_one(&spi->dev, pdata->fifo, 878 + ret = devm_gpio_request_one(&spi->dev, pdata.fifo, 915 879 GPIOF_IN, "fifo"); 916 880 if (ret) 917 881 goto err_hw_init; 918 882 919 - if (!gpio_is_valid(pdata->cca)) { 883 + if (!gpio_is_valid(pdata.cca)) { 920 884 dev_err(&spi->dev, "cca gpio is not valid\n"); 921 885 ret = -EINVAL; 922 886 goto err_hw_init; 923 887 } 924 888 925 - ret = devm_gpio_request_one(&spi->dev, pdata->cca, 889 + ret = devm_gpio_request_one(&spi->dev, pdata.cca, 926 890 GPIOF_IN, "cca"); 927 891 if (ret) 928 892 goto err_hw_init; 929 893 930 - if (!gpio_is_valid(pdata->fifop)) { 894 + if (!gpio_is_valid(pdata.fifop)) { 931 895 dev_err(&spi->dev, "fifop gpio is not valid\n"); 932 896 ret = -EINVAL; 933 897 goto err_hw_init; 934 898 } 935 899 936 - ret = 
devm_gpio_request_one(&spi->dev, pdata->fifop, 900 + ret = devm_gpio_request_one(&spi->dev, pdata.fifop, 937 901 GPIOF_IN, "fifop"); 938 902 if (ret) 939 903 goto err_hw_init; 940 904 941 - if (!gpio_is_valid(pdata->sfd)) { 905 + if (!gpio_is_valid(pdata.sfd)) { 942 906 dev_err(&spi->dev, "sfd gpio is not valid\n"); 943 907 ret = -EINVAL; 944 908 goto err_hw_init; 945 909 } 946 910 947 - ret = devm_gpio_request_one(&spi->dev, pdata->sfd, 911 + ret = devm_gpio_request_one(&spi->dev, pdata.sfd, 948 912 GPIOF_IN, "sfd"); 949 913 if (ret) 950 914 goto err_hw_init; 951 915 952 - if (!gpio_is_valid(pdata->reset)) { 916 + if (!gpio_is_valid(pdata.reset)) { 953 917 dev_err(&spi->dev, "reset gpio is not valid\n"); 954 918 ret = -EINVAL; 955 919 goto err_hw_init; 956 920 } 957 921 958 - ret = devm_gpio_request_one(&spi->dev, pdata->reset, 922 + ret = devm_gpio_request_one(&spi->dev, pdata.reset, 959 923 GPIOF_OUT_INIT_LOW, "reset"); 960 924 if (ret) 961 925 goto err_hw_init; 962 926 963 - if (!gpio_is_valid(pdata->vreg)) { 927 + if (!gpio_is_valid(pdata.vreg)) { 964 928 dev_err(&spi->dev, "vreg gpio is not valid\n"); 965 929 ret = -EINVAL; 966 930 goto err_hw_init; 967 931 } 968 932 969 - ret = devm_gpio_request_one(&spi->dev, pdata->vreg, 933 + ret = devm_gpio_request_one(&spi->dev, pdata.vreg, 970 934 GPIOF_OUT_INIT_LOW, "vreg"); 971 935 if (ret) 972 936 goto err_hw_init; 973 937 974 - gpio_set_value(pdata->vreg, HIGH); 938 + gpio_set_value(pdata.vreg, HIGH); 975 939 usleep_range(100, 150); 976 940 977 - gpio_set_value(pdata->reset, HIGH); 941 + gpio_set_value(pdata.reset, HIGH); 978 942 usleep_range(200, 250); 979 943 980 944 ret = cc2520_hw_init(priv); ··· 983 947 984 948 /* Set up fifop interrupt */ 985 949 ret = devm_request_irq(&spi->dev, 986 - gpio_to_irq(pdata->fifop), 950 + gpio_to_irq(pdata.fifop), 987 951 cc2520_fifop_isr, 988 952 IRQF_TRIGGER_RISING, 989 953 dev_name(&spi->dev), ··· 995 959 996 960 /* Set up sfd interrupt */ 997 961 ret = 
devm_request_irq(&spi->dev, 998 - gpio_to_irq(pdata->sfd), 962 + gpio_to_irq(pdata.sfd), 999 963 cc2520_sfd_isr, 1000 964 IRQF_TRIGGER_FALLING, 1001 965 dev_name(&spi->dev),
+1
include/linux/spi/cc2520.h
··· 21 21 int sfd; 22 22 int reset; 23 23 int vreg; 24 + bool amplified; 24 25 }; 25 26 26 27 #endif
+5
include/net/bluetooth/bluetooth.h
··· 335 335 336 336 int bt_to_errno(__u16 code); 337 337 338 + void hci_sock_set_flag(struct sock *sk, int nr); 339 + void hci_sock_clear_flag(struct sock *sk, int nr); 340 + int hci_sock_test_flag(struct sock *sk, int nr); 341 + unsigned short hci_sock_get_channel(struct sock *sk); 342 + 338 343 int hci_sock_init(void); 339 344 void hci_sock_cleanup(void); 340 345
+22
include/net/bluetooth/hci.h
··· 160 160 * during the hdev->setup vendor callback. 161 161 */ 162 162 HCI_QUIRK_STRICT_DUPLICATE_FILTER, 163 + 164 + /* When this quirk is set, LE scan and BR/EDR inquiry is done 165 + * simultaneously, otherwise it's interleaved. 166 + * 167 + * This quirk can be set before hci_register_dev is called or 168 + * during the hdev->setup vendor callback. 169 + */ 170 + HCI_QUIRK_SIMULTANEOUS_DISCOVERY, 163 171 }; 164 172 165 173 /* HCI device flags */ ··· 185 177 HCI_RAW, 186 178 187 179 HCI_RESET, 180 + }; 181 + 182 + /* HCI socket flags */ 183 + enum { 184 + HCI_SOCK_TRUSTED, 185 + HCI_MGMT_INDEX_EVENTS, 186 + HCI_MGMT_UNCONF_INDEX_EVENTS, 187 + HCI_MGMT_EXT_INDEX_EVENTS, 188 + HCI_MGMT_GENERIC_EVENTS, 189 + HCI_MGMT_OOB_DATA_EVENTS, 188 190 }; 189 191 190 192 /* ··· 465 447 #define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */ 466 448 #define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */ 467 449 #define EIR_DEVICE_ID 0x10 /* device ID */ 450 + #define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */ 451 + #define EIR_LE_ROLE 0x1C /* LE role */ 452 + #define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */ 453 + #define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */ 468 454 469 455 /* Low Energy Advertising Flags */ 470 456 #define LE_AD_LIMITED 0x01 /* Limited Discoverable */
+6 -8
include/net/bluetooth/hci_core.h
··· 596 596 HCI_CONN_SC_ENABLED, 597 597 HCI_CONN_AES_CCM, 598 598 HCI_CONN_POWER_SAVE, 599 - HCI_CONN_REMOTE_OOB, 600 599 HCI_CONN_FLUSH_KEY, 601 600 HCI_CONN_ENCRYPT, 602 601 HCI_CONN_AUTH, ··· 1283 1284 /* ----- HCI Sockets ----- */ 1284 1285 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); 1285 1286 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, 1286 - struct sock *skip_sk); 1287 + int flag, struct sock *skip_sk); 1287 1288 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); 1288 1289 1289 1290 void hci_sock_dev_event(struct hci_dev *hdev, int event); 1290 1291 1291 - #define HCI_MGMT_VAR_LEN (1 << 0) 1292 - #define HCI_MGMT_NO_HDEV (1 << 1) 1293 - #define HCI_MGMT_UNCONFIGURED (1 << 2) 1292 + #define HCI_MGMT_VAR_LEN BIT(0) 1293 + #define HCI_MGMT_NO_HDEV BIT(1) 1294 + #define HCI_MGMT_UNTRUSTED BIT(2) 1295 + #define HCI_MGMT_UNCONFIGURED BIT(3) 1294 1296 1295 1297 struct hci_mgmt_handler { 1296 1298 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, ··· 1305 1305 unsigned short channel; 1306 1306 size_t handler_count; 1307 1307 const struct hci_mgmt_handler *handlers; 1308 + void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); 1308 1309 }; 1309 1310 1310 1311 int hci_mgmt_chan_register(struct hci_mgmt_chan *c); ··· 1329 1328 #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 1330 1329 #define DISCOV_BREDR_INQUIRY_LEN 0x08 1331 1330 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ 1332 - 1333 - int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk, 1334 - struct msghdr *msg, size_t msglen); 1335 1331 1336 1332 int mgmt_new_settings(struct hci_dev *hdev); 1337 1333 void mgmt_index_added(struct hci_dev *hdev);
+50
include/net/bluetooth/mgmt.h
··· 44 44 #define MGMT_STATUS_INVALID_INDEX 0x11 45 45 #define MGMT_STATUS_RFKILLED 0x12 46 46 #define MGMT_STATUS_ALREADY_PAIRED 0x13 47 + #define MGMT_STATUS_PERMISSION_DENIED 0x14 47 48 48 49 struct mgmt_hdr { 49 50 __le16 opcode; ··· 506 505 } __packed; 507 506 #define MGMT_START_SERVICE_DISCOVERY_SIZE 4 508 507 508 + #define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B 509 + struct mgmt_cp_read_local_oob_ext_data { 510 + __u8 type; 511 + } __packed; 512 + #define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1 513 + struct mgmt_rp_read_local_oob_ext_data { 514 + __u8 type; 515 + __le16 eir_len; 516 + __u8 eir[0]; 517 + } __packed; 518 + 519 + #define MGMT_OP_READ_EXT_INDEX_LIST 0x003C 520 + #define MGMT_READ_EXT_INDEX_LIST_SIZE 0 521 + struct mgmt_rp_read_ext_index_list { 522 + __le16 num_controllers; 523 + struct { 524 + __le16 index; 525 + __u8 type; 526 + __u8 bus; 527 + } entry[0]; 528 + } __packed; 529 + 530 + #define MGMT_OP_READ_ADV_FEATURES 0x0003D 531 + #define MGMT_READ_ADV_FEATURES_SIZE 0 532 + struct mgmt_rp_read_adv_features { 533 + __le32 supported_flags; 534 + __u8 max_adv_data_len; 535 + __u8 max_scan_rsp_len; 536 + __u8 max_instances; 537 + __u8 num_instances; 538 + __u8 instance[0]; 539 + } __packed; 540 + 509 541 #define MGMT_EV_CMD_COMPLETE 0x0001 510 542 struct mgmt_ev_cmd_complete { 511 543 __le16 opcode; ··· 726 692 #define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e 727 693 728 694 #define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f 695 + 696 + struct mgmt_ev_ext_index { 697 + __u8 type; 698 + __u8 bus; 699 + } __packed; 700 + 701 + #define MGMT_EV_EXT_INDEX_ADDED 0x0020 702 + 703 + #define MGMT_EV_EXT_INDEX_REMOVED 0x0021 704 + 705 + #define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022 706 + struct mgmt_ev_local_oob_data_updated { 707 + __u8 type; 708 + __le16 eir_len; 709 + __u8 eir[0]; 710 + } __packed;
+1 -1
net/bluetooth/Makefile
··· 13 13 14 14 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 15 15 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ 16 - a2mp.o amp.o ecc.o hci_request.o 16 + a2mp.o amp.o ecc.o hci_request.o mgmt_util.o 17 17 18 18 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o 19 19 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
+19 -5
net/bluetooth/hci_core.c
··· 2902 2902 2903 2903 hci_dev_lock(hdev); 2904 2904 2905 - hci_inquiry_cache_flush(hdev); 2905 + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, 2906 + &hdev->quirks)) { 2907 + /* If we were running LE only scan, change discovery 2908 + * state. If we were running both LE and BR/EDR inquiry 2909 + * simultaneously, and BR/EDR inquiry is already 2910 + * finished, stop discovery, otherwise BR/EDR inquiry 2911 + * will stop discovery when finished. 2912 + */ 2913 + if (!test_bit(HCI_INQUIRY, &hdev->flags)) 2914 + hci_discovery_set_state(hdev, 2915 + DISCOVERY_STOPPED); 2916 + } else { 2917 + hci_inquiry_cache_flush(hdev); 2906 2918 2907 - err = hci_req_run(&req, inquiry_complete); 2908 - if (err) { 2909 - BT_ERR("Inquiry request failed: err %d", err); 2910 - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2919 + err = hci_req_run(&req, inquiry_complete); 2920 + if (err) { 2921 + BT_ERR("Inquiry request failed: err %d", err); 2922 + hci_discovery_set_state(hdev, 2923 + DISCOVERY_STOPPED); 2924 + } 2911 2925 } 2912 2926 2913 2927 hci_dev_unlock(hdev);
+1 -1
net/bluetooth/hci_debugfs.c
··· 166 166 seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n", 167 167 &data->bdaddr, data->bdaddr_type, data->present, 168 168 16, data->hash192, 16, data->rand192, 169 - 16, data->hash256, 19, data->rand256); 169 + 16, data->hash256, 16, data->rand256); 170 170 } 171 171 hci_dev_unlock(hdev); 172 172
+46 -34
net/bluetooth/hci_event.c
··· 2126 2126 goto unlock; 2127 2127 2128 2128 if (list_empty(&discov->resolve)) { 2129 - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2129 + /* When BR/EDR inquiry is active and no LE scanning is in 2130 + * progress, then change discovery state to indicate completion. 2131 + * 2132 + * When running LE scanning and BR/EDR inquiry simultaneously 2133 + * and the LE scan already finished, then change the discovery 2134 + * state to indicate completion. 2135 + */ 2136 + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2137 + !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2138 + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2130 2139 goto unlock; 2131 2140 } 2132 2141 ··· 2144 2135 e->name_state = NAME_PENDING; 2145 2136 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 2146 2137 } else { 2147 - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2138 + /* When BR/EDR inquiry is active and no LE scanning is in 2139 + * progress, then change discovery state to indicate completion. 2140 + * 2141 + * When running LE scanning and BR/EDR inquiry simultaneously 2142 + * and the LE scan already finished, then change the discovery 2143 + * state to indicate completion. 2144 + */ 2145 + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2146 + !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2147 + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2148 2148 } 2149 2149 2150 2150 unlock: ··· 3907 3889 if (!data) 3908 3890 return 0x00; 3909 3891 3910 - if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) { 3911 - if (bredr_sc_enabled(hdev)) { 3912 - /* When Secure Connections is enabled, then just 3913 - * return the present value stored with the OOB 3914 - * data. The stored value contains the right present 3915 - * information. However it can only be trusted when 3916 - * not in Secure Connection Only mode. 
3917 - */ 3918 - if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 3919 - return data->present; 3920 - 3921 - /* When Secure Connections Only mode is enabled, then 3922 - * the P-256 values are required. If they are not 3923 - * available, then do not declare that OOB data is 3924 - * present. 3925 - */ 3926 - if (!memcmp(data->rand256, ZERO_KEY, 16) || 3927 - !memcmp(data->hash256, ZERO_KEY, 16)) 3928 - return 0x00; 3929 - 3930 - return 0x02; 3931 - } 3932 - 3933 - /* When Secure Connections is not enabled or actually 3934 - * not supported by the hardware, then check that if 3935 - * P-192 data values are present. 3892 + if (bredr_sc_enabled(hdev)) { 3893 + /* When Secure Connections is enabled, then just 3894 + * return the present value stored with the OOB 3895 + * data. The stored value contains the right present 3896 + * information. However it can only be trusted when 3897 + * not in Secure Connection Only mode. 3936 3898 */ 3937 - if (!memcmp(data->rand192, ZERO_KEY, 16) || 3938 - !memcmp(data->hash192, ZERO_KEY, 16)) 3899 + if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 3900 + return data->present; 3901 + 3902 + /* When Secure Connections Only mode is enabled, then 3903 + * the P-256 values are required. If they are not 3904 + * available, then do not declare that OOB data is 3905 + * present. 3906 + */ 3907 + if (!memcmp(data->rand256, ZERO_KEY, 16) || 3908 + !memcmp(data->hash256, ZERO_KEY, 16)) 3939 3909 return 0x00; 3940 3910 3941 - return 0x01; 3911 + return 0x02; 3942 3912 } 3943 3913 3944 - return 0x00; 3914 + /* When Secure Connections is not enabled or actually 3915 + * not supported by the hardware, then check that if 3916 + * P-192 data values are present. 
3917 + */ 3918 + if (!memcmp(data->rand192, ZERO_KEY, 16) || 3919 + !memcmp(data->hash192, ZERO_KEY, 16)) 3920 + return 0x00; 3921 + 3922 + return 0x01; 3945 3923 } 3946 3924 3947 3925 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) ··· 4024 4010 4025 4011 conn->remote_cap = ev->capability; 4026 4012 conn->remote_auth = ev->authentication; 4027 - if (ev->oob_data) 4028 - set_bit(HCI_CONN_REMOTE_OOB, &conn->flags); 4029 4013 4030 4014 unlock: 4031 4015 hci_dev_unlock(hdev);
+172 -8
net/bluetooth/hci_sock.c
··· 30 30 #include <net/bluetooth/bluetooth.h> 31 31 #include <net/bluetooth/hci_core.h> 32 32 #include <net/bluetooth/hci_mon.h> 33 + #include <net/bluetooth/mgmt.h> 34 + 35 + #include "mgmt_util.h" 33 36 34 37 static LIST_HEAD(mgmt_chan_list); 35 38 static DEFINE_MUTEX(mgmt_chan_list_lock); ··· 50 47 struct hci_filter filter; 51 48 __u32 cmsg_mask; 52 49 unsigned short channel; 50 + unsigned long flags; 53 51 }; 52 + 53 + void hci_sock_set_flag(struct sock *sk, int nr) 54 + { 55 + set_bit(nr, &hci_pi(sk)->flags); 56 + } 57 + 58 + void hci_sock_clear_flag(struct sock *sk, int nr) 59 + { 60 + clear_bit(nr, &hci_pi(sk)->flags); 61 + } 62 + 63 + int hci_sock_test_flag(struct sock *sk, int nr) 64 + { 65 + return test_bit(nr, &hci_pi(sk)->flags); 66 + } 67 + 68 + unsigned short hci_sock_get_channel(struct sock *sk) 69 + { 70 + return hci_pi(sk)->channel; 71 + } 54 72 55 73 static inline int hci_test_bit(int nr, const void *addr) 56 74 { ··· 212 188 213 189 /* Send frame to sockets with specific channel */ 214 190 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, 215 - struct sock *skip_sk) 191 + int flag, struct sock *skip_sk) 216 192 { 217 193 struct sock *sk; 218 194 ··· 222 198 223 199 sk_for_each(sk, &hci_sk_list.head) { 224 200 struct sk_buff *nskb; 201 + 202 + /* Ignore socket without the flag set */ 203 + if (!hci_sock_test_flag(sk, flag)) 204 + continue; 225 205 226 206 /* Skip the original socket */ 227 207 if (sk == skip_sk) ··· 294 266 hdr->index = cpu_to_le16(hdev->id); 295 267 hdr->len = cpu_to_le16(skb->len); 296 268 297 - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL); 269 + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, 270 + HCI_SOCK_TRUSTED, NULL); 298 271 kfree_skb(skb_copy); 299 272 } 300 273 ··· 402 373 403 374 skb = create_monitor_event(hdev, event); 404 375 if (skb) { 405 - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL); 376 + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, 377 + HCI_SOCK_TRUSTED, NULL); 406 
378 kfree_skb(skb); 407 379 } 408 380 } ··· 782 752 goto done; 783 753 } 784 754 755 + /* The monitor interface is restricted to CAP_NET_RAW 756 + * capabilities and with that implicitly trusted. 757 + */ 758 + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); 759 + 785 760 send_monitor_replay(sk); 786 761 787 762 atomic_inc(&monitor_promisc); ··· 803 768 goto done; 804 769 } 805 770 806 - if (!capable(CAP_NET_ADMIN)) { 807 - err = -EPERM; 808 - goto done; 809 - } 771 + /* Users with CAP_NET_ADMIN capabilities are allowed 772 + * access to all management commands and events. For 773 + * untrusted users the interface is restricted and 774 + * also only untrusted events are sent. 775 + */ 776 + if (capable(CAP_NET_ADMIN)) 777 + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); 810 778 779 + /* At the moment the index and unconfigured index events 780 + * are enabled unconditionally. Setting them on each 781 + * socket when binding keeps this functionality. They 782 + * however might be cleared later and then sending of these 783 + * events will be disabled, but that is then intentional. 784 + * 785 + * This also enables generic events that are safe to be 786 + * received by untrusted users. Example for such events 787 + * are changes to settings, class of device, name etc. 788 + */ 789 + if (haddr.hci_channel == HCI_CHANNEL_CONTROL) { 790 + hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS); 791 + hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); 792 + hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS); 793 + } 811 794 break; 812 795 } 813 796 ··· 954 901 return err ? 
: copied; 955 902 } 956 903 904 + static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, 905 + struct msghdr *msg, size_t msglen) 906 + { 907 + void *buf; 908 + u8 *cp; 909 + struct mgmt_hdr *hdr; 910 + u16 opcode, index, len; 911 + struct hci_dev *hdev = NULL; 912 + const struct hci_mgmt_handler *handler; 913 + bool var_len, no_hdev; 914 + int err; 915 + 916 + BT_DBG("got %zu bytes", msglen); 917 + 918 + if (msglen < sizeof(*hdr)) 919 + return -EINVAL; 920 + 921 + buf = kmalloc(msglen, GFP_KERNEL); 922 + if (!buf) 923 + return -ENOMEM; 924 + 925 + if (memcpy_from_msg(buf, msg, msglen)) { 926 + err = -EFAULT; 927 + goto done; 928 + } 929 + 930 + hdr = buf; 931 + opcode = __le16_to_cpu(hdr->opcode); 932 + index = __le16_to_cpu(hdr->index); 933 + len = __le16_to_cpu(hdr->len); 934 + 935 + if (len != msglen - sizeof(*hdr)) { 936 + err = -EINVAL; 937 + goto done; 938 + } 939 + 940 + if (opcode >= chan->handler_count || 941 + chan->handlers[opcode].func == NULL) { 942 + BT_DBG("Unknown op %u", opcode); 943 + err = mgmt_cmd_status(sk, index, opcode, 944 + MGMT_STATUS_UNKNOWN_COMMAND); 945 + goto done; 946 + } 947 + 948 + handler = &chan->handlers[opcode]; 949 + 950 + if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) && 951 + !(handler->flags & HCI_MGMT_UNTRUSTED)) { 952 + err = mgmt_cmd_status(sk, index, opcode, 953 + MGMT_STATUS_PERMISSION_DENIED); 954 + goto done; 955 + } 956 + 957 + if (index != MGMT_INDEX_NONE) { 958 + hdev = hci_dev_get(index); 959 + if (!hdev) { 960 + err = mgmt_cmd_status(sk, index, opcode, 961 + MGMT_STATUS_INVALID_INDEX); 962 + goto done; 963 + } 964 + 965 + if (hci_dev_test_flag(hdev, HCI_SETUP) || 966 + hci_dev_test_flag(hdev, HCI_CONFIG) || 967 + hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 968 + err = mgmt_cmd_status(sk, index, opcode, 969 + MGMT_STATUS_INVALID_INDEX); 970 + goto done; 971 + } 972 + 973 + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 974 + !(handler->flags & HCI_MGMT_UNCONFIGURED)) { 975 + err = 
mgmt_cmd_status(sk, index, opcode, 976 + MGMT_STATUS_INVALID_INDEX); 977 + goto done; 978 + } 979 + } 980 + 981 + no_hdev = (handler->flags & HCI_MGMT_NO_HDEV); 982 + if (no_hdev != !hdev) { 983 + err = mgmt_cmd_status(sk, index, opcode, 984 + MGMT_STATUS_INVALID_INDEX); 985 + goto done; 986 + } 987 + 988 + var_len = (handler->flags & HCI_MGMT_VAR_LEN); 989 + if ((var_len && len < handler->data_len) || 990 + (!var_len && len != handler->data_len)) { 991 + err = mgmt_cmd_status(sk, index, opcode, 992 + MGMT_STATUS_INVALID_PARAMS); 993 + goto done; 994 + } 995 + 996 + if (hdev && chan->hdev_init) 997 + chan->hdev_init(sk, hdev); 998 + 999 + cp = buf + sizeof(*hdr); 1000 + 1001 + err = handler->func(sk, hdev, cp, len); 1002 + if (err < 0) 1003 + goto done; 1004 + 1005 + err = msglen; 1006 + 1007 + done: 1008 + if (hdev) 1009 + hci_dev_put(hdev); 1010 + 1011 + kfree(buf); 1012 + return err; 1013 + } 1014 + 957 1015 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, 958 1016 size_t len) 959 1017 { ··· 1098 934 mutex_lock(&mgmt_chan_list_lock); 1099 935 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); 1100 936 if (chan) 1101 - err = mgmt_control(chan, sk, msg, len); 937 + err = hci_mgmt_cmd(chan, sk, msg, len); 1102 938 else 1103 939 err = -EINVAL; 1104 940
+572 -487
net/bluetooth/mgmt.c
··· 35 35 36 36 #include "hci_request.h" 37 37 #include "smp.h" 38 + #include "mgmt_util.h" 38 39 39 40 #define MGMT_VERSION 1 40 41 #define MGMT_REVISION 9 ··· 97 96 MGMT_OP_SET_EXTERNAL_CONFIG, 98 97 MGMT_OP_SET_PUBLIC_ADDRESS, 99 98 MGMT_OP_START_SERVICE_DISCOVERY, 99 + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 100 + MGMT_OP_READ_EXT_INDEX_LIST, 101 + MGMT_OP_READ_ADV_FEATURES, 100 102 }; 101 103 102 104 static const u16 mgmt_events[] = { ··· 132 128 MGMT_EV_UNCONF_INDEX_ADDED, 133 129 MGMT_EV_UNCONF_INDEX_REMOVED, 134 130 MGMT_EV_NEW_CONFIG_OPTIONS, 131 + MGMT_EV_EXT_INDEX_ADDED, 132 + MGMT_EV_EXT_INDEX_REMOVED, 133 + MGMT_EV_LOCAL_OOB_DATA_UPDATED, 135 134 }; 136 135 137 136 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) 138 137 139 138 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 140 139 "\x00\x00\x00\x00\x00\x00\x00\x00" 141 - 142 - struct mgmt_pending_cmd { 143 - struct list_head list; 144 - u16 opcode; 145 - int index; 146 - void *param; 147 - size_t param_len; 148 - struct sock *sk; 149 - void *user_data; 150 - int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); 151 - }; 152 140 153 141 /* HCI to MGMT error code conversion table */ 154 142 static u8 mgmt_status_table[] = { ··· 215 219 return MGMT_STATUS_FAILED; 216 220 } 217 221 218 - static int mgmt_send_event(u16 event, struct hci_dev *hdev, 219 - unsigned short channel, void *data, u16 data_len, 220 - struct sock *skip_sk) 222 + static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data, 223 + u16 len, int flag) 221 224 { 222 - struct sk_buff *skb; 223 - struct mgmt_hdr *hdr; 225 + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, 226 + flag, NULL); 227 + } 224 228 225 - skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL); 226 - if (!skb) 227 - return -ENOMEM; 229 + static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data, 230 + u16 len, int flag, struct sock *skip_sk) 231 + { 232 + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, 
data, len, 233 + flag, skip_sk); 234 + } 228 235 229 - hdr = (void *) skb_put(skb, sizeof(*hdr)); 230 - hdr->opcode = cpu_to_le16(event); 231 - if (hdev) 232 - hdr->index = cpu_to_le16(hdev->id); 233 - else 234 - hdr->index = cpu_to_le16(MGMT_INDEX_NONE); 235 - hdr->len = cpu_to_le16(data_len); 236 - 237 - if (data) 238 - memcpy(skb_put(skb, data_len), data, data_len); 239 - 240 - /* Time stamp */ 241 - __net_timestamp(skb); 242 - 243 - hci_send_to_channel(channel, skb, skip_sk); 244 - kfree_skb(skb); 245 - 246 - return 0; 236 + static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data, 237 + u16 len, struct sock *skip_sk) 238 + { 239 + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, 240 + HCI_MGMT_GENERIC_EVENTS, skip_sk); 247 241 } 248 242 249 243 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, 250 244 struct sock *skip_sk) 251 245 { 252 246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, 253 - skip_sk); 254 - } 255 - 256 - static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 257 - { 258 - struct sk_buff *skb; 259 - struct mgmt_hdr *hdr; 260 - struct mgmt_ev_cmd_status *ev; 261 - int err; 262 - 263 - BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); 264 - 265 - skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); 266 - if (!skb) 267 - return -ENOMEM; 268 - 269 - hdr = (void *) skb_put(skb, sizeof(*hdr)); 270 - 271 - hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); 272 - hdr->index = cpu_to_le16(index); 273 - hdr->len = cpu_to_le16(sizeof(*ev)); 274 - 275 - ev = (void *) skb_put(skb, sizeof(*ev)); 276 - ev->status = status; 277 - ev->opcode = cpu_to_le16(cmd); 278 - 279 - err = sock_queue_rcv_skb(sk, skb); 280 - if (err < 0) 281 - kfree_skb(skb); 282 - 283 - return err; 284 - } 285 - 286 - static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, 287 - void *rp, size_t rp_len) 288 - { 289 - struct sk_buff *skb; 290 - 
struct mgmt_hdr *hdr; 291 - struct mgmt_ev_cmd_complete *ev; 292 - int err; 293 - 294 - BT_DBG("sock %p", sk); 295 - 296 - skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); 297 - if (!skb) 298 - return -ENOMEM; 299 - 300 - hdr = (void *) skb_put(skb, sizeof(*hdr)); 301 - 302 - hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 303 - hdr->index = cpu_to_le16(index); 304 - hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); 305 - 306 - ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); 307 - ev->opcode = cpu_to_le16(cmd); 308 - ev->status = status; 309 - 310 - if (rp) 311 - memcpy(ev->data, rp, rp_len); 312 - 313 - err = sock_queue_rcv_skb(sk, skb); 314 - if (err < 0) 315 - kfree_skb(skb); 316 - 317 - return err; 247 + HCI_SOCK_TRUSTED, skip_sk); 318 248 } 319 249 320 250 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, ··· 411 489 return err; 412 490 } 413 491 492 + static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, 493 + void *data, u16 data_len) 494 + { 495 + struct mgmt_rp_read_ext_index_list *rp; 496 + struct hci_dev *d; 497 + size_t rp_len; 498 + u16 count; 499 + int err; 500 + 501 + BT_DBG("sock %p", sk); 502 + 503 + read_lock(&hci_dev_list_lock); 504 + 505 + count = 0; 506 + list_for_each_entry(d, &hci_dev_list, list) { 507 + if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP) 508 + count++; 509 + } 510 + 511 + rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count); 512 + rp = kmalloc(rp_len, GFP_ATOMIC); 513 + if (!rp) { 514 + read_unlock(&hci_dev_list_lock); 515 + return -ENOMEM; 516 + } 517 + 518 + count = 0; 519 + list_for_each_entry(d, &hci_dev_list, list) { 520 + if (hci_dev_test_flag(d, HCI_SETUP) || 521 + hci_dev_test_flag(d, HCI_CONFIG) || 522 + hci_dev_test_flag(d, HCI_USER_CHANNEL)) 523 + continue; 524 + 525 + /* Devices marked as raw-only are neither configured 526 + * nor unconfigured controllers. 
527 + */ 528 + if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) 529 + continue; 530 + 531 + if (d->dev_type == HCI_BREDR) { 532 + if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) 533 + rp->entry[count].type = 0x01; 534 + else 535 + rp->entry[count].type = 0x00; 536 + } else if (d->dev_type == HCI_AMP) { 537 + rp->entry[count].type = 0x02; 538 + } else { 539 + continue; 540 + } 541 + 542 + rp->entry[count].bus = d->bus; 543 + rp->entry[count++].index = cpu_to_le16(d->id); 544 + BT_DBG("Added hci%u", d->id); 545 + } 546 + 547 + rp->num_controllers = cpu_to_le16(count); 548 + rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count); 549 + 550 + read_unlock(&hci_dev_list_lock); 551 + 552 + /* If this command is called at least once, then all the 553 + * default index and unconfigured index events are disabled 554 + * and from now on only extended index events are used. 555 + */ 556 + hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS); 557 + hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS); 558 + hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); 559 + 560 + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, 561 + MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len); 562 + 563 + kfree(rp); 564 + 565 + return err; 566 + } 567 + 414 568 static bool is_configured(struct hci_dev *hdev) 415 569 { 416 570 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && ··· 519 521 { 520 522 __le32 options = get_missing_options(hdev); 521 523 522 - return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options, 523 - sizeof(options), skip); 524 + return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options, 525 + sizeof(options), skip); 524 526 } 525 527 526 528 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) ··· 777 779 return ptr; 778 780 } 779 781 780 - static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode, 781 - struct hci_dev *hdev) 782 + static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev) 782 783 { 783 - struct mgmt_pending_cmd 
*cmd; 784 - 785 - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 786 - if (cmd->opcode == opcode) 787 - return cmd; 788 - } 789 - 790 - return NULL; 784 + return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev); 791 785 } 792 786 793 - static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode, 794 - struct hci_dev *hdev, 795 - const void *data) 787 + static struct mgmt_pending_cmd *pending_find_data(u16 opcode, 788 + struct hci_dev *hdev, 789 + const void *data) 796 790 { 797 - struct mgmt_pending_cmd *cmd; 798 - 799 - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 800 - if (cmd->user_data != data) 801 - continue; 802 - if (cmd->opcode == opcode) 803 - return cmd; 804 - } 805 - 806 - return NULL; 791 + return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data); 807 792 } 808 793 809 794 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) ··· 847 866 /* If there's a pending mgmt command the flags will not yet have 848 867 * their final values, so check for this first. 849 868 */ 850 - cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); 869 + cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); 851 870 if (cmd) { 852 871 struct mgmt_mode *cp = cmd->param; 853 872 if (cp->val == 0x01) ··· 1055 1074 /* If there's a pending mgmt command the flag will not yet have 1056 1075 * it's final value, so check for this first. 
1057 1076 */ 1058 - cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); 1077 + cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); 1059 1078 if (cmd) { 1060 1079 struct mgmt_mode *cp = cmd->param; 1061 1080 return cp->val; ··· 1203 1222 sizeof(rp)); 1204 1223 } 1205 1224 1206 - static void mgmt_pending_free(struct mgmt_pending_cmd *cmd) 1207 - { 1208 - sock_put(cmd->sk); 1209 - kfree(cmd->param); 1210 - kfree(cmd); 1211 - } 1212 - 1213 - static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 1214 - struct hci_dev *hdev, 1215 - void *data, u16 len) 1216 - { 1217 - struct mgmt_pending_cmd *cmd; 1218 - 1219 - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1220 - if (!cmd) 1221 - return NULL; 1222 - 1223 - cmd->opcode = opcode; 1224 - cmd->index = hdev->id; 1225 - 1226 - cmd->param = kmemdup(data, len, GFP_KERNEL); 1227 - if (!cmd->param) { 1228 - kfree(cmd); 1229 - return NULL; 1230 - } 1231 - 1232 - cmd->param_len = len; 1233 - 1234 - cmd->sk = sk; 1235 - sock_hold(sk); 1236 - 1237 - list_add(&cmd->list, &hdev->mgmt_pending); 1238 - 1239 - return cmd; 1240 - } 1241 - 1242 - static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 1243 - void (*cb)(struct mgmt_pending_cmd *cmd, 1244 - void *data), 1245 - void *data) 1246 - { 1247 - struct mgmt_pending_cmd *cmd, *tmp; 1248 - 1249 - list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { 1250 - if (opcode > 0 && cmd->opcode != opcode) 1251 - continue; 1252 - 1253 - cb(cmd, data); 1254 - } 1255 - } 1256 - 1257 - static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) 1258 - { 1259 - list_del(&cmd->list); 1260 - mgmt_pending_free(cmd); 1261 - } 1262 - 1263 1225 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) 1264 1226 { 1265 1227 __le32 settings = cpu_to_le32(get_current_settings(hdev)); ··· 1229 1305 1230 1306 switch (hdev->discovery.state) { 1231 1307 case DISCOVERY_FINDING: 1232 - if (test_bit(HCI_INQUIRY, &hdev->flags)) { 1308 + if 
(test_bit(HCI_INQUIRY, &hdev->flags)) 1233 1309 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); 1234 - } else { 1310 + 1311 + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { 1235 1312 cancel_delayed_work(&hdev->le_scan_disable); 1236 1313 hci_req_add_le_scan_disable(req); 1237 1314 } ··· 1338 1413 1339 1414 hci_dev_lock(hdev); 1340 1415 1341 - if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { 1416 + if (pending_find(MGMT_OP_SET_POWERED, hdev)) { 1342 1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, 1343 1418 MGMT_STATUS_BUSY); 1344 1419 goto failed; ··· 1391 1466 1392 1467 static int new_settings(struct hci_dev *hdev, struct sock *skip) 1393 1468 { 1394 - __le32 ev; 1469 + __le32 ev = cpu_to_le32(get_current_settings(hdev)); 1395 1470 1396 - ev = cpu_to_le32(get_current_settings(hdev)); 1397 - 1398 - return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip); 1471 + return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, 1472 + sizeof(ev), skip); 1399 1473 } 1400 1474 1401 1475 int mgmt_new_settings(struct hci_dev *hdev) ··· 1490 1566 1491 1567 hci_dev_lock(hdev); 1492 1568 1493 - cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); 1569 + cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); 1494 1570 if (!cmd) 1495 1571 goto unlock; 1496 1572 ··· 1575 1651 goto failed; 1576 1652 } 1577 1653 1578 - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 1579 - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 1654 + if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 1655 + pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 1580 1656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1581 1657 MGMT_STATUS_BUSY); 1582 1658 goto failed; ··· 1747 1823 1748 1824 hci_dev_lock(hdev); 1749 1825 1750 - cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); 1826 + cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); 1751 1827 if (!cmd) 1752 1828 goto unlock; 1753 1829 ··· 1842 1918 goto failed; 1843 1919 } 1844 1920 1845 - if 
(mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 1846 - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 1921 + if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 1922 + pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 1847 1923 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 1848 1924 MGMT_STATUS_BUSY); 1849 1925 goto failed; ··· 1982 2058 goto failed; 1983 2059 } 1984 2060 1985 - if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { 2061 + if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { 1986 2062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, 1987 2063 MGMT_STATUS_BUSY); 1988 2064 goto failed; ··· 2061 2137 goto failed; 2062 2138 } 2063 2139 2064 - if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { 2140 + if (pending_find(MGMT_OP_SET_SSP, hdev)) { 2065 2141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 2066 2142 MGMT_STATUS_BUSY); 2067 2143 goto failed; ··· 2120 2196 2121 2197 hci_dev_lock(hdev); 2122 2198 2123 - if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { 2199 + if (pending_find(MGMT_OP_SET_SSP, hdev)) { 2124 2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 2125 2201 MGMT_STATUS_BUSY); 2126 2202 goto unlock; ··· 2242 2318 goto unlock; 2243 2319 } 2244 2320 2245 - if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) || 2246 - mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { 2321 + if (pending_find(MGMT_OP_SET_LE, hdev) || 2322 + pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { 2247 2323 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 2248 2324 MGMT_STATUS_BUSY); 2249 2325 goto unlock; ··· 2327 2403 2328 2404 hci_dev_lock(hdev); 2329 2405 2330 - cmd = mgmt_pending_find(mgmt_op, hdev); 2406 + cmd = pending_find(mgmt_op, hdev); 2331 2407 if (!cmd) 2332 2408 goto unlock; 2333 2409 ··· 2821 2897 goto failed; 2822 2898 } 2823 2899 2824 - if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { 2900 + if (pending_find(MGMT_OP_DISCONNECT, hdev)) { 2825 2901 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, 2826 
2902 MGMT_STATUS_BUSY, &rp, sizeof(rp)); 2827 2903 goto failed; ··· 3285 3361 goto unlock; 3286 3362 } 3287 3363 3288 - cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev); 3364 + cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev); 3289 3365 if (!cmd) { 3290 3366 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 3291 3367 MGMT_STATUS_INVALID_PARAMS); ··· 3463 3539 3464 3540 hci_dev_lock(hdev); 3465 3541 3466 - cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 3542 + cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 3467 3543 if (!cmd) 3468 3544 goto unlock; 3469 3545 ··· 3515 3591 if (err < 0) 3516 3592 goto failed; 3517 3593 3518 - err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len, 3519 - sk); 3594 + err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, 3595 + data, len, sk); 3520 3596 3521 3597 goto failed; 3522 3598 } ··· 3573 3649 goto unlock; 3574 3650 } 3575 3651 3576 - if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { 3652 + if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { 3577 3653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 3578 3654 MGMT_STATUS_BUSY); 3579 3655 goto unlock; ··· 3743 3819 return err; 3744 3820 } 3745 3821 3746 - static bool trigger_discovery(struct hci_request *req, u8 *status) 3822 + static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status) 3823 + { 3824 + struct hci_dev *hdev = req->hdev; 3825 + struct hci_cp_inquiry cp; 3826 + /* General inquiry access code (GIAC) */ 3827 + u8 lap[3] = { 0x33, 0x8b, 0x9e }; 3828 + 3829 + *status = mgmt_bredr_support(hdev); 3830 + if (*status) 3831 + return false; 3832 + 3833 + if (hci_dev_test_flag(hdev, HCI_INQUIRY)) { 3834 + *status = MGMT_STATUS_BUSY; 3835 + return false; 3836 + } 3837 + 3838 + hci_inquiry_cache_flush(hdev); 3839 + 3840 + memset(&cp, 0, sizeof(cp)); 3841 + memcpy(&cp.lap, lap, sizeof(cp.lap)); 3842 + cp.length = DISCOV_BREDR_INQUIRY_LEN; 3843 + 3844 + hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); 3845 
+ 3846 + return true; 3847 + } 3848 + 3849 + static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status) 3747 3850 { 3748 3851 struct hci_dev *hdev = req->hdev; 3749 3852 struct hci_cp_le_set_scan_param param_cp; 3750 3853 struct hci_cp_le_set_scan_enable enable_cp; 3751 - struct hci_cp_inquiry inq_cp; 3752 - /* General inquiry access code (GIAC) */ 3753 - u8 lap[3] = { 0x33, 0x8b, 0x9e }; 3754 3854 u8 own_addr_type; 3755 3855 int err; 3756 3856 3757 - switch (hdev->discovery.type) { 3758 - case DISCOV_TYPE_BREDR: 3759 - *status = mgmt_bredr_support(hdev); 3760 - if (*status) 3761 - return false; 3857 + *status = mgmt_le_support(hdev); 3858 + if (*status) 3859 + return false; 3762 3860 3763 - if (test_bit(HCI_INQUIRY, &hdev->flags)) { 3764 - *status = MGMT_STATUS_BUSY; 3861 + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { 3862 + /* Don't let discovery abort an outgoing connection attempt 3863 + * that's using directed advertising. 3864 + */ 3865 + if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { 3866 + *status = MGMT_STATUS_REJECTED; 3765 3867 return false; 3766 3868 } 3767 3869 3768 - hci_inquiry_cache_flush(hdev); 3870 + disable_advertising(req); 3871 + } 3769 3872 3770 - memset(&inq_cp, 0, sizeof(inq_cp)); 3771 - memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap)); 3772 - inq_cp.length = DISCOV_BREDR_INQUIRY_LEN; 3773 - hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp); 3873 + /* If controller is scanning, it means the background scanning is 3874 + * running. Thus, we should temporarily stop it in order to set the 3875 + * discovery scanning parameters. 3876 + */ 3877 + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) 3878 + hci_req_add_le_scan_disable(req); 3879 + 3880 + /* All active scans will be done with either a resolvable private 3881 + * address (when privacy feature has been enabled) or non-resolvable 3882 + * private address. 
3883 + */ 3884 + err = hci_update_random_address(req, true, &own_addr_type); 3885 + if (err < 0) { 3886 + *status = MGMT_STATUS_FAILED; 3887 + return false; 3888 + } 3889 + 3890 + memset(&param_cp, 0, sizeof(param_cp)); 3891 + param_cp.type = LE_SCAN_ACTIVE; 3892 + param_cp.interval = cpu_to_le16(interval); 3893 + param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); 3894 + param_cp.own_address_type = own_addr_type; 3895 + 3896 + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 3897 + &param_cp); 3898 + 3899 + memset(&enable_cp, 0, sizeof(enable_cp)); 3900 + enable_cp.enable = LE_SCAN_ENABLE; 3901 + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 3902 + 3903 + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), 3904 + &enable_cp); 3905 + 3906 + return true; 3907 + } 3908 + 3909 + static bool trigger_discovery(struct hci_request *req, u8 *status) 3910 + { 3911 + struct hci_dev *hdev = req->hdev; 3912 + 3913 + switch (hdev->discovery.type) { 3914 + case DISCOV_TYPE_BREDR: 3915 + if (!trigger_bredr_inquiry(req, status)) 3916 + return false; 3774 3917 break; 3775 3918 3776 - case DISCOV_TYPE_LE: 3777 3919 case DISCOV_TYPE_INTERLEAVED: 3778 - *status = mgmt_le_support(hdev); 3779 - if (*status) 3780 - return false; 3920 + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, 3921 + &hdev->quirks)) { 3922 + /* During simultaneous discovery, we double LE scan 3923 + * interval. We must leave some time for the controller 3924 + * to do BR/EDR inquiry. 
3925 + */ 3926 + if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2, 3927 + status)) 3928 + return false; 3781 3929 3782 - if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && 3783 - !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 3930 + if (!trigger_bredr_inquiry(req, status)) 3931 + return false; 3932 + 3933 + return true; 3934 + } 3935 + 3936 + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 3784 3937 *status = MGMT_STATUS_NOT_SUPPORTED; 3785 3938 return false; 3786 3939 } 3940 + /* fall through */ 3787 3941 3788 - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { 3789 - /* Don't let discovery abort an outgoing 3790 - * connection attempt that's using directed 3791 - * advertising. 3792 - */ 3793 - if (hci_conn_hash_lookup_state(hdev, LE_LINK, 3794 - BT_CONNECT)) { 3795 - *status = MGMT_STATUS_REJECTED; 3796 - return false; 3797 - } 3798 - 3799 - disable_advertising(req); 3800 - } 3801 - 3802 - /* If controller is scanning, it means the background scanning 3803 - * is running. Thus, we should temporarily stop it in order to 3804 - * set the discovery scanning parameters. 3805 - */ 3806 - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) 3807 - hci_req_add_le_scan_disable(req); 3808 - 3809 - memset(&param_cp, 0, sizeof(param_cp)); 3810 - 3811 - /* All active scans will be done with either a resolvable 3812 - * private address (when privacy feature has been enabled) 3813 - * or non-resolvable private address. 
3814 - */ 3815 - err = hci_update_random_address(req, true, &own_addr_type); 3816 - if (err < 0) { 3817 - *status = MGMT_STATUS_FAILED; 3942 + case DISCOV_TYPE_LE: 3943 + if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status)) 3818 3944 return false; 3819 - } 3820 - 3821 - param_cp.type = LE_SCAN_ACTIVE; 3822 - param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); 3823 - param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); 3824 - param_cp.own_address_type = own_addr_type; 3825 - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 3826 - &param_cp); 3827 - 3828 - memset(&enable_cp, 0, sizeof(enable_cp)); 3829 - enable_cp.enable = LE_SCAN_ENABLE; 3830 - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 3831 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), 3832 - &enable_cp); 3833 3945 break; 3834 3946 3835 3947 default: ··· 3886 3926 3887 3927 hci_dev_lock(hdev); 3888 3928 3889 - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); 3929 + cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); 3890 3930 if (!cmd) 3891 - cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); 3931 + cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); 3892 3932 3893 3933 if (cmd) { 3894 3934 cmd->cmd_complete(cmd, mgmt_status(status)); ··· 3910 3950 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); 3911 3951 break; 3912 3952 case DISCOV_TYPE_INTERLEAVED: 3913 - timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); 3953 + /* When running simultaneous discovery, the LE scanning time 3954 + * should occupy the whole discovery time sine BR/EDR inquiry 3955 + * and LE scanning are scheduled by the controller. 3956 + * 3957 + * For interleaving discovery in comparison, BR/EDR inquiry 3958 + * and LE scanning are done sequentially with separate 3959 + * timeouts. 
3960 + */ 3961 + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3962 + timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); 3963 + else 3964 + timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); 3914 3965 break; 3915 3966 case DISCOV_TYPE_BREDR: 3916 3967 timeout = 0; ··· 4143 4172 4144 4173 hci_dev_lock(hdev); 4145 4174 4146 - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); 4175 + cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); 4147 4176 if (cmd) { 4148 4177 cmd->cmd_complete(cmd, mgmt_status(status)); 4149 4178 mgmt_pending_remove(cmd); ··· 4452 4481 goto unlock; 4453 4482 } 4454 4483 4455 - if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) || 4456 - mgmt_pending_find(MGMT_OP_SET_LE, hdev)) { 4484 + if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) || 4485 + pending_find(MGMT_OP_SET_LE, hdev)) { 4457 4486 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, 4458 4487 MGMT_STATUS_BUSY); 4459 4488 goto unlock; ··· 4596 4625 4597 4626 hci_dev_lock(hdev); 4598 4627 4599 - cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); 4628 + cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); 4600 4629 if (!cmd) 4601 4630 goto unlock; 4602 4631 ··· 4642 4671 4643 4672 hci_dev_lock(hdev); 4644 4673 4645 - if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { 4674 + if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { 4646 4675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 4647 4676 MGMT_STATUS_BUSY); 4648 4677 goto unlock; ··· 4694 4723 4695 4724 hci_dev_lock(hdev); 4696 4725 4697 - cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev); 4726 + cmd = pending_find(MGMT_OP_SET_BREDR, hdev); 4698 4727 if (!cmd) 4699 4728 goto unlock; 4700 4729 ··· 4794 4823 } 4795 4824 } 4796 4825 4797 - if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) { 4826 + if (pending_find(MGMT_OP_SET_BREDR, hdev)) { 4798 4827 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4799 4828 MGMT_STATUS_BUSY); 4800 4829 goto unlock; ··· 
4839 4868 4840 4869 hci_dev_lock(hdev); 4841 4870 4842 - cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev); 4871 + cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev); 4843 4872 if (!cmd) 4844 4873 goto unlock; 4845 4874 ··· 4930 4959 goto failed; 4931 4960 } 4932 4961 4933 - if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { 4962 + if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { 4934 4963 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4935 4964 MGMT_STATUS_BUSY); 4936 4965 goto failed; ··· 5323 5352 goto unlock; 5324 5353 } 5325 5354 5326 - cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); 5355 + cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); 5327 5356 if (!cmd) 5328 5357 goto unlock; 5329 5358 ··· 5376 5405 goto unlock; 5377 5406 } 5378 5407 5379 - if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { 5408 + if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { 5380 5409 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, 5381 5410 MGMT_STATUS_BUSY, &rp, sizeof(rp)); 5382 5411 goto unlock; ··· 5512 5541 conn = NULL; 5513 5542 } 5514 5543 5515 - cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); 5544 + cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); 5516 5545 if (!cmd) 5517 5546 goto unlock; 5518 5547 ··· 5678 5707 5679 5708 hci_dev_lock(hdev); 5680 5709 5681 - cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev); 5710 + cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev); 5682 5711 if (!cmd) 5683 5712 goto unlock; 5684 5713 ··· 5801 5830 5802 5831 hci_dev_lock(hdev); 5803 5832 5804 - cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev); 5833 + cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev); 5805 5834 if (!cmd) 5806 5835 goto unlock; 5807 5836 ··· 6133 6162 return err; 6134 6163 } 6135 6164 6165 + static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, 6166 + u8 data_len) 6167 + { 6168 + eir[eir_len++] = sizeof(type) + data_len; 6169 + 
eir[eir_len++] = type; 6170 + memcpy(&eir[eir_len], data, data_len); 6171 + eir_len += data_len; 6172 + 6173 + return eir_len; 6174 + } 6175 + 6176 + static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, 6177 + void *data, u16 data_len) 6178 + { 6179 + struct mgmt_cp_read_local_oob_ext_data *cp = data; 6180 + struct mgmt_rp_read_local_oob_ext_data *rp; 6181 + size_t rp_len; 6182 + u16 eir_len; 6183 + u8 status, flags, role, addr[7], hash[16], rand[16]; 6184 + int err; 6185 + 6186 + BT_DBG("%s", hdev->name); 6187 + 6188 + if (!hdev_is_powered(hdev)) 6189 + return mgmt_cmd_complete(sk, hdev->id, 6190 + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6191 + MGMT_STATUS_NOT_POWERED, 6192 + &cp->type, sizeof(cp->type)); 6193 + 6194 + switch (cp->type) { 6195 + case BIT(BDADDR_BREDR): 6196 + status = mgmt_bredr_support(hdev); 6197 + if (status) 6198 + return mgmt_cmd_complete(sk, hdev->id, 6199 + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6200 + status, &cp->type, 6201 + sizeof(cp->type)); 6202 + eir_len = 5; 6203 + break; 6204 + case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): 6205 + status = mgmt_le_support(hdev); 6206 + if (status) 6207 + return mgmt_cmd_complete(sk, hdev->id, 6208 + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6209 + status, &cp->type, 6210 + sizeof(cp->type)); 6211 + eir_len = 9 + 3 + 18 + 18 + 3; 6212 + break; 6213 + default: 6214 + return mgmt_cmd_complete(sk, hdev->id, 6215 + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6216 + MGMT_STATUS_INVALID_PARAMS, 6217 + &cp->type, sizeof(cp->type)); 6218 + } 6219 + 6220 + hci_dev_lock(hdev); 6221 + 6222 + rp_len = sizeof(*rp) + eir_len; 6223 + rp = kmalloc(rp_len, GFP_ATOMIC); 6224 + if (!rp) { 6225 + hci_dev_unlock(hdev); 6226 + return -ENOMEM; 6227 + } 6228 + 6229 + eir_len = 0; 6230 + switch (cp->type) { 6231 + case BIT(BDADDR_BREDR): 6232 + eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV, 6233 + hdev->dev_class, 3); 6234 + break; 6235 + case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): 6236 + if 
(hci_dev_test_flag(hdev, HCI_SC_ENABLED) && 6237 + smp_generate_oob(hdev, hash, rand) < 0) { 6238 + hci_dev_unlock(hdev); 6239 + err = mgmt_cmd_complete(sk, hdev->id, 6240 + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6241 + MGMT_STATUS_FAILED, 6242 + &cp->type, sizeof(cp->type)); 6243 + goto done; 6244 + } 6245 + 6246 + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 6247 + memcpy(addr, &hdev->rpa, 6); 6248 + addr[6] = 0x01; 6249 + } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 6250 + !bacmp(&hdev->bdaddr, BDADDR_ANY) || 6251 + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 6252 + bacmp(&hdev->static_addr, BDADDR_ANY))) { 6253 + memcpy(addr, &hdev->static_addr, 6); 6254 + addr[6] = 0x01; 6255 + } else { 6256 + memcpy(addr, &hdev->bdaddr, 6); 6257 + addr[6] = 0x00; 6258 + } 6259 + 6260 + eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR, 6261 + addr, sizeof(addr)); 6262 + 6263 + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 6264 + role = 0x02; 6265 + else 6266 + role = 0x01; 6267 + 6268 + eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE, 6269 + &role, sizeof(role)); 6270 + 6271 + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) { 6272 + eir_len = eir_append_data(rp->eir, eir_len, 6273 + EIR_LE_SC_CONFIRM, 6274 + hash, sizeof(hash)); 6275 + 6276 + eir_len = eir_append_data(rp->eir, eir_len, 6277 + EIR_LE_SC_RANDOM, 6278 + rand, sizeof(rand)); 6279 + } 6280 + 6281 + flags = get_adv_discov_flags(hdev); 6282 + 6283 + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 6284 + flags |= LE_AD_NO_BREDR; 6285 + 6286 + eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS, 6287 + &flags, sizeof(flags)); 6288 + break; 6289 + } 6290 + 6291 + rp->type = cp->type; 6292 + rp->eir_len = cpu_to_le16(eir_len); 6293 + 6294 + hci_dev_unlock(hdev); 6295 + 6296 + hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); 6297 + 6298 + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6299 + MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len); 6300 + if (err < 0) 6301 + 
goto done; 6302 + 6303 + err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, 6304 + rp, sizeof(*rp) + eir_len, 6305 + HCI_MGMT_OOB_DATA_EVENTS, sk); 6306 + 6307 + done: 6308 + kfree(rp); 6309 + 6310 + return err; 6311 + } 6312 + 6313 + static int read_adv_features(struct sock *sk, struct hci_dev *hdev, 6314 + void *data, u16 data_len) 6315 + { 6316 + struct mgmt_rp_read_adv_features *rp; 6317 + size_t rp_len; 6318 + int err; 6319 + 6320 + BT_DBG("%s", hdev->name); 6321 + 6322 + hci_dev_lock(hdev); 6323 + 6324 + rp_len = sizeof(*rp); 6325 + rp = kmalloc(rp_len, GFP_ATOMIC); 6326 + if (!rp) { 6327 + hci_dev_unlock(hdev); 6328 + return -ENOMEM; 6329 + } 6330 + 6331 + rp->supported_flags = cpu_to_le32(0); 6332 + rp->max_adv_data_len = 31; 6333 + rp->max_scan_rsp_len = 31; 6334 + rp->max_instances = 0; 6335 + rp->num_instances = 0; 6336 + 6337 + hci_dev_unlock(hdev); 6338 + 6339 + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, 6340 + MGMT_STATUS_SUCCESS, rp, rp_len); 6341 + 6342 + kfree(rp); 6343 + 6344 + return err; 6345 + } 6346 + 6136 6347 static const struct hci_mgmt_handler mgmt_handlers[] = { 6137 6348 { NULL }, /* 0x0000 (no command) */ 6138 6349 { read_version, MGMT_READ_VERSION_SIZE, 6139 - HCI_MGMT_NO_HDEV }, 6350 + HCI_MGMT_NO_HDEV | 6351 + HCI_MGMT_UNTRUSTED }, 6140 6352 { read_commands, MGMT_READ_COMMANDS_SIZE, 6141 - HCI_MGMT_NO_HDEV }, 6353 + HCI_MGMT_NO_HDEV | 6354 + HCI_MGMT_UNTRUSTED }, 6142 6355 { read_index_list, MGMT_READ_INDEX_LIST_SIZE, 6143 - HCI_MGMT_NO_HDEV }, 6144 - { read_controller_info, MGMT_READ_INFO_SIZE, 0 }, 6145 - { set_powered, MGMT_SETTING_SIZE, 0 }, 6146 - { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE, 0 }, 6147 - { set_connectable, MGMT_SETTING_SIZE, 0 }, 6148 - { set_fast_connectable, MGMT_SETTING_SIZE, 0 }, 6149 - { set_bondable, MGMT_SETTING_SIZE, 0 }, 6150 - { set_link_security, MGMT_SETTING_SIZE, 0 }, 6151 - { set_ssp, MGMT_SETTING_SIZE, 0 }, 6152 - { set_hs, MGMT_SETTING_SIZE, 0 }, 6153 - { 
set_le, MGMT_SETTING_SIZE, 0 }, 6154 - { set_dev_class, MGMT_SET_DEV_CLASS_SIZE, 0 }, 6155 - { set_local_name, MGMT_SET_LOCAL_NAME_SIZE, 0 }, 6156 - { add_uuid, MGMT_ADD_UUID_SIZE, 0 }, 6157 - { remove_uuid, MGMT_REMOVE_UUID_SIZE, 0 }, 6356 + HCI_MGMT_NO_HDEV | 6357 + HCI_MGMT_UNTRUSTED }, 6358 + { read_controller_info, MGMT_READ_INFO_SIZE, 6359 + HCI_MGMT_UNTRUSTED }, 6360 + { set_powered, MGMT_SETTING_SIZE }, 6361 + { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE }, 6362 + { set_connectable, MGMT_SETTING_SIZE }, 6363 + { set_fast_connectable, MGMT_SETTING_SIZE }, 6364 + { set_bondable, MGMT_SETTING_SIZE }, 6365 + { set_link_security, MGMT_SETTING_SIZE }, 6366 + { set_ssp, MGMT_SETTING_SIZE }, 6367 + { set_hs, MGMT_SETTING_SIZE }, 6368 + { set_le, MGMT_SETTING_SIZE }, 6369 + { set_dev_class, MGMT_SET_DEV_CLASS_SIZE }, 6370 + { set_local_name, MGMT_SET_LOCAL_NAME_SIZE }, 6371 + { add_uuid, MGMT_ADD_UUID_SIZE }, 6372 + { remove_uuid, MGMT_REMOVE_UUID_SIZE }, 6158 6373 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE, 6159 6374 HCI_MGMT_VAR_LEN }, 6160 6375 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE, 6161 6376 HCI_MGMT_VAR_LEN }, 6162 - { disconnect, MGMT_DISCONNECT_SIZE, 0 }, 6163 - { get_connections, MGMT_GET_CONNECTIONS_SIZE, 0 }, 6164 - { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE, 0 }, 6165 - { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE, 0 }, 6166 - { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE, 0 }, 6167 - { pair_device, MGMT_PAIR_DEVICE_SIZE, 0 }, 6168 - { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE, 0 }, 6169 - { unpair_device, MGMT_UNPAIR_DEVICE_SIZE, 0 }, 6170 - { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE, 0 }, 6171 - { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE, 0 }, 6172 - { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE, 0 }, 6173 - { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE, 0 }, 6377 + { disconnect, MGMT_DISCONNECT_SIZE }, 6378 + { get_connections, MGMT_GET_CONNECTIONS_SIZE }, 6379 + { 
pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE }, 6380 + { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE }, 6381 + { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE }, 6382 + { pair_device, MGMT_PAIR_DEVICE_SIZE }, 6383 + { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE }, 6384 + { unpair_device, MGMT_UNPAIR_DEVICE_SIZE }, 6385 + { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE }, 6386 + { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, 6387 + { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE }, 6388 + { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, 6174 6389 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE }, 6175 6390 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE, 6176 6391 HCI_MGMT_VAR_LEN }, 6177 - { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE, 0 }, 6178 - { start_discovery, MGMT_START_DISCOVERY_SIZE, 0 }, 6179 - { stop_discovery, MGMT_STOP_DISCOVERY_SIZE, 0 }, 6180 - { confirm_name, MGMT_CONFIRM_NAME_SIZE, 0 }, 6181 - { block_device, MGMT_BLOCK_DEVICE_SIZE, 0 }, 6182 - { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE, 0 }, 6183 - { set_device_id, MGMT_SET_DEVICE_ID_SIZE, 0 }, 6184 - { set_advertising, MGMT_SETTING_SIZE, 0 }, 6185 - { set_bredr, MGMT_SETTING_SIZE, 0 }, 6186 - { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE, 0 }, 6187 - { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE, 0 }, 6188 - { set_secure_conn, MGMT_SETTING_SIZE, 0 }, 6189 - { set_debug_keys, MGMT_SETTING_SIZE, 0 }, 6190 - { set_privacy, MGMT_SET_PRIVACY_SIZE, 0 }, 6392 + { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, 6393 + { start_discovery, MGMT_START_DISCOVERY_SIZE }, 6394 + { stop_discovery, MGMT_STOP_DISCOVERY_SIZE }, 6395 + { confirm_name, MGMT_CONFIRM_NAME_SIZE }, 6396 + { block_device, MGMT_BLOCK_DEVICE_SIZE }, 6397 + { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE }, 6398 + { set_device_id, MGMT_SET_DEVICE_ID_SIZE }, 6399 + { set_advertising, MGMT_SETTING_SIZE }, 6400 + { set_bredr, MGMT_SETTING_SIZE }, 6401 + { 
set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE }, 6402 + { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE }, 6403 + { set_secure_conn, MGMT_SETTING_SIZE }, 6404 + { set_debug_keys, MGMT_SETTING_SIZE }, 6405 + { set_privacy, MGMT_SET_PRIVACY_SIZE }, 6191 6406 { load_irks, MGMT_LOAD_IRKS_SIZE, 6192 6407 HCI_MGMT_VAR_LEN }, 6193 - { get_conn_info, MGMT_GET_CONN_INFO_SIZE, 0 }, 6194 - { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE, 0 }, 6195 - { add_device, MGMT_ADD_DEVICE_SIZE, 0 }, 6196 - { remove_device, MGMT_REMOVE_DEVICE_SIZE, 0 }, 6408 + { get_conn_info, MGMT_GET_CONN_INFO_SIZE }, 6409 + { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE }, 6410 + { add_device, MGMT_ADD_DEVICE_SIZE }, 6411 + { remove_device, MGMT_REMOVE_DEVICE_SIZE }, 6197 6412 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE, 6198 6413 HCI_MGMT_VAR_LEN }, 6199 6414 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE, 6200 - HCI_MGMT_NO_HDEV }, 6415 + HCI_MGMT_NO_HDEV | 6416 + HCI_MGMT_UNTRUSTED }, 6201 6417 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE, 6202 - HCI_MGMT_UNCONFIGURED }, 6418 + HCI_MGMT_UNCONFIGURED | 6419 + HCI_MGMT_UNTRUSTED }, 6203 6420 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE, 6204 6421 HCI_MGMT_UNCONFIGURED }, 6205 6422 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE, 6206 6423 HCI_MGMT_UNCONFIGURED }, 6207 6424 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE, 6208 6425 HCI_MGMT_VAR_LEN }, 6426 + { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE }, 6427 + { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE, 6428 + HCI_MGMT_NO_HDEV | 6429 + HCI_MGMT_UNTRUSTED }, 6430 + { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE }, 6209 6431 }; 6210 - 6211 - int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk, 6212 - struct msghdr *msg, size_t msglen) 6213 - { 6214 - void *buf; 6215 - u8 *cp; 6216 - struct mgmt_hdr *hdr; 6217 - u16 opcode, index, len; 6218 - struct hci_dev *hdev = NULL; 6219 - const struct hci_mgmt_handler *handler; 6220 - bool 
var_len, no_hdev; 6221 - int err; 6222 - 6223 - BT_DBG("got %zu bytes", msglen); 6224 - 6225 - if (msglen < sizeof(*hdr)) 6226 - return -EINVAL; 6227 - 6228 - buf = kmalloc(msglen, GFP_KERNEL); 6229 - if (!buf) 6230 - return -ENOMEM; 6231 - 6232 - if (memcpy_from_msg(buf, msg, msglen)) { 6233 - err = -EFAULT; 6234 - goto done; 6235 - } 6236 - 6237 - hdr = buf; 6238 - opcode = __le16_to_cpu(hdr->opcode); 6239 - index = __le16_to_cpu(hdr->index); 6240 - len = __le16_to_cpu(hdr->len); 6241 - 6242 - if (len != msglen - sizeof(*hdr)) { 6243 - err = -EINVAL; 6244 - goto done; 6245 - } 6246 - 6247 - if (opcode >= chan->handler_count || 6248 - chan->handlers[opcode].func == NULL) { 6249 - BT_DBG("Unknown op %u", opcode); 6250 - err = mgmt_cmd_status(sk, index, opcode, 6251 - MGMT_STATUS_UNKNOWN_COMMAND); 6252 - goto done; 6253 - } 6254 - 6255 - handler = &chan->handlers[opcode]; 6256 - 6257 - if (index != MGMT_INDEX_NONE) { 6258 - hdev = hci_dev_get(index); 6259 - if (!hdev) { 6260 - err = mgmt_cmd_status(sk, index, opcode, 6261 - MGMT_STATUS_INVALID_INDEX); 6262 - goto done; 6263 - } 6264 - 6265 - if (hci_dev_test_flag(hdev, HCI_SETUP) || 6266 - hci_dev_test_flag(hdev, HCI_CONFIG) || 6267 - hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 6268 - err = mgmt_cmd_status(sk, index, opcode, 6269 - MGMT_STATUS_INVALID_INDEX); 6270 - goto done; 6271 - } 6272 - 6273 - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 6274 - !(handler->flags & HCI_MGMT_UNCONFIGURED)) { 6275 - err = mgmt_cmd_status(sk, index, opcode, 6276 - MGMT_STATUS_INVALID_INDEX); 6277 - goto done; 6278 - } 6279 - } 6280 - 6281 - no_hdev = (handler->flags & HCI_MGMT_NO_HDEV); 6282 - if (no_hdev != !hdev) { 6283 - err = mgmt_cmd_status(sk, index, opcode, 6284 - MGMT_STATUS_INVALID_INDEX); 6285 - goto done; 6286 - } 6287 - 6288 - var_len = (handler->flags & HCI_MGMT_VAR_LEN); 6289 - if ((var_len && len < handler->data_len) || 6290 - (!var_len && len != handler->data_len)) { 6291 - err = mgmt_cmd_status(sk, index, 
opcode, 6292 - MGMT_STATUS_INVALID_PARAMS); 6293 - goto done; 6294 - } 6295 - 6296 - if (hdev) 6297 - mgmt_init_hdev(sk, hdev); 6298 - 6299 - cp = buf + sizeof(*hdr); 6300 - 6301 - err = handler->func(sk, hdev, cp, len); 6302 - if (err < 0) 6303 - goto done; 6304 - 6305 - err = msglen; 6306 - 6307 - done: 6308 - if (hdev) 6309 - hci_dev_put(hdev); 6310 - 6311 - kfree(buf); 6312 - return err; 6313 - } 6314 6432 6315 6433 void mgmt_index_added(struct hci_dev *hdev) 6316 6434 { 6317 - if (hdev->dev_type != HCI_BREDR) 6318 - return; 6435 + struct mgmt_ev_ext_index ev; 6319 6436 6320 6437 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 6321 6438 return; 6322 6439 6323 - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 6324 - mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL); 6325 - else 6326 - mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); 6440 + switch (hdev->dev_type) { 6441 + case HCI_BREDR: 6442 + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 6443 + mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, 6444 + NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); 6445 + ev.type = 0x01; 6446 + } else { 6447 + mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, 6448 + HCI_MGMT_INDEX_EVENTS); 6449 + ev.type = 0x00; 6450 + } 6451 + break; 6452 + case HCI_AMP: 6453 + ev.type = 0x02; 6454 + break; 6455 + default: 6456 + return; 6457 + } 6458 + 6459 + ev.bus = hdev->bus; 6460 + 6461 + mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev), 6462 + HCI_MGMT_EXT_INDEX_EVENTS); 6327 6463 } 6328 6464 6329 6465 void mgmt_index_removed(struct hci_dev *hdev) 6330 6466 { 6467 + struct mgmt_ev_ext_index ev; 6331 6468 u8 status = MGMT_STATUS_INVALID_INDEX; 6332 - 6333 - if (hdev->dev_type != HCI_BREDR) 6334 - return; 6335 6469 6336 6470 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 6337 6471 return; 6338 6472 6339 - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); 6473 + switch (hdev->dev_type) { 6474 + case HCI_BREDR: 6475 + mgmt_pending_foreach(0, hdev, 
cmd_complete_rsp, &status); 6340 6476 6341 - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 6342 - mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL); 6343 - else 6344 - mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); 6477 + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 6478 + mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, 6479 + NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); 6480 + ev.type = 0x01; 6481 + } else { 6482 + mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, 6483 + HCI_MGMT_INDEX_EVENTS); 6484 + ev.type = 0x00; 6485 + } 6486 + break; 6487 + case HCI_AMP: 6488 + ev.type = 0x02; 6489 + break; 6490 + default: 6491 + return; 6492 + } 6493 + 6494 + ev.bus = hdev->bus; 6495 + 6496 + mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), 6497 + HCI_MGMT_EXT_INDEX_EVENTS); 6345 6498 } 6346 6499 6347 6500 /* This function requires the caller holds hdev->lock */ ··· 6630 6535 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); 6631 6536 6632 6537 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) 6633 - mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, 6634 - zero_cod, sizeof(zero_cod), NULL); 6538 + mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, 6539 + zero_cod, sizeof(zero_cod), NULL); 6635 6540 6636 6541 new_settings: 6637 6542 err = new_settings(hdev, match.sk); ··· 6647 6552 struct mgmt_pending_cmd *cmd; 6648 6553 u8 status; 6649 6554 6650 - cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); 6555 + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); 6651 6556 if (!cmd) 6652 6557 return; 6653 6558 ··· 6847 6752 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL); 6848 6753 } 6849 6754 6850 - static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, 6851 - u8 data_len) 6852 - { 6853 - eir[eir_len++] = sizeof(type) + data_len; 6854 - eir[eir_len++] = type; 6855 - memcpy(&eir[eir_len], data, data_len); 6856 - eir_len += data_len; 6857 - 6858 - return eir_len; 6859 - } 6860 - 
6861 6755 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, 6862 6756 u32 flags, u8 *name, u8 name_len) 6863 6757 { ··· 6912 6828 struct mgmt_pending_cmd *cmd; 6913 6829 struct mgmt_mode *cp; 6914 6830 6915 - cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); 6831 + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); 6916 6832 if (!cmd) 6917 6833 return false; 6918 6834 ··· 6969 6885 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 6970 6886 hdev); 6971 6887 6972 - cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); 6888 + cmd = pending_find(MGMT_OP_DISCONNECT, hdev); 6973 6889 if (!cmd) 6974 6890 return; 6975 6891 ··· 7021 6937 { 7022 6938 struct mgmt_pending_cmd *cmd; 7023 6939 7024 - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); 6940 + cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); 7025 6941 if (!cmd) 7026 6942 return; 7027 6943 ··· 7034 6950 { 7035 6951 struct mgmt_pending_cmd *cmd; 7036 6952 7037 - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); 6953 + cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); 7038 6954 if (!cmd) 7039 6955 return; 7040 6956 ··· 7079 6995 { 7080 6996 struct mgmt_pending_cmd *cmd; 7081 6997 7082 - cmd = mgmt_pending_find(opcode, hdev); 6998 + cmd = pending_find(opcode, hdev); 7083 6999 if (!cmd) 7084 7000 return -ENOENT; 7085 7001 ··· 7271 7187 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); 7272 7188 7273 7189 if (!status) 7274 - mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3, 7275 - NULL); 7190 + mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, 7191 + dev_class, 3, NULL); 7276 7192 7277 7193 if (match.sk) 7278 7194 sock_put(match.sk); ··· 7290 7206 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 7291 7207 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); 7292 7208 7293 - cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 7209 + cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 7294 7210 if (!cmd) { 7295 7211 
memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); 7296 7212 7297 7213 /* If this is a HCI command related to powering on the 7298 7214 * HCI dev don't send any mgmt signals. 7299 7215 */ 7300 - if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) 7216 + if (pending_find(MGMT_OP_SET_POWERED, hdev)) 7301 7217 return; 7302 7218 } 7303 7219 7304 - mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), 7305 - cmd ? cmd->sk : NULL); 7220 + mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), 7221 + cmd ? cmd->sk : NULL); 7306 7222 } 7307 7223 7308 7224 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, ··· 7313 7229 7314 7230 BT_DBG("%s status %u", hdev->name, status); 7315 7231 7316 - cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); 7232 + cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); 7317 7233 if (!cmd) 7318 7234 return; 7319 7235 ··· 7595 7511 .channel = HCI_CHANNEL_CONTROL, 7596 7512 .handler_count = ARRAY_SIZE(mgmt_handlers), 7597 7513 .handlers = mgmt_handlers, 7514 + .hdev_init = mgmt_init_hdev, 7598 7515 }; 7599 7516 7600 7517 int mgmt_init(void)
+210
net/bluetooth/mgmt_util.c
··· 1 + /* 2 + BlueZ - Bluetooth protocol stack for Linux 3 + 4 + Copyright (C) 2015 Intel Corporation 5 + 6 + This program is free software; you can redistribute it and/or modify 7 + it under the terms of the GNU General Public License version 2 as 8 + published by the Free Software Foundation; 9 + 10 + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 11 + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 + 19 + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 + SOFTWARE IS DISCLAIMED. 
22 + */ 23 + 24 + #include <net/bluetooth/bluetooth.h> 25 + #include <net/bluetooth/hci_core.h> 26 + #include <net/bluetooth/mgmt.h> 27 + 28 + #include "mgmt_util.h" 29 + 30 + int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, 31 + void *data, u16 data_len, int flag, struct sock *skip_sk) 32 + { 33 + struct sk_buff *skb; 34 + struct mgmt_hdr *hdr; 35 + 36 + skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL); 37 + if (!skb) 38 + return -ENOMEM; 39 + 40 + hdr = (void *) skb_put(skb, sizeof(*hdr)); 41 + hdr->opcode = cpu_to_le16(event); 42 + if (hdev) 43 + hdr->index = cpu_to_le16(hdev->id); 44 + else 45 + hdr->index = cpu_to_le16(MGMT_INDEX_NONE); 46 + hdr->len = cpu_to_le16(data_len); 47 + 48 + if (data) 49 + memcpy(skb_put(skb, data_len), data, data_len); 50 + 51 + /* Time stamp */ 52 + __net_timestamp(skb); 53 + 54 + hci_send_to_channel(channel, skb, flag, skip_sk); 55 + kfree_skb(skb); 56 + 57 + return 0; 58 + } 59 + 60 + int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 61 + { 62 + struct sk_buff *skb; 63 + struct mgmt_hdr *hdr; 64 + struct mgmt_ev_cmd_status *ev; 65 + int err; 66 + 67 + BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); 68 + 69 + skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); 70 + if (!skb) 71 + return -ENOMEM; 72 + 73 + hdr = (void *) skb_put(skb, sizeof(*hdr)); 74 + 75 + hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); 76 + hdr->index = cpu_to_le16(index); 77 + hdr->len = cpu_to_le16(sizeof(*ev)); 78 + 79 + ev = (void *) skb_put(skb, sizeof(*ev)); 80 + ev->status = status; 81 + ev->opcode = cpu_to_le16(cmd); 82 + 83 + err = sock_queue_rcv_skb(sk, skb); 84 + if (err < 0) 85 + kfree_skb(skb); 86 + 87 + return err; 88 + } 89 + 90 + int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, 91 + void *rp, size_t rp_len) 92 + { 93 + struct sk_buff *skb; 94 + struct mgmt_hdr *hdr; 95 + struct mgmt_ev_cmd_complete *ev; 96 + int err; 97 + 98 + BT_DBG("sock %p", sk); 
99 + 100 + skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); 101 + if (!skb) 102 + return -ENOMEM; 103 + 104 + hdr = (void *) skb_put(skb, sizeof(*hdr)); 105 + 106 + hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 107 + hdr->index = cpu_to_le16(index); 108 + hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); 109 + 110 + ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); 111 + ev->opcode = cpu_to_le16(cmd); 112 + ev->status = status; 113 + 114 + if (rp) 115 + memcpy(ev->data, rp, rp_len); 116 + 117 + err = sock_queue_rcv_skb(sk, skb); 118 + if (err < 0) 119 + kfree_skb(skb); 120 + 121 + return err; 122 + } 123 + 124 + struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, 125 + struct hci_dev *hdev) 126 + { 127 + struct mgmt_pending_cmd *cmd; 128 + 129 + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 130 + if (hci_sock_get_channel(cmd->sk) != channel) 131 + continue; 132 + if (cmd->opcode == opcode) 133 + return cmd; 134 + } 135 + 136 + return NULL; 137 + } 138 + 139 + struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, 140 + u16 opcode, 141 + struct hci_dev *hdev, 142 + const void *data) 143 + { 144 + struct mgmt_pending_cmd *cmd; 145 + 146 + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 147 + if (cmd->user_data != data) 148 + continue; 149 + if (cmd->opcode == opcode) 150 + return cmd; 151 + } 152 + 153 + return NULL; 154 + } 155 + 156 + void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 157 + void (*cb)(struct mgmt_pending_cmd *cmd, void *data), 158 + void *data) 159 + { 160 + struct mgmt_pending_cmd *cmd, *tmp; 161 + 162 + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { 163 + if (opcode > 0 && cmd->opcode != opcode) 164 + continue; 165 + 166 + cb(cmd, data); 167 + } 168 + } 169 + 170 + struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 171 + struct hci_dev *hdev, 172 + void *data, u16 len) 173 + { 174 + struct mgmt_pending_cmd *cmd; 175 + 176 + cmd 
= kzalloc(sizeof(*cmd), GFP_KERNEL); 177 + if (!cmd) 178 + return NULL; 179 + 180 + cmd->opcode = opcode; 181 + cmd->index = hdev->id; 182 + 183 + cmd->param = kmemdup(data, len, GFP_KERNEL); 184 + if (!cmd->param) { 185 + kfree(cmd); 186 + return NULL; 187 + } 188 + 189 + cmd->param_len = len; 190 + 191 + cmd->sk = sk; 192 + sock_hold(sk); 193 + 194 + list_add(&cmd->list, &hdev->mgmt_pending); 195 + 196 + return cmd; 197 + } 198 + 199 + void mgmt_pending_free(struct mgmt_pending_cmd *cmd) 200 + { 201 + sock_put(cmd->sk); 202 + kfree(cmd->param); 203 + kfree(cmd); 204 + } 205 + 206 + void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) 207 + { 208 + list_del(&cmd->list); 209 + mgmt_pending_free(cmd); 210 + }
+53
net/bluetooth/mgmt_util.h
··· 1 + /* 2 + BlueZ - Bluetooth protocol stack for Linux 3 + Copyright (C) 2015 Intel Corporation 4 + 5 + This program is free software; you can redistribute it and/or modify 6 + it under the terms of the GNU General Public License version 2 as 7 + published by the Free Software Foundation; 8 + 9 + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 10 + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 11 + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 12 + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 13 + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 14 + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 + 18 + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 19 + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 20 + SOFTWARE IS DISCLAIMED. 
21 + */ 22 + 23 + struct mgmt_pending_cmd { 24 + struct list_head list; 25 + u16 opcode; 26 + int index; 27 + void *param; 28 + size_t param_len; 29 + struct sock *sk; 30 + void *user_data; 31 + int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); 32 + }; 33 + 34 + int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, 35 + void *data, u16 data_len, int flag, struct sock *skip_sk); 36 + int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status); 37 + int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, 38 + void *rp, size_t rp_len); 39 + 40 + struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, 41 + struct hci_dev *hdev); 42 + struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, 43 + u16 opcode, 44 + struct hci_dev *hdev, 45 + const void *data); 46 + void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 47 + void (*cb)(struct mgmt_pending_cmd *cmd, void *data), 48 + void *data); 49 + struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 50 + struct hci_dev *hdev, 51 + void *data, u16 len); 52 + void mgmt_pending_free(struct mgmt_pending_cmd *cmd); 53 + void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
+232 -46
net/bluetooth/smp.c
··· 70 70 SMP_FLAG_DEBUG_KEY, 71 71 SMP_FLAG_WAIT_USER, 72 72 SMP_FLAG_DHKEY_PENDING, 73 - SMP_FLAG_OOB, 73 + SMP_FLAG_REMOTE_OOB, 74 + SMP_FLAG_LOCAL_OOB, 75 + }; 76 + 77 + struct smp_dev { 78 + /* Secure Connections OOB data */ 79 + u8 local_pk[64]; 80 + u8 local_sk[32]; 81 + u8 local_rand[16]; 82 + bool debug_key; 83 + 84 + struct crypto_blkcipher *tfm_aes; 85 + struct crypto_hash *tfm_cmac; 74 86 }; 75 87 76 88 struct smp_chan { ··· 96 84 u8 rrnd[16]; /* SMP Pairing Random (remote) */ 97 85 u8 pcnf[16]; /* SMP Pairing Confirm */ 98 86 u8 tk[16]; /* SMP Temporary Key */ 99 - u8 rr[16]; 87 + u8 rr[16]; /* Remote OOB ra/rb value */ 88 + u8 lr[16]; /* Local OOB ra/rb value */ 100 89 u8 enc_key_size; 101 90 u8 remote_key_dist; 102 91 bdaddr_t id_addr; ··· 491 478 const bdaddr_t *bdaddr) 492 479 { 493 480 struct l2cap_chan *chan = hdev->smp_data; 494 - struct crypto_blkcipher *tfm; 481 + struct smp_dev *smp; 495 482 u8 hash[3]; 496 483 int err; 497 484 498 485 if (!chan || !chan->data) 499 486 return false; 500 487 501 - tfm = chan->data; 488 + smp = chan->data; 502 489 503 490 BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); 504 491 505 - err = smp_ah(tfm, irk, &bdaddr->b[3], hash); 492 + err = smp_ah(smp->tfm_aes, irk, &bdaddr->b[3], hash); 506 493 if (err) 507 494 return false; 508 495 ··· 512 499 int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) 513 500 { 514 501 struct l2cap_chan *chan = hdev->smp_data; 515 - struct crypto_blkcipher *tfm; 502 + struct smp_dev *smp; 516 503 int err; 517 504 518 505 if (!chan || !chan->data) 519 506 return -EOPNOTSUPP; 520 507 521 - tfm = chan->data; 508 + smp = chan->data; 522 509 523 510 get_random_bytes(&rpa->b[3], 3); 524 511 525 512 rpa->b[5] &= 0x3f; /* Clear two most significant bits */ 526 513 rpa->b[5] |= 0x40; /* Set second most significant bit */ 527 514 528 - err = smp_ah(tfm, irk, &rpa->b[3], rpa->b); 515 + err = smp_ah(smp->tfm_aes, irk, &rpa->b[3], rpa->b); 529 516 if (err < 0) 530 517 return 
err; 531 518 532 519 BT_DBG("RPA %pMR", rpa); 520 + 521 + return 0; 522 + } 523 + 524 + int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) 525 + { 526 + struct l2cap_chan *chan = hdev->smp_data; 527 + struct smp_dev *smp; 528 + int err; 529 + 530 + if (!chan || !chan->data) 531 + return -EOPNOTSUPP; 532 + 533 + smp = chan->data; 534 + 535 + if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { 536 + BT_DBG("Using debug keys"); 537 + memcpy(smp->local_pk, debug_pk, 64); 538 + memcpy(smp->local_sk, debug_sk, 32); 539 + smp->debug_key = true; 540 + } else { 541 + while (true) { 542 + /* Generate local key pair for Secure Connections */ 543 + if (!ecc_make_key(smp->local_pk, smp->local_sk)) 544 + return -EIO; 545 + 546 + /* This is unlikely, but we need to check that 547 + * we didn't accidentially generate a debug key. 548 + */ 549 + if (memcmp(smp->local_sk, debug_sk, 32)) 550 + break; 551 + } 552 + smp->debug_key = false; 553 + } 554 + 555 + SMP_DBG("OOB Public Key X: %32phN", smp->local_pk); 556 + SMP_DBG("OOB Public Key Y: %32phN", smp->local_pk + 32); 557 + SMP_DBG("OOB Private Key: %32phN", smp->local_sk); 558 + 559 + get_random_bytes(smp->local_rand, 16); 560 + 561 + err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->local_pk, 562 + smp->local_rand, 0, hash); 563 + if (err < 0) 564 + return err; 565 + 566 + memcpy(rand, smp->local_rand, 16); 533 567 534 568 return 0; 535 569 } ··· 681 621 oob_data = hci_find_remote_oob_data(hdev, &hcon->dst, 682 622 bdaddr_type); 683 623 if (oob_data && oob_data->present) { 684 - set_bit(SMP_FLAG_OOB, &smp->flags); 624 + set_bit(SMP_FLAG_REMOTE_OOB, &smp->flags); 685 625 oob_flag = SMP_OOB_PRESENT; 686 626 memcpy(smp->rr, oob_data->rand256, 16); 687 627 memcpy(smp->pcnf, oob_data->hash256, 16); 628 + SMP_DBG("OOB Remote Confirmation: %16phN", smp->pcnf); 629 + SMP_DBG("OOB Remote Random: %16phN", smp->rr); 688 630 } 689 631 690 632 } else { ··· 743 681 complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); 744 682 
mgmt_smp_complete(hcon, complete); 745 683 746 - kfree(smp->csrk); 747 - kfree(smp->slave_csrk); 748 - kfree(smp->link_key); 684 + kzfree(smp->csrk); 685 + kzfree(smp->slave_csrk); 686 + kzfree(smp->link_key); 749 687 750 688 crypto_free_blkcipher(smp->tfm_aes); 751 689 crypto_free_hash(smp->tfm_cmac); ··· 779 717 } 780 718 781 719 chan->data = NULL; 782 - kfree(smp); 720 + kzfree(smp); 783 721 hci_conn_drop(hcon); 784 722 } 785 723 ··· 879 817 set_bit(SMP_FLAG_TK_VALID, &smp->flags); 880 818 return 0; 881 819 } 820 + 821 + /* If this function is used for SC -> legacy fallback we 822 + * can only recover the just-works case. 823 + */ 824 + if (test_bit(SMP_FLAG_SC, &smp->flags)) 825 + return -EINVAL; 882 826 883 827 /* Not Just Works/Confirm results in MITM Authentication */ 884 828 if (smp->method != JUST_CFM) { ··· 1165 1097 return; 1166 1098 1167 1099 if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { 1168 - kfree(smp->link_key); 1100 + kzfree(smp->link_key); 1169 1101 smp->link_key = NULL; 1170 1102 return; 1171 1103 } 1172 1104 1173 1105 if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) { 1174 - kfree(smp->link_key); 1106 + kzfree(smp->link_key); 1175 1107 smp->link_key = NULL; 1176 1108 return; 1177 1109 } ··· 1368 1300 smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); 1369 1301 if (IS_ERR(smp->tfm_aes)) { 1370 1302 BT_ERR("Unable to create ECB crypto context"); 1371 - kfree(smp); 1303 + kzfree(smp); 1372 1304 return NULL; 1373 1305 } 1374 1306 ··· 1376 1308 if (IS_ERR(smp->tfm_cmac)) { 1377 1309 BT_ERR("Unable to create CMAC crypto context"); 1378 1310 crypto_free_blkcipher(smp->tfm_aes); 1379 - kfree(smp); 1311 + kzfree(smp); 1380 1312 return NULL; 1381 1313 } 1382 1314 ··· 1743 1675 memcpy(&smp->preq[1], req, sizeof(*req)); 1744 1676 skb_pull(skb, sizeof(*req)); 1745 1677 1678 + /* If the remote side's OOB flag is set it means it has 1679 + * successfully received our local OOB data - therefore set the 1680 + * 
flag to indicate that local OOB is in use. 1681 + */ 1682 + if (req->oob_flag == SMP_OOB_PRESENT) 1683 + set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); 1684 + 1746 1685 /* SMP over BR/EDR requires special treatment */ 1747 1686 if (conn->hcon->type == ACL_LINK) { 1748 1687 /* We must have a BR/EDR SC link */ ··· 1812 1737 1813 1738 clear_bit(SMP_FLAG_INITIATOR, &smp->flags); 1814 1739 1740 + /* Strictly speaking we shouldn't allow Pairing Confirm for the 1741 + * SC case, however some implementations incorrectly copy RFU auth 1742 + * req bits from our security request, which may create a false 1743 + * positive SC enablement. 1744 + */ 1745 + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); 1746 + 1815 1747 if (test_bit(SMP_FLAG_SC, &smp->flags)) { 1816 1748 SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); 1817 1749 /* Clear bits which are generated but not distributed */ ··· 1826 1744 /* Wait for Public Key from Initiating Device */ 1827 1745 return 0; 1828 1746 } 1829 - 1830 - SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); 1831 1747 1832 1748 /* Request setup of TK */ 1833 1749 ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); ··· 1840 1760 struct hci_dev *hdev = smp->conn->hcon->hdev; 1841 1761 1842 1762 BT_DBG(""); 1763 + 1764 + if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { 1765 + struct l2cap_chan *chan = hdev->smp_data; 1766 + struct smp_dev *smp_dev; 1767 + 1768 + if (!chan || !chan->data) 1769 + return SMP_UNSPECIFIED; 1770 + 1771 + smp_dev = chan->data; 1772 + 1773 + memcpy(smp->local_pk, smp_dev->local_pk, 64); 1774 + memcpy(smp->local_sk, smp_dev->local_sk, 32); 1775 + memcpy(smp->lr, smp_dev->local_rand, 16); 1776 + 1777 + if (smp_dev->debug_key) 1778 + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); 1779 + 1780 + goto done; 1781 + } 1843 1782 1844 1783 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { 1845 1784 BT_DBG("Using debug keys"); ··· 1879 1780 } 1880 1781 } 1881 1782 1783 + done: 1882 1784 SMP_DBG("Local Public Key X: %32phN", smp->local_pk); 
1883 - SMP_DBG("Local Public Key Y: %32phN", &smp->local_pk[32]); 1785 + SMP_DBG("Local Public Key Y: %32phN", smp->local_pk + 32); 1884 1786 SMP_DBG("Local Private Key: %32phN", smp->local_sk); 1885 1787 1886 1788 smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk); ··· 1918 1818 1919 1819 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) 1920 1820 return SMP_AUTH_REQUIREMENTS; 1821 + 1822 + /* If the remote side's OOB flag is set it means it has 1823 + * successfully received our local OOB data - therefore set the 1824 + * flag to indicate that local OOB is in use. 1825 + */ 1826 + if (rsp->oob_flag == SMP_OOB_PRESENT) 1827 + set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); 1921 1828 1922 1829 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 1923 1830 memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); ··· 1992 1885 1993 1886 BT_DBG(""); 1994 1887 1995 - /* Public Key exchange must happen before any other steps */ 1996 - if (!test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) 1997 - return SMP_UNSPECIFIED; 1998 - 1999 1888 if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) 2000 1889 return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM); 2001 1890 ··· 2000 1897 smp->prnd); 2001 1898 SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); 2002 1899 } 1900 + 1901 + return 0; 1902 + } 1903 + 1904 + /* Work-around for some implementations that incorrectly copy RFU bits 1905 + * from our security request and thereby create the impression that 1906 + * we're doing SC when in fact the remote doesn't support it. 
1907 + */ 1908 + static int fixup_sc_false_positive(struct smp_chan *smp) 1909 + { 1910 + struct l2cap_conn *conn = smp->conn; 1911 + struct hci_conn *hcon = conn->hcon; 1912 + struct hci_dev *hdev = hcon->hdev; 1913 + struct smp_cmd_pairing *req, *rsp; 1914 + u8 auth; 1915 + 1916 + /* The issue is only observed when we're in slave role */ 1917 + if (hcon->out) 1918 + return SMP_UNSPECIFIED; 1919 + 1920 + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 1921 + BT_ERR("Refusing SMP SC -> legacy fallback in SC-only mode"); 1922 + return SMP_UNSPECIFIED; 1923 + } 1924 + 1925 + BT_ERR("Trying to fall back to legacy SMP"); 1926 + 1927 + req = (void *) &smp->preq[1]; 1928 + rsp = (void *) &smp->prsp[1]; 1929 + 1930 + /* Rebuild key dist flags which may have been cleared for SC */ 1931 + smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist); 1932 + 1933 + auth = req->auth_req & AUTH_REQ_MASK(hdev); 1934 + 1935 + if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) { 1936 + BT_ERR("Failed to fall back to legacy SMP"); 1937 + return SMP_UNSPECIFIED; 1938 + } 1939 + 1940 + clear_bit(SMP_FLAG_SC, &smp->flags); 2003 1941 2004 1942 return 0; 2005 1943 } ··· 2058 1914 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); 2059 1915 skb_pull(skb, sizeof(smp->pcnf)); 2060 1916 2061 - if (test_bit(SMP_FLAG_SC, &smp->flags)) 2062 - return sc_check_confirm(smp); 1917 + if (test_bit(SMP_FLAG_SC, &smp->flags)) { 1918 + int ret; 1919 + 1920 + /* Public Key exchange must happen before any other steps */ 1921 + if (test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) 1922 + return sc_check_confirm(smp); 1923 + 1924 + BT_ERR("Unexpected SMP Pairing Confirm"); 1925 + 1926 + ret = fixup_sc_false_positive(smp); 1927 + if (ret) 1928 + return ret; 1929 + } 2063 1930 2064 1931 if (conn->hcon->out) { 2065 1932 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), ··· 2529 2374 struct smp_cmd_pairing *local, *remote; 2530 2375 u8 local_mitm, remote_mitm, local_io, remote_io, 
method; 2531 2376 2532 - if (test_bit(SMP_FLAG_OOB, &smp->flags)) 2377 + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags) || 2378 + test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) 2533 2379 return REQ_OOB; 2534 2380 2535 2381 /* The preq/prsp contain the raw Pairing Request/Response PDUs ··· 2584 2428 2585 2429 memcpy(smp->remote_pk, key, 64); 2586 2430 2431 + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) { 2432 + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, 2433 + smp->rr, 0, cfm.confirm_val); 2434 + if (err) 2435 + return SMP_UNSPECIFIED; 2436 + 2437 + if (memcmp(cfm.confirm_val, smp->pcnf, 16)) 2438 + return SMP_CONFIRM_FAILED; 2439 + } 2440 + 2587 2441 /* Non-initiating device sends its public key after receiving 2588 2442 * the key from the initiating device. 2589 2443 */ ··· 2604 2438 } 2605 2439 2606 2440 SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk); 2607 - SMP_DBG("Remote Public Key Y: %32phN", &smp->remote_pk[32]); 2441 + SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32); 2608 2442 2609 2443 if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey)) 2610 2444 return SMP_UNSPECIFIED; ··· 2642 2476 } 2643 2477 2644 2478 if (smp->method == REQ_OOB) { 2645 - err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, 2646 - smp->rr, 0, cfm.confirm_val); 2647 - if (err) 2648 - return SMP_UNSPECIFIED; 2649 - 2650 - if (memcmp(cfm.confirm_val, smp->pcnf, 16)) 2651 - return SMP_CONFIRM_FAILED; 2652 - 2653 2479 if (hcon->out) 2654 2480 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, 2655 2481 sizeof(smp->prnd), smp->prnd); ··· 2714 2556 2715 2557 if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) 2716 2558 put_unaligned_le32(hcon->passkey_notify, r); 2559 + else if (smp->method == REQ_OOB) 2560 + memcpy(r, smp->lr, 16); 2717 2561 2718 2562 err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r, 2719 2563 io_cap, remote_addr, local_addr, e); ··· 3090 2930 static struct l2cap_chan *smp_add_cid(struct hci_dev 
*hdev, u16 cid) 3091 2931 { 3092 2932 struct l2cap_chan *chan; 3093 - struct crypto_blkcipher *tfm_aes; 2933 + struct smp_dev *smp; 2934 + struct crypto_blkcipher *tfm_aes; 2935 + struct crypto_hash *tfm_cmac; 3094 2936 3095 2937 if (cid == L2CAP_CID_SMP_BREDR) { 3096 - tfm_aes = NULL; 2938 + smp = NULL; 3097 2939 goto create_chan; 3098 2940 } 3099 2941 3100 - tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0); 2942 + smp = kzalloc(sizeof(*smp), GFP_KERNEL); 2943 + if (!smp) 2944 + return ERR_PTR(-ENOMEM); 2945 + 2946 + tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); 3101 2947 if (IS_ERR(tfm_aes)) { 3102 - BT_ERR("Unable to create crypto context"); 2948 + BT_ERR("Unable to create ECB crypto context"); 2949 + kzfree(smp); 3103 2950 return ERR_CAST(tfm_aes); 3104 2951 } 2952 + 2953 + tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC); 2954 + if (IS_ERR(tfm_cmac)) { 2955 + BT_ERR("Unable to create CMAC crypto context"); 2956 + crypto_free_blkcipher(tfm_aes); 2957 + kzfree(smp); 2958 + return ERR_CAST(tfm_cmac); 2959 + } 2960 + 2961 + smp->tfm_aes = tfm_aes; 2962 + smp->tfm_cmac = tfm_cmac; 3105 2963 3106 2964 create_chan: 3107 2965 chan = l2cap_chan_create(); 3108 2966 if (!chan) { 3109 - crypto_free_blkcipher(tfm_aes); 2967 + if (smp) { 2968 + crypto_free_blkcipher(smp->tfm_aes); 2969 + crypto_free_hash(smp->tfm_cmac); 2970 + kzfree(smp); 2971 + } 3110 2972 return ERR_PTR(-ENOMEM); 3111 2973 } 3112 2974 3113 - chan->data = tfm_aes; 2975 + chan->data = smp; 3114 2976 3115 2977 l2cap_add_scid(chan, cid); 3116 2978 ··· 3165 2983 3166 2984 static void smp_del_chan(struct l2cap_chan *chan) 3167 2985 { 3168 - struct crypto_blkcipher *tfm_aes; 2986 + struct smp_dev *smp; 3169 2987 3170 2988 BT_DBG("chan %p", chan); 3171 2989 3172 - tfm_aes = chan->data; 3173 - if (tfm_aes) { 2990 + smp = chan->data; 2991 + if (smp) { 3174 2992 chan->data = NULL; 3175 - crypto_free_blkcipher(tfm_aes); 2993 + if (smp->tfm_aes) 2994 + 
crypto_free_blkcipher(smp->tfm_aes); 2995 + if (smp->tfm_cmac) 2996 + crypto_free_hash(smp->tfm_cmac); 2997 + kzfree(smp); 3176 2998 } 3177 2999 3178 3000 l2cap_chan_put(chan);
+1
net/bluetooth/smp.h
··· 188 188 bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], 189 189 const bdaddr_t *bdaddr); 190 190 int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa); 191 + int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]); 191 192 192 193 int smp_register(struct hci_dev *hdev); 193 194 void smp_unregister(struct hci_dev *hdev);
+2 -2
net/mac802154/driver-ops.h
··· 1 - #ifndef __MAC802154_DRVIER_OPS 1 + #ifndef __MAC802154_DRIVER_OPS 2 2 #define __MAC802154_DRIVER_OPS 3 3 4 4 #include <linux/types.h> ··· 220 220 return local->ops->set_promiscuous_mode(&local->hw, on); 221 221 } 222 222 223 - #endif /* __MAC802154_DRVIER_OPS */ 223 + #endif /* __MAC802154_DRIVER_OPS */