Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
fork
Configure Feed
Select the types of activity you want to include in your feed.
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/idr.h>
30#include <linux/rfkill.h>
31#include <linux/debugfs.h>
32#include <linux/crypto.h>
33#include <linux/property.h>
34#include <linux/suspend.h>
35#include <linux/wait.h>
36#include <asm/unaligned.h>
37
38#include <net/bluetooth/bluetooth.h>
39#include <net/bluetooth/hci_core.h>
40#include <net/bluetooth/l2cap.h>
41#include <net/bluetooth/mgmt.h>
42
43#include "hci_request.h"
44#include "hci_debugfs.h"
45#include "smp.h"
46#include "leds.h"
47#include "msft.h"
48
49static void hci_rx_work(struct work_struct *work);
50static void hci_cmd_work(struct work_struct *work);
51static void hci_tx_work(struct work_struct *work);
52
53/* HCI device list */
54LIST_HEAD(hci_dev_list);
55DEFINE_RWLOCK(hci_dev_list_lock);
56
57/* HCI callback list */
58LIST_HEAD(hci_cb_list);
59DEFINE_MUTEX(hci_cb_list_lock);
60
61/* HCI ID Numbering */
62static DEFINE_IDA(hci_index_ida);
63
64/* ---- HCI debugfs entries ---- */
65
/* debugfs read handler for "dut_mode": returns "Y\n" or "N\n" depending on
 * whether the HCI_DUT_MODE device flag is currently set.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	/* Only two bytes ("Y\n") are exposed; the NUL is not copied out */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
77
/* debugfs write handler for "dut_mode": parses a boolean from userspace and
 * enables or disables Device Under Test mode on a running controller.
 * Enabling sends HCI_OP_ENABLE_DUT_MODE; disabling resets the controller,
 * since there is no dedicated "exit DUT mode" command.
 *
 * Returns count on success, -ENETDOWN if the device is not up, -EALREADY if
 * the requested state matches the current one, or a negative errno from
 * parsing or from the synchronous HCI command.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Serialize against other synchronous HCI request users */
	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Command completed; the reply payload itself is not needed */
	kfree_skb(skb);

	/* Toggle the flag only after the controller accepted the command */
	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
114
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
121
/* debugfs read handler for "vendor_diag": returns "Y\n" or "N\n" depending
 * on whether the HCI_VENDOR_DIAG device flag is currently set.
 */
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	/* Only two bytes ("Y\n") are exposed; the NUL is not copied out */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
133
/* debugfs write handler for "vendor_diag": parses a boolean from userspace
 * and enables or disables vendor-specific diagnostics via the driver's
 * set_diag callback, then records the state in the HCI_VENDOR_DIAG flag.
 *
 * Returns count on success or a negative errno from parsing or from the
 * driver callback.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	/* This file is only created when hdev->set_diag is set, so the
	 * callback can be invoked unconditionally here.
	 */
	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
170
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
177
/* Create the basic debugfs entries that every controller gets: "dut_mode"
 * always, and "vendor_diag" only when the driver provides a set_diag
 * callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
187
/* Request builder that queues an HCI Reset command and marks the device as
 * being in reset so the event path can recognize the state.
 */
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}
197
/* Stage-one init for BR/EDR (primary) controllers: select packet-based flow
 * control and queue the basic identity queries.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
211
/* Stage-one init for AMP controllers: select block-based flow control and
 * queue the AMP-specific capability queries.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
234
/* Stage-two init for AMP controllers. */
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
246
/* First stage of controller initialization: optionally reset, then dispatch
 * to the device-type specific stage-one init (BR/EDR primary or AMP).
 */
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
271
272static void bredr_setup(struct hci_request *req)
273{
274 __le16 param;
275 __u8 flt_type;
276
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
279
280 /* Read Class of Device */
281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
282
283 /* Read Local Name */
284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
285
286 /* Read Voice Setting */
287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
288
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
298
299 /* Connection accept timeout ~20 secs */
300 param = cpu_to_le16(0x7d00);
301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
302}
303
/* Stage-two LE setup: queue the LE capability queries and, for LE-only
 * controllers, mark LE as implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
321
/* Build and queue the HCI Set Event Mask command, enabling only the events
 * that the controller's LMP features and supported-commands bitmap indicate
 * it can generate. Skipped entirely for pre-1.2 controllers.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
417
/* Second stage of controller initialization. AMP controllers take a short
 * separate path; primary controllers get BR/EDR and/or LE setup plus SSP,
 * inquiry mode, extended features and link-security configuration.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any cached EIR data on both
			 * the host and the controller side.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
501
/* Build the default link policy from the controller's LMP capabilities and
 * queue a Write Default Link Policy Settings command.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
520
/* Queue a Write LE Host Supported command for dual-mode controllers when the
 * desired LE-enabled state differs from what the controller currently
 * reports via its host features.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if it would actually change the setting */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
541
/* Build and, if any bit is actually needed, queue the Set Event Mask Page 2
 * command covering CSB master/slave and authenticated-payload-timeout
 * events.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}
586
/* Third stage of controller initialization: program the event mask, link
 * policy and page-scan settings, then (for LE-capable controllers) the LE
 * event mask and LE feature queries, and finally read any extended feature
 * pages beyond page 1.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	if (hdev->commands[18] & 0x04)
		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports LL Privacy feature, enable
		 * the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
			events[1] |= 0x02; /* LE Enhanced Connection
					    * Complete
					    */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08; /* LE Channel Selection
					    * Algorithm
					    */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02; /* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01; /* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04; /* LE Connection Update
					    * Complete
					    */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08; /* LE Read Remote Used
					    * Features Complete
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08; /* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10; /* LE Extended Advertising
					    * Report
					    */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02; /* LE Advertising Set
					    * Terminated
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* HCI TS spec forbids mixing of legacy and extended
			 * advertising commands wherein READ_ADV_TX_POWER is
			 * also included. So do not call it if extended adv
			 * is supported otherwise controller will return
			 * COMMAND_DISALLOWED for extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
794
/* Fourth stage of controller initialization: optional cleanups and feature
 * configuration that depend on what the first three stages discovered
 * (stored link keys, event mask page 2, codecs, secure connections,
 * erroneous data reporting, LE data length and default PHY).
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value
	 */
	if (hdev->commands[18] & 0x08) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
893
/* Run the full four-stage synchronous initialization of a configured
 * controller. AMP controllers stop after stage two. debugfs entries are
 * created only during the setup or config phase.
 *
 * Returns 0 on success or a negative errno from any stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
950
/* Minimal init request used for unconfigured controllers: optional reset,
 * version query, and BD address read only when the driver can set one.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
970
/* Initialize an unconfigured controller. Raw devices are left untouched.
 * Returns 0 on success or a negative errno from the init request.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
987
/* Request builder: queue a Write Scan Enable command with the scan mode
 * passed in opt.
 */
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}
998
/* Request builder: queue a Write Authentication Enable command with the
 * setting passed in opt.
 */
static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}
1009
/* Request builder: queue a Write Encryption Mode command with the setting
 * passed in opt.
 */
static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}
1020
/* Request builder: queue a Write Default Link Policy Settings command with
 * the policy passed in opt (converted to little endian on the wire).
 */
static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
1031
/* Get HCI device by index.
 *
 * Walks the global device list under the read lock and takes a reference
 * on the matching device. Device is held on return; the caller must drop
 * the reference with hci_dev_put(). Returns NULL if the index is negative
 * or no device with that id exists.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1053
1054/* ---- Inquiry support ---- */
1055
1056bool hci_discovery_active(struct hci_dev *hdev)
1057{
1058 struct discovery_state *discov = &hdev->discovery;
1059
1060 switch (discov->state) {
1061 case DISCOVERY_FINDING:
1062 case DISCOVERY_RESOLVING:
1063 return true;
1064
1065 default:
1066 return false;
1067 }
1068}
1069
/* Transition the discovery state machine to a new state and emit the
 * corresponding management-interface notifications. A no-op when the state
 * is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Discovery is over; resume passive background scanning */
		hci_update_background_scan(hdev);

		/* Only notify if userspace already saw discovery start */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1099
/* Free all inquiry cache entries and reset the cache lists.
 *
 * Every entry is linked on the ->all list, so freeing that list frees
 * everything; the ->unknown and ->resolve lists hold secondary links
 * into the same entries and are simply re-initialized afterwards.
 * Caller is expected to hold hdev->lock (see callers in this file).
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
1113
/* Look up an inquiry cache entry by Bluetooth device address on the
 * list of all cached entries. Returns the entry or NULL if not cached.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1129
/* Look up an entry by address on the list of devices whose remote name
 * is not yet known. Returns the entry or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1145
/* Look up an entry on the name-resolve list.
 *
 * If @bdaddr is BDADDR_ANY, the first entry whose name_state matches
 * @state is returned; otherwise the entry with the matching address is
 * returned regardless of state. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1164
/* Re-insert @ie into the resolve list, keeping the list sorted so that
 * entries with stronger signal (smaller |RSSI|) come first and entries
 * already being resolved (NAME_PENDING) are skipped over. This decides
 * the order in which remote names are resolved.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position whose entry still outranks @ie; stop at
	 * the first non-pending entry with a weaker or equal signal.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1183
/* Insert or refresh an inquiry cache entry for the device described by
 * @data.
 *
 * @name_known: whether the remote name is already known to the caller.
 *
 * Returns MGMT_DEV_FOUND_* flags describing how the device should be
 * reported: LEGACY_PAIRING when no SSP support was seen, CONFIRM_NAME
 * when the name still needs to be resolved (also set on allocation
 * failure, as a safe fallback).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A freshly discovered device invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change can reorder the name-resolve queue, which
		 * is sorted by signal strength.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN once the name arrives,
	 * removing it from the unknown/resolve secondary list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1245
/* Serialize up to @num cached inquiry entries into @buf as an array of
 * struct inquiry_info, for copying to user space. Returns the number of
 * entries written. Must not sleep; callers hold hdev->lock (see
 * hci_inquiry()).
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1273
/* Request builder: start an HCI Inquiry using the parameters in the
 * struct hci_inquiry_req pointed to by @opt. Does nothing if an inquiry
 * is already running. Always returns 0.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
1293
1294int hci_inquiry(void __user *arg)
1295{
1296 __u8 __user *ptr = arg;
1297 struct hci_inquiry_req ir;
1298 struct hci_dev *hdev;
1299 int err = 0, do_inquiry = 0, max_rsp;
1300 long timeo;
1301 __u8 *buf;
1302
1303 if (copy_from_user(&ir, ptr, sizeof(ir)))
1304 return -EFAULT;
1305
1306 hdev = hci_dev_get(ir.dev_id);
1307 if (!hdev)
1308 return -ENODEV;
1309
1310 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1311 err = -EBUSY;
1312 goto done;
1313 }
1314
1315 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1316 err = -EOPNOTSUPP;
1317 goto done;
1318 }
1319
1320 if (hdev->dev_type != HCI_PRIMARY) {
1321 err = -EOPNOTSUPP;
1322 goto done;
1323 }
1324
1325 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1326 err = -EOPNOTSUPP;
1327 goto done;
1328 }
1329
1330 hci_dev_lock(hdev);
1331 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1332 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1333 hci_inquiry_cache_flush(hdev);
1334 do_inquiry = 1;
1335 }
1336 hci_dev_unlock(hdev);
1337
1338 timeo = ir.length * msecs_to_jiffies(2000);
1339
1340 if (do_inquiry) {
1341 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1342 timeo, NULL);
1343 if (err < 0)
1344 goto done;
1345
1346 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1347 * cleared). If it is interrupted by a signal, return -EINTR.
1348 */
1349 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1350 TASK_INTERRUPTIBLE))
1351 return -EINTR;
1352 }
1353
1354 /* for unlimited number of responses we will use buffer with
1355 * 255 entries
1356 */
1357 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1358
1359 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1360 * copy it to the user space.
1361 */
1362 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1363 if (!buf) {
1364 err = -ENOMEM;
1365 goto done;
1366 }
1367
1368 hci_dev_lock(hdev);
1369 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1370 hci_dev_unlock(hdev);
1371
1372 BT_DBG("num_rsp %d", ir.num_rsp);
1373
1374 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1375 ptr += sizeof(ir);
1376 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1377 ir.num_rsp))
1378 err = -EFAULT;
1379 } else
1380 err = -EFAULT;
1381
1382 kfree(buf);
1383
1384done:
1385 hci_dev_put(hdev);
1386 return err;
1387}
1388
1389/**
1390 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1391 * (BD_ADDR) for a HCI device from
1392 * a firmware node property.
1393 * @hdev: The HCI device
1394 *
1395 * Search the firmware node for 'local-bd-address'.
1396 *
1397 * All-zero BD addresses are rejected, because those could be properties
1398 * that exist in the firmware tables, but were not updated by the firmware. For
1399 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1400 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	/* Missing property or an all-zero placeholder address: leave
	 * hdev->public_addr untouched (see the comment above).
	 */
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}
1414
/* Bring the controller up: open the transport, run the (optional)
 * vendor setup stage, initialize the controller and transition it to
 * the HCI_UP state. On any failure the transport is closed again and
 * all queues/works are cleaned up. Returns 0 or a negative errno.
 * Serialized against other open/close/reset paths via the request
 * sync lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the underlying transport (USB, UART, ...) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup stage: runs on first power-on, or on every
	 * power-on for transports that lose their setup state.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	/* Full controller initialization, skipped for unconfigured
	 * controllers and for user channel (raw) access.
	 */
	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1619
1620/* ---- HCI ioctl helpers ---- */
1621
/* Power on device @dev on behalf of the HCIDEVUP ioctl (or the user
 * channel path). Validates the configuration state, synchronizes with
 * any pending power work, then delegates to hci_dev_do_open().
 * Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1676
1677/* This function requires the caller holds hdev->lock */
1678static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1679{
1680 struct hci_conn_params *p;
1681
1682 list_for_each_entry(p, &hdev->le_conn_params, list) {
1683 if (p->conn) {
1684 hci_conn_drop(p->conn);
1685 hci_conn_put(p->conn);
1686 p->conn = NULL;
1687 }
1688 list_del_init(&p->action);
1689 }
1690
1691 BT_DBG("All LE pending actions cleared");
1692}
1693
/* Power the controller down: run the vendor shutdown hook, flush all
 * pending work and queues, tear down connections and discovery state,
 * optionally issue an HCI Reset, and finally close the transport.
 * Returns 0 (also when the device was already down).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* Nothing further to do if the device was not up; just stop the
	 * command timer and bail out.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only announce the power-off over mgmt when userspace initiated
	 * it; the auto-off timer path stays silent.
	 */
	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* Wake up any suspend task waiting for the power-down to finish */
	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1824
/* Power off device @dev on behalf of the HCIDEVDOWN ioctl. Rejects the
 * request while the device is in user channel mode, otherwise cancels
 * any pending auto-off work and delegates to hci_dev_do_close().
 * Returns 0 or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1848
/* Reset the controller without closing the transport: drop all queued
 * packets, flush connection and inquiry state, reset the flow-control
 * counters and issue a synchronous HCI Reset. Returns the result of
 * the reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing and zero the per-type packet credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1882
/* Handle the HCIDEVRESET ioctl for device @dev. The device must be up,
 * not in user channel mode and configured; otherwise the matching errno
 * is returned. Delegates the actual work to hci_dev_do_reset().
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1913
/* Handle the HCIDEVRESTAT ioctl: zero the device's byte/error counters.
 * Rejected for user channel and unconfigured devices. Returns 0 or a
 * negative errno.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
1939
/* Mirror a legacy HCISETSCAN change into the mgmt-visible CONNECTABLE
 * and DISCOVERABLE flags and, when something actually changed on a
 * mgmt-managed controller, emit a New Settings event.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable cannot survive without general
		 * discoverability, so clear it as well.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1975
/* Dispatch the legacy per-device HCI ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...). @arg points to a struct hci_dev_req whose dev_opt
 * field carries the command-specific value. Returns 0 or a negative
 * errno. All commands are rejected for user channel, unconfigured,
 * non-primary and BR/EDR-disabled devices.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	/* NOTE(review): dev_opt is split into two __u16 halves here by
	 * pointer arithmetic, so which half is MTU vs packet count is
	 * host-endianness dependent — presumably matching what hcitool
	 * packs; verify against the userspace ABI before changing.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2081
/* Handle the HCIGETDEVLIST ioctl: copy up to dev_num (id, flags) pairs
 * for the registered HCI devices back to user space. @arg points to a
 * struct hci_dev_list_req whose first __u16 holds the caller's
 * requested capacity. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back as many entries as were actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2131
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot
 * for the device identified by di.dev_id and copy it back to user
 * space. Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): unbounded strcpy — presumably hdev->name always
	 * fits di.name ("hciN"); confirm the field sizes before relying
	 * on this.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report the LE buffer settings in the
		 * ACL fields since the structure has no LE-specific slots.
		 */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2184
2185/* ---- Interface to HCI drivers ---- */
2186
/* rfkill callback: track the RF-kill state in HCI_RFKILLED and power
 * the device down when it gets blocked (unless it is still in the
 * setup/config stage, where the flag alone defers the shutdown).
 * Returns -EBUSY for user channel devices, 0 otherwise.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
2207
/* rfkill operations registered for every HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2211
/* Work handler for hdev->power_on: bring the device up, re-check the
 * error conditions that the setup phase deliberately ignored, arm the
 * auto-off timer where applicable, and announce the index over mgmt
 * when the setup or config phase just completed.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Device already up with the auto-off timer pending: userspace is
	 * taking over, so just cancel the timer and finish the mgmt
	 * power-on sequence without reopening the device.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2283
/* Deferred handler for the hdev->power_off delayed work: simply close
 * the device (queued e.g. by the auto-off timeout in hci_power_on).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2293
/* Deferred handler for hdev->error_reset: after a controller hardware
 * error, give the driver's hw_error callback a chance to act (or just
 * log the error code), then restart the device with a close/open
 * cycle.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* Only attempt the re-open when the close succeeded. */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2310
2311void hci_uuids_clear(struct hci_dev *hdev)
2312{
2313 struct bt_uuid *uuid, *tmp;
2314
2315 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2316 list_del(&uuid->list);
2317 kfree(uuid);
2318 }
2319}
2320
2321void hci_link_keys_clear(struct hci_dev *hdev)
2322{
2323 struct link_key *key;
2324
2325 list_for_each_entry(key, &hdev->link_keys, list) {
2326 list_del_rcu(&key->list);
2327 kfree_rcu(key, rcu);
2328 }
2329}
2330
2331void hci_smp_ltks_clear(struct hci_dev *hdev)
2332{
2333 struct smp_ltk *k;
2334
2335 list_for_each_entry(k, &hdev->long_term_keys, list) {
2336 list_del_rcu(&k->list);
2337 kfree_rcu(k, rcu);
2338 }
2339}
2340
2341void hci_smp_irks_clear(struct hci_dev *hdev)
2342{
2343 struct smp_irk *k;
2344
2345 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2346 list_del_rcu(&k->list);
2347 kfree_rcu(k, rcu);
2348 }
2349}
2350
2351void hci_blocked_keys_clear(struct hci_dev *hdev)
2352{
2353 struct blocked_key *b;
2354
2355 list_for_each_entry(b, &hdev->blocked_keys, list) {
2356 list_del_rcu(&b->list);
2357 kfree_rcu(b, rcu);
2358 }
2359}
2360
2361bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2362{
2363 bool blocked = false;
2364 struct blocked_key *b;
2365
2366 rcu_read_lock();
2367 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2368 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2369 blocked = true;
2370 break;
2371 }
2372 }
2373
2374 rcu_read_unlock();
2375 return blocked;
2376}
2377
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Traverses hdev->link_keys under RCU.  Returns NULL when no key
 * exists, or when the matching key's value is on the blocked-keys
 * list (a rate-limited warning is logged in that case).
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			/* Drop the RCU read lock before the blocked-key
			 * check and return paths.
			 */
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2403
/* Decide whether a newly stored BR/EDR link key should be kept
 * persistently.  The checks are an ordered rule chain: each rule only
 * applies when none of the earlier ones matched, so do not reorder.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2443
2444static u8 ltk_role(u8 type)
2445{
2446 if (type == SMP_LTK)
2447 return HCI_ROLE_MASTER;
2448
2449 return HCI_ROLE_SLAVE;
2450}
2451
/* Look up a Long Term Key by address, address type and role.
 *
 * An entry matches when the address and address type agree and either
 * the key is a Secure Connections key (valid for both roles) or its
 * type maps to the requested @role.  Matching keys that appear on the
 * blocked-keys list are rejected with a rate-limited warning.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			/* Leave the RCU read section before returning */
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2480
/* Resolve a Resolvable Private Address to its Identity Resolving Key.
 *
 * First pass: cheap comparison against each IRK's cached ->rpa.
 * Second pass: run the cryptographic smp_irk_matches() check and, on
 * success, cache the RPA in the matching entry for future lookups.
 * A match whose key value is on the blocked-keys list is reported and
 * treated as no match.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2514
/* Look up an Identity Resolving Key by the device's Identity Address.
 *
 * Random identity addresses must be static random (two topmost bits
 * set); anything else cannot be an identity address and returns NULL
 * immediately.  Matches on the blocked-keys list are rejected with a
 * rate-limited warning.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2547
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * Reuses an existing entry when one is present, otherwise allocates a
 * new one and links it into hdev->link_keys.  When @persistent is
 * non-NULL it is set to whether the key should be stored permanently
 * (see hci_persistent_key()).  Returns the stored entry, or NULL on
 * allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* No previous key: remember 0xff as "no old type" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previously stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2594
2595struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2596 u8 addr_type, u8 type, u8 authenticated,
2597 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2598{
2599 struct smp_ltk *key, *old_key;
2600 u8 role = ltk_role(type);
2601
2602 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2603 if (old_key)
2604 key = old_key;
2605 else {
2606 key = kzalloc(sizeof(*key), GFP_KERNEL);
2607 if (!key)
2608 return NULL;
2609 list_add_rcu(&key->list, &hdev->long_term_keys);
2610 }
2611
2612 bacpy(&key->bdaddr, bdaddr);
2613 key->bdaddr_type = addr_type;
2614 memcpy(key->val, tk, sizeof(key->val));
2615 key->authenticated = authenticated;
2616 key->ediv = ediv;
2617 key->rand = rand;
2618 key->enc_size = enc_size;
2619 key->type = type;
2620
2621 return key;
2622}
2623
2624struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2625 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2626{
2627 struct smp_irk *irk;
2628
2629 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2630 if (!irk) {
2631 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2632 if (!irk)
2633 return NULL;
2634
2635 bacpy(&irk->bdaddr, bdaddr);
2636 irk->addr_type = addr_type;
2637
2638 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2639 }
2640
2641 memcpy(irk->val, val, 16);
2642 bacpy(&irk->rpa, rpa);
2643
2644 return irk;
2645}
2646
2647int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2648{
2649 struct link_key *key;
2650
2651 key = hci_find_link_key(hdev, bdaddr);
2652 if (!key)
2653 return -ENOENT;
2654
2655 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2656
2657 list_del_rcu(&key->list);
2658 kfree_rcu(key, rcu);
2659
2660 return 0;
2661}
2662
2663int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2664{
2665 struct smp_ltk *k;
2666 int removed = 0;
2667
2668 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2669 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2670 continue;
2671
2672 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2673
2674 list_del_rcu(&k->list);
2675 kfree_rcu(k, rcu);
2676 removed++;
2677 }
2678
2679 return removed ? 0 : -ENOENT;
2680}
2681
2682void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2683{
2684 struct smp_irk *k;
2685
2686 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2687 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2688 continue;
2689
2690 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2691
2692 list_del_rcu(&k->list);
2693 kfree_rcu(k, rcu);
2694 }
2695}
2696
/* Return true if @bdaddr/@type has pairing data stored: a link key
 * for BR/EDR addresses, or an LTK for LE addresses.  For LE, an RPA
 * is first resolved to its identity address via the IRK store before
 * the LTK lookup.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK resolves the address, continue with the identity
	 * address instead of the (possibly private) input address.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2732
/* HCI command timer function: fires when the controller has not
 * responded to a sent command within the command timeout.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	/* Include the opcode of the stuck command when it is known */
	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Let the driver react (e.g. reset the controller) if it
	 * provides a cmd_timeout hook.
	 */
	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	/* Unblock the command queue and kick the sender */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2754
2755struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2756 bdaddr_t *bdaddr, u8 bdaddr_type)
2757{
2758 struct oob_data *data;
2759
2760 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2761 if (bacmp(bdaddr, &data->bdaddr) != 0)
2762 continue;
2763 if (data->bdaddr_type != bdaddr_type)
2764 continue;
2765 return data;
2766 }
2767
2768 return NULL;
2769}
2770
2771int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2772 u8 bdaddr_type)
2773{
2774 struct oob_data *data;
2775
2776 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2777 if (!data)
2778 return -ENOENT;
2779
2780 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2781
2782 list_del(&data->list);
2783 kfree(data);
2784
2785 return 0;
2786}
2787
2788void hci_remote_oob_data_clear(struct hci_dev *hdev)
2789{
2790 struct oob_data *data, *n;
2791
2792 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2793 list_del(&data->list);
2794 kfree(data);
2795 }
2796}
2797
/* Store (or update) remote Out-Of-Band pairing data for @bdaddr.
 *
 * The ->present field records which hash/randomizer pairs are valid:
 * 0x01 only the 192-bit pair, 0x02 only the 256-bit pair, 0x03 both,
 * 0x00 neither.  Missing pairs are zeroed out.  Returns 0 on success
 * or -ENOMEM when a new entry could not be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2843
2844/* This function requires the caller holds hdev->lock */
2845struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2846{
2847 struct adv_info *adv_instance;
2848
2849 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2850 if (adv_instance->instance == instance)
2851 return adv_instance;
2852 }
2853
2854 return NULL;
2855}
2856
2857/* This function requires the caller holds hdev->lock */
2858struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2859{
2860 struct adv_info *cur_instance;
2861
2862 cur_instance = hci_find_adv_instance(hdev, instance);
2863 if (!cur_instance)
2864 return NULL;
2865
2866 if (cur_instance == list_last_entry(&hdev->adv_instances,
2867 struct adv_info, list))
2868 return list_first_entry(&hdev->adv_instances,
2869 struct adv_info, list);
2870 else
2871 return list_next_entry(cur_instance, list);
2872}
2873
2874/* This function requires the caller holds hdev->lock */
2875int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2876{
2877 struct adv_info *adv_instance;
2878
2879 adv_instance = hci_find_adv_instance(hdev, instance);
2880 if (!adv_instance)
2881 return -ENOENT;
2882
2883 BT_DBG("%s removing %dMR", hdev->name, instance);
2884
2885 if (hdev->cur_adv_instance == instance) {
2886 if (hdev->adv_instance_timeout) {
2887 cancel_delayed_work(&hdev->adv_instance_expire);
2888 hdev->adv_instance_timeout = 0;
2889 }
2890 hdev->cur_adv_instance = 0x00;
2891 }
2892
2893 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2894
2895 list_del(&adv_instance->list);
2896 kfree(adv_instance);
2897
2898 hdev->adv_instance_cnt--;
2899
2900 return 0;
2901}
2902
2903void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2904{
2905 struct adv_info *adv_instance, *n;
2906
2907 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2908 adv_instance->rpa_expired = rpa_expired;
2909}
2910
/* Remove and free all advertising instances and reset the related
 * bookkeeping.  Pending expiry and per-instance RPA work is cancelled
 * first.  This function requires the caller holds hdev->lock.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		/* Wait for the instance's RPA work before freeing it */
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
2930
/* Delayed-work callback for an advertising instance's rpa_expired_cb:
 * marks the instance's RPA as expired.
 */
static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}
2940
2941/* This function requires the caller holds hdev->lock */
2942int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2943 u16 adv_data_len, u8 *adv_data,
2944 u16 scan_rsp_len, u8 *scan_rsp_data,
2945 u16 timeout, u16 duration)
2946{
2947 struct adv_info *adv_instance;
2948
2949 adv_instance = hci_find_adv_instance(hdev, instance);
2950 if (adv_instance) {
2951 memset(adv_instance->adv_data, 0,
2952 sizeof(adv_instance->adv_data));
2953 memset(adv_instance->scan_rsp_data, 0,
2954 sizeof(adv_instance->scan_rsp_data));
2955 } else {
2956 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2957 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2958 return -EOVERFLOW;
2959
2960 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2961 if (!adv_instance)
2962 return -ENOMEM;
2963
2964 adv_instance->pending = true;
2965 adv_instance->instance = instance;
2966 list_add(&adv_instance->list, &hdev->adv_instances);
2967 hdev->adv_instance_cnt++;
2968 }
2969
2970 adv_instance->flags = flags;
2971 adv_instance->adv_data_len = adv_data_len;
2972 adv_instance->scan_rsp_len = scan_rsp_len;
2973
2974 if (adv_data_len)
2975 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2976
2977 if (scan_rsp_len)
2978 memcpy(adv_instance->scan_rsp_data,
2979 scan_rsp_data, scan_rsp_len);
2980
2981 adv_instance->timeout = timeout;
2982 adv_instance->remaining_time = timeout;
2983
2984 if (duration == 0)
2985 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2986 else
2987 adv_instance->duration = duration;
2988
2989 adv_instance->tx_power = HCI_TX_POWER_INVALID;
2990
2991 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2992 adv_instance_rpa_expired);
2993
2994 BT_DBG("%s for %dMR", hdev->name, instance);
2995
2996 return 0;
2997}
2998
2999struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3000 bdaddr_t *bdaddr, u8 type)
3001{
3002 struct bdaddr_list *b;
3003
3004 list_for_each_entry(b, bdaddr_list, list) {
3005 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3006 return b;
3007 }
3008
3009 return NULL;
3010}
3011
3012struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3013 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3014 u8 type)
3015{
3016 struct bdaddr_list_with_irk *b;
3017
3018 list_for_each_entry(b, bdaddr_list, list) {
3019 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3020 return b;
3021 }
3022
3023 return NULL;
3024}
3025
3026void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3027{
3028 struct bdaddr_list *b, *n;
3029
3030 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3031 list_del(&b->list);
3032 kfree(b);
3033 }
3034}
3035
3036int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3037{
3038 struct bdaddr_list *entry;
3039
3040 if (!bacmp(bdaddr, BDADDR_ANY))
3041 return -EBADF;
3042
3043 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3044 return -EEXIST;
3045
3046 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3047 if (!entry)
3048 return -ENOMEM;
3049
3050 bacpy(&entry->bdaddr, bdaddr);
3051 entry->bdaddr_type = type;
3052
3053 list_add(&entry->list, list);
3054
3055 return 0;
3056}
3057
3058int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3059 u8 type, u8 *peer_irk, u8 *local_irk)
3060{
3061 struct bdaddr_list_with_irk *entry;
3062
3063 if (!bacmp(bdaddr, BDADDR_ANY))
3064 return -EBADF;
3065
3066 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3067 return -EEXIST;
3068
3069 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3070 if (!entry)
3071 return -ENOMEM;
3072
3073 bacpy(&entry->bdaddr, bdaddr);
3074 entry->bdaddr_type = type;
3075
3076 if (peer_irk)
3077 memcpy(entry->peer_irk, peer_irk, 16);
3078
3079 if (local_irk)
3080 memcpy(entry->local_irk, local_irk, 16);
3081
3082 list_add(&entry->list, list);
3083
3084 return 0;
3085}
3086
3087int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3088{
3089 struct bdaddr_list *entry;
3090
3091 if (!bacmp(bdaddr, BDADDR_ANY)) {
3092 hci_bdaddr_list_clear(list);
3093 return 0;
3094 }
3095
3096 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3097 if (!entry)
3098 return -ENOENT;
3099
3100 list_del(&entry->list);
3101 kfree(entry);
3102
3103 return 0;
3104}
3105
3106int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3107 u8 type)
3108{
3109 struct bdaddr_list_with_irk *entry;
3110
3111 if (!bacmp(bdaddr, BDADDR_ANY)) {
3112 hci_bdaddr_list_clear(list);
3113 return 0;
3114 }
3115
3116 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3117 if (!entry)
3118 return -ENOENT;
3119
3120 list_del(&entry->list);
3121 kfree(entry);
3122
3123 return 0;
3124}
3125
3126/* This function requires the caller holds hdev->lock */
3127struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3128 bdaddr_t *addr, u8 addr_type)
3129{
3130 struct hci_conn_params *params;
3131
3132 list_for_each_entry(params, &hdev->le_conn_params, list) {
3133 if (bacmp(¶ms->addr, addr) == 0 &&
3134 params->addr_type == addr_type) {
3135 return params;
3136 }
3137 }
3138
3139 return NULL;
3140}
3141
3142/* This function requires the caller holds hdev->lock */
3143struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3144 bdaddr_t *addr, u8 addr_type)
3145{
3146 struct hci_conn_params *param;
3147
3148 list_for_each_entry(param, list, action) {
3149 if (bacmp(¶m->addr, addr) == 0 &&
3150 param->addr_type == addr_type)
3151 return param;
3152 }
3153
3154 return NULL;
3155}
3156
/* Get or create the LE connection parameters for @addr/@addr_type.
 *
 * Returns the existing entry when one is present; otherwise allocates
 * a new one pre-filled with hdev's default connection parameters and
 * auto_connect disabled.  Returns NULL on allocation failure.
 *
 * This function requires the caller holds hdev->lock.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed with the controller-wide defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3189
/* Release a connection-parameter entry: drop the reference to any
 * attached connection, unlink the entry from both the pending-action
 * list and the main le_conn_params list, then free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
3201
/* Delete the LE connection parameters for @addr/@addr_type (no-op
 * when none exist) and refresh background scanning to account for the
 * removal.  This function requires the caller holds hdev->lock.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3217
/* Drop all connection parameters whose auto_connect is disabled,
 * except entries with a pending explicit connect, which are kept and
 * downgraded to one-shot.  This function requires the caller holds
 * hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3241
3242/* This function requires the caller holds hdev->lock */
3243static void hci_conn_params_clear_all(struct hci_dev *hdev)
3244{
3245 struct hci_conn_params *params, *tmp;
3246
3247 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3248 hci_conn_params_free(params);
3249
3250 BT_DBG("All LE connection parameters were removed");
3251}
3252
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* Static random identity */
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		/* Public identity */
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3280
/* Wait until all pending suspend task bits in hdev->suspend_tasks are
 * cleared, or SUSPEND_NOTIFIER_TIMEOUT elapses.  On timeout the
 * still-set bits are logged and force-cleared and -ETIMEDOUT is
 * returned; otherwise returns 0.
 */
static int hci_suspend_wait_event(struct hci_dev *hdev)
{
#define WAKE_COND                                                              \
	(find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
	 __SUSPEND_NUM_TASKS)

	int i;
	int ret = wait_event_timeout(hdev->suspend_wait_q,
				     WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);

	if (ret == 0) {
		bt_dev_dbg(hdev, "Timed out waiting for suspend");
		for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
			if (test_bit(i, hdev->suspend_tasks))
				bt_dev_dbg(hdev, "Bit %d is set", i);
			/* Clear stale bits so a later wait starts clean */
			clear_bit(i, hdev->suspend_tasks);
		}

		ret = -ETIMEDOUT;
	} else {
		ret = 0;
	}

	return ret;
}
3306
/* Work handler for hdev->suspend_prepare: apply the requested suspend
 * state transition (stored in hdev->suspend_state_next) under the
 * device lock.
 */
static void hci_prepare_suspend(struct work_struct *work)
{
	struct hci_dev *hdev =
		container_of(work, struct hci_dev, suspend_prepare);

	hci_dev_lock(hdev);
	hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
	hci_dev_unlock(hdev);
}
3316
/* Request a transition to suspend state @next: mark the notifier task
 * pending, queue the prepare work, and wait for all suspend tasks to
 * complete.  Returns 0 on success or -ETIMEDOUT from the wait.
 */
static int hci_change_suspend_state(struct hci_dev *hdev,
				    enum suspended_state next)
{
	hdev->suspend_state_next = next;
	set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
	return hci_suspend_wait_event(hdev);
}
3325
/* PM notifier callback: drive the controller through the suspend
 * (disconnect, then configure wake) and resume state transitions.
 * Returns NOTIFY_STOP on success or an -EBUSY-wrapped errno when a
 * transition failed.
 */
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* If powering down, wait for completion. */
	if (mgmt_powering_down(hdev)) {
		set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
		ret = hci_suspend_wait_event(hdev);
		if (ret)
			goto done;
	}

	/* Suspend notifier should only act on events when powered. */
	if (!hdev_is_powered(hdev))
		goto done;

	if (action == PM_SUSPEND_PREPARE) {
		/* Suspend consists of two actions:
		 *  - First, disconnect everything and make the controller not
		 *    connectable (disabling scanning)
		 *  - Second, program event filter/whitelist and enable scan
		 */
		ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);

		/* Only configure whitelist if disconnect succeeded and wake
		 * isn't being prevented.
		 */
		if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev)))
			ret = hci_change_suspend_state(hdev,
						BT_SUSPEND_CONFIGURE_WAKE);
	} else if (action == PM_POST_SUSPEND) {
		ret = hci_change_suspend_state(hdev, BT_RUNNING);
	}

	/* If suspend failed, restore it to running */
	if (ret && action == PM_SUSPEND_PREPARE)
		hci_change_suspend_state(hdev, BT_RUNNING);

done:
	return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
}
3370
/* Alloc HCI device: allocate and initialize a struct hci_dev with all
 * default parameters, lists, work items and queues.  The returned
 * device must later be freed with hci_free_dev().  Returns NULL on
 * allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default controller parameters */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (values in controller units) */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Device-wide lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->wakeable);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);

	/* Work items and queues */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);
	INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_waitqueue_head(&hdev->suspend_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3470
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* hdev is embedded in hdev->dev; dropping the last reference
	 * invokes the device release callback which does the actual
	 * freeing. Never kfree() hdev directly.
	 */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3478
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver must provide at least these callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered workqueues: HCI processing relies on strictly serial
	 * execution of the rx/cmd/tx work items.
	 */
	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill is best-effort: a registration failure is not fatal,
	 * the controller simply operates without rfkill support.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
	error = register_pm_notifier(&hdev->suspend_notifier);
	/* NOTE(review): failing here jumps to err_wqueue, which only
	 * destroys the workqueues; device_add(), the hci_dev_list
	 * entry, rfkill and the HCI_DEV_REG socket event are NOT
	 * unwound on this path — looks like an incomplete error
	 * unwind, confirm against later upstream fixes.
	 */
	if (error)
		goto err_wqueue;

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3588
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first so concurrent users bail out early */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Cache the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	unregister_pm_notifier(&hdev->suspend_notifier);

	/* Only announce removal via mgmt if the controller ever left
	 * the setup/config phases.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all state accumulated while the device was registered */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drops the reference taken in hci_register_dev() */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3659
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies HCI socket/monitor listeners; always succeeds */
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3667
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies HCI socket/monitor listeners; always succeeds */
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3675
3676/* Reset HCI device */
3677int hci_reset_dev(struct hci_dev *hdev)
3678{
3679 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3680 struct sk_buff *skb;
3681
3682 skb = bt_skb_alloc(3, GFP_ATOMIC);
3683 if (!skb)
3684 return -ENOMEM;
3685
3686 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3687 skb_put_data(skb, hw_err, 3);
3688
3689 /* Send Hardware Error to upper stack */
3690 return hci_recv_frame(hdev, skb);
3691}
3692EXPORT_SYMBOL(hci_reset_dev);
3693
3694/* Receive frame from HCI drivers */
3695int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3696{
3697 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3698 && !test_bit(HCI_INIT, &hdev->flags))) {
3699 kfree_skb(skb);
3700 return -ENXIO;
3701 }
3702
3703 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3704 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3705 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3706 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3707 kfree_skb(skb);
3708 return -EINVAL;
3709 }
3710
3711 /* Incoming skb */
3712 bt_cb(skb)->incoming = 1;
3713
3714 /* Time stamp */
3715 __net_timestamp(skb);
3716
3717 skb_queue_tail(&hdev->rx_q, skb);
3718 queue_work(hdev->workqueue, &hdev->rx_work);
3719
3720 return 0;
3721}
3722EXPORT_SYMBOL(hci_recv_frame);
3723
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queued on the regular RX path; unlike hci_recv_frame() no
	 * HCI_UP/HCI_INIT state or packet type check is applied here.
	 */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
3739
/* Record the transport driver's hardware description string,
 * replacing any previously stored value. Paired with kfree_const()
 * because kvasprintf_const() may return a constant string rather
 * than a heap allocation.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
3750
/* Record the transport driver's firmware description string,
 * replacing any previously stored value. Same const-string handling
 * as hci_set_hw_info().
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
3761
/* ---- Interface to upper protocols ---- */

/* Register an upper-layer protocol callback set (e.g. L2CAP, SCO).
 * Always succeeds; the return value exists for API symmetry.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3775
/* Remove a previously registered upper-layer callback set.
 * Always succeeds; the return value exists for API symmetry.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3787
/* Deliver one outgoing packet to the transport driver. Consumes the
 * skb in every case: it is either handed to hdev->send() or freed
 * here on failure.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Note: the monitor/socket copies above happen even when the
	 * transport is already stopped; only the actual send is
	 * skipped.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
3820
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	/* Actual transmission happens asynchronously in hci_cmd_work */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3845
/* Send an HCI command straight to the driver, bypassing the command
 * queue and without waiting for any completion event. Restricted to
 * vendor-specific commands (OGF 0x3f), the only class that may
 * legitimately stay unresponded.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	/* Synchronous hand-off to the driver; skb is consumed */
	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3876
3877/* Get data from the previously sent command */
3878void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3879{
3880 struct hci_command_hdr *hdr;
3881
3882 if (!hdev->sent_cmd)
3883 return NULL;
3884
3885 hdr = (void *) hdev->sent_cmd->data;
3886
3887 if (hdr->opcode != cpu_to_le16(opcode))
3888 return NULL;
3889
3890 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3891
3892 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3893}
3894
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* Serialize against other synchronous requests; the result is
	 * the event skb or an ERR_PTR from __hci_cmd_sync().
	 */
	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3913
/* Send ACL data */

/* Prepend the 4-byte ACL header: packed handle+flags plus the payload
 * length as it was before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len; /* payload length, captured before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3926
/* Add ACL headers to an skb and its fragment chain (if any) and push
 * everything onto @queue. On AMP controllers the logical channel
 * handle is used instead of the connection handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb->len to the head fragment; the remaining data
	 * lives in the frag_list chain processed below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		/* NOTE(review): skb is neither queued nor freed on this
		 * path — presumed unreachable for registered dev types.
		 */
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3988
/* Queue ACL data on the channel and kick the TX work to send it.
 * Consumes the skb.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3999
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	/* NOTE(review): dlen is assigned straight from skb->len with no
	 * range check; if the header field is 8-bit this silently
	 * truncates payloads above 255 bytes — confirm callers bound
	 * the SCO MTU.
	 */
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
4020
4021/* ---- HCI TX task (outgoing data) ---- */
4022
/* HCI Connection scheduler */

/* Pick the connection of @type with the fewest in-flight packets and
 * compute its fair share (*quote) of the available controller buffers.
 * Returns NULL with *quote = 0 when nothing of that type is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins — simple fairness */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL pool.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		/* Split buffers evenly, but always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4083
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets, since the controller stopped returning
 * buffer credits for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4104
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by
 * the fewest packets sent on the owning connection. *quote receives
 * the channel's fair share of the relevant buffer pool. Returns NULL
 * when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Priority is taken from the head skb only */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * accounting at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	/* Split buffers evenly, but always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4186
/* Anti-starvation pass run after a scheduling round: channels that got
 * to transmit have their per-round counter reset, while the head skb
 * of every channel that sent nothing is promoted to just below the
 * maximum priority so it wins the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset counter
			 * and leave its priority untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4236
/* Number of controller buffer blocks consumed by @skb under
 * block-based flow control; the ACL header itself is not counted.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4242
4243static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4244{
4245 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4246 /* ACL tx timeout must be longer than maximum
4247 * link supervision timeout (40.9 seconds) */
4248 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4249 HCI_ACL_TX_TIMEOUT))
4250 hci_link_tx_to(hdev, ACL_LINK);
4251 }
4252}
4253
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* Drain each chosen connection up to its fair quote */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection counter at ~0 */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4277
/* Schedule eSCO; identical to hci_sched_sco() except for the link
 * type (both share the sco_cnt buffer pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection counter at ~0 */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4301
/* ACL scheduler for packet-based flow control: one controller buffer
 * credit (acl_cnt) per packet.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4343
4344static void hci_sched_acl_blk(struct hci_dev *hdev)
4345{
4346 unsigned int cnt = hdev->block_cnt;
4347 struct hci_chan *chan;
4348 struct sk_buff *skb;
4349 int quote;
4350 u8 type;
4351
4352 __check_timeout(hdev, cnt);
4353
4354 BT_DBG("%s", hdev->name);
4355
4356 if (hdev->dev_type == HCI_AMP)
4357 type = AMP_LINK;
4358 else
4359 type = ACL_LINK;
4360
4361 while (hdev->block_cnt > 0 &&
4362 (chan = hci_chan_sent(hdev, type, "e))) {
4363 u32 priority = (skb_peek(&chan->data_q))->priority;
4364 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4365 int blocks;
4366
4367 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4368 skb->len, skb->priority);
4369
4370 /* Stop if priority has changed */
4371 if (skb->priority < priority)
4372 break;
4373
4374 skb = skb_dequeue(&chan->data_q);
4375
4376 blocks = __get_blocks(hdev, skb);
4377 if (blocks > hdev->block_cnt)
4378 return;
4379
4380 hci_conn_enter_active_mode(chan->conn,
4381 bt_cb(skb)->force_active);
4382
4383 hci_send_frame(hdev, skb);
4384 hdev->acl_last_tx = jiffies;
4385
4386 hdev->block_cnt -= blocks;
4387 quote -= blocks;
4388
4389 chan->sent += blocks;
4390 chan->conn->sent += blocks;
4391 }
4392 }
4393
4394 if (cnt != hdev->block_cnt)
4395 hci_prio_recalculate(hdev, type);
4396}
4397
4398static void hci_sched_acl(struct hci_dev *hdev)
4399{
4400 BT_DBG("%s", hdev->name);
4401
4402 /* No ACL link over BR/EDR controller */
4403 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4404 return;
4405
4406 /* No AMP link over AMP controller */
4407 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4408 return;
4409
4410 switch (hdev->flow_ctl_mode) {
4411 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4412 hci_sched_acl_pkt(hdev);
4413 break;
4414
4415 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4416 hci_sched_acl_blk(hdev);
4417 break;
4418 }
4419}
4420
/* LE scheduler; uses the dedicated LE buffer pool when the controller
 * has one (le_pkts != 0), otherwise shares the ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	/* Remember the starting credit count to detect progress */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4470
/* TX work item: run all per-type schedulers and then flush raw
 * packets. Runs on hdev->workqueue (ordered, so never concurrent
 * with itself).
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4491
4492/* ----- HCI RX task (incoming data processing) ----- */
4493
4494/* ACL data packet */
4495static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4496{
4497 struct hci_acl_hdr *hdr = (void *) skb->data;
4498 struct hci_conn *conn;
4499 __u16 handle, flags;
4500
4501 skb_pull(skb, HCI_ACL_HDR_SIZE);
4502
4503 handle = __le16_to_cpu(hdr->handle);
4504 flags = hci_flags(handle);
4505 handle = hci_handle(handle);
4506
4507 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4508 handle, flags);
4509
4510 hdev->stat.acl_rx++;
4511
4512 hci_dev_lock(hdev);
4513 conn = hci_conn_hash_lookup_handle(hdev, handle);
4514 hci_dev_unlock(hdev);
4515
4516 if (conn) {
4517 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4518
4519 /* Send to upper protocol */
4520 l2cap_recv_acldata(conn, skb, flags);
4521 return;
4522 } else {
4523 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4524 handle);
4525 }
4526
4527 kfree_skb(skb);
4528}
4529
4530/* SCO data packet */
4531static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4532{
4533 struct hci_sco_hdr *hdr = (void *) skb->data;
4534 struct hci_conn *conn;
4535 __u16 handle, flags;
4536
4537 skb_pull(skb, HCI_SCO_HDR_SIZE);
4538
4539 handle = __le16_to_cpu(hdr->handle);
4540 flags = hci_flags(handle);
4541 handle = hci_handle(handle);
4542
4543 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4544 handle, flags);
4545
4546 hdev->stat.sco_rx++;
4547
4548 hci_dev_lock(hdev);
4549 conn = hci_conn_hash_lookup_handle(hdev, handle);
4550 hci_dev_unlock(hdev);
4551
4552 if (conn) {
4553 /* Send to upper protocol */
4554 sco_recv_scodata(conn, skb);
4555 return;
4556 } else {
4557 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4558 handle);
4559 }
4560
4561 kfree_skb(skb);
4562}
4563
4564static bool hci_req_is_complete(struct hci_dev *hdev)
4565{
4566 struct sk_buff *skb;
4567
4568 skb = skb_peek(&hdev->cmd_q);
4569 if (!skb)
4570 return true;
4571
4572 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4573}
4574
/* Re-queue a clone of the last sent command. Used when a controller
 * emits a spontaneous reset-complete event that would otherwise leave
 * the genuinely pending command unanswered forever.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	/* Never resend a reset: the spurious event already "completed"
	 * it from the controller's point of view.
	 */
	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything else */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4596
/* Process a Command Complete/Status event for @opcode with @status and
 * report back which request-completion callback (if any) the caller
 * should invoke, via *req_complete or *req_complete_skb.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The start of the next request marks the end of this
		 * one: put it back and stop draining.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4661
4662static void hci_rx_work(struct work_struct *work)
4663{
4664 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4665 struct sk_buff *skb;
4666
4667 BT_DBG("%s", hdev->name);
4668
4669 while ((skb = skb_dequeue(&hdev->rx_q))) {
4670 /* Send copy to monitor */
4671 hci_send_to_monitor(hdev, skb);
4672
4673 if (atomic_read(&hdev->promisc)) {
4674 /* Send copy to the sockets */
4675 hci_send_to_sock(hdev, skb);
4676 }
4677
4678 /* If the device has been opened in HCI_USER_CHANNEL,
4679 * the userspace has exclusive access to device.
4680 * When device is HCI_INIT, we still need to process
4681 * the data packets to the driver in order
4682 * to complete its setup().
4683 */
4684 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4685 !test_bit(HCI_INIT, &hdev->flags)) {
4686 kfree_skb(skb);
4687 continue;
4688 }
4689
4690 if (test_bit(HCI_INIT, &hdev->flags)) {
4691 /* Don't process data packets in this states. */
4692 switch (hci_skb_pkt_type(skb)) {
4693 case HCI_ACLDATA_PKT:
4694 case HCI_SCODATA_PKT:
4695 case HCI_ISODATA_PKT:
4696 kfree_skb(skb);
4697 continue;
4698 }
4699 }
4700
4701 /* Process frame */
4702 switch (hci_skb_pkt_type(skb)) {
4703 case HCI_EVENT_PKT:
4704 BT_DBG("%s Event packet", hdev->name);
4705 hci_event_packet(hdev, skb);
4706 break;
4707
4708 case HCI_ACLDATA_PKT:
4709 BT_DBG("%s ACL data packet", hdev->name);
4710 hci_acldata_packet(hdev, skb);
4711 break;
4712
4713 case HCI_SCODATA_PKT:
4714 BT_DBG("%s SCO data packet", hdev->name);
4715 hci_scodata_packet(hdev, skb);
4716 break;
4717
4718 default:
4719 kfree_skb(skb);
4720 break;
4721 }
4722 }
4723}
4724
4725static void hci_cmd_work(struct work_struct *work)
4726{
4727 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4728 struct sk_buff *skb;
4729
4730 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4731 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4732
4733 /* Send queued commands */
4734 if (atomic_read(&hdev->cmd_cnt)) {
4735 skb = skb_dequeue(&hdev->cmd_q);
4736 if (!skb)
4737 return;
4738
4739 kfree_skb(hdev->sent_cmd);
4740
4741 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4742 if (hdev->sent_cmd) {
4743 if (hci_req_status_pend(hdev))
4744 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4745 atomic_dec(&hdev->cmd_cnt);
4746 hci_send_frame(hdev, skb);
4747 if (test_bit(HCI_RESET, &hdev->flags))
4748 cancel_delayed_work(&hdev->cmd_timer);
4749 else
4750 schedule_delayed_work(&hdev->cmd_timer,
4751 HCI_CMD_TIMEOUT);
4752 } else {
4753 skb_queue_head(&hdev->cmd_q, skb);
4754 queue_work(hdev->workqueue, &hdev->cmd_work);
4755 }
4756 }
4757}