/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
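
/* Illustrative sketch (added for clarity, not part of the original file):
 * a successful lookup takes an implicit hci_dev_hold(), so every caller
 * must balance it with hci_dev_put() when done with the reference.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		// ... operate on hdev ...
 *		hci_dev_put(hdev);
 *	}
 */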

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
}
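
/* For reference (added note, derived from the states handled above): a
 * full discovery cycle typically walks
 *
 *	DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING
 *		[-> DISCOVERY_RESOLVING] -> DISCOVERY_STOPPING
 *		-> DISCOVERY_STOPPED
 *
 * with mgmt_discovering() notifying userspace on the FINDING and
 * STOPPED edges only.
 */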

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
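
/* Descriptive note (added for clarity, derived from the loop above): the
 * resolve list is kept sorted by ascending abs(rssi), i.e. strongest
 * signal first, so nearby devices get their names resolved before
 * distant ones. Entries whose name request is already NAME_PENDING never
 * force an earlier insertion point, so an in-flight request is not
 * reordered behind the updated entry.
 */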

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	if (do_inquiry) {
		hci_req_sync_lock(hdev);
		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
		hci_req_sync_unlock(hdev);

		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
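
/* Illustrative sketch (added for context, not part of the original file):
 * this ioctl is what classic inquiry tools use from userspace. A minimal
 * caller, assuming the usual BlueZ userspace headers, looks roughly like:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct hci_inquiry_req *ir;
 *	uint8_t buf[sizeof(*ir) + 255 * sizeof(inquiry_info)] = { 0 };
 *
 *	ir = (void *)buf;
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;	// GIAC 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;	// 8 * 1.28 s, must be <= 60 (see above)
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, ir) >= 0)
 *		// ir->num_rsp now holds the number of inquiry_info
 *		// records written right after *ir in buf.
 */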

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	__le16 policy;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_cmd_sync_status(hdev,
						  HCI_OP_WRITE_AUTH_ENABLE,
						  1, &dr.dev_opt,
						  HCI_CMD_TIMEOUT);
			if (err)
				break;
		}

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		policy = cpu_to_le16(dr.dev_opt);

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					  2, &policy, HCI_CMD_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
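
/* Illustrative note (added for clarity): for HCISETACLMTU/HCISETSCOMTU
 * the 32-bit dev_opt carries two 16-bit halves, read above via a __u16
 * pointer cast. On a little-endian host userspace would pack it roughly
 * as:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = (mtu << 16) | pkts,	// mtu in the high half
 *	};
 *
 *	ioctl(dd, HCISETACLMTU, &dr);
 *
 * The exact byte layout follows host endianness, since the halves are
 * accessed through native __u16 loads rather than fixed-endian helpers.
 */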

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dl->dev_num = dev_num;
	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When auto-off is configured, the transport is actually
		 * running, but still report the device as down so that
		 * userspace sees a consistent powered-off state.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		dr[n].dev_id  = hdev->id;
		dr[n].dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
	kfree(dl);

	return err ? -EFAULT : 0;
}
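
/* Illustrative sketch (added for context, not part of the original file):
 * the matching userspace call, assuming the usual BlueZ headers,
 * enumerates controllers roughly like this:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		       HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (!ioctl(dd, HCIGETDEVLIST, dl))
 *		for (int i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%lx\n",
 *			       dl->dev_req[i].dev_id,
 *			       (unsigned long)dl->dev_req[i].dev_opt);
 */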

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When auto-off is configured, the transport is actually running,
	 * but still report the device as down (see hci_get_dev_list()
	 * above).
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);

	return err;
}

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;
	int err;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
		return 0;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
			if (err) {
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
					   err);

				/* Make sure the device is still closed even
				 * if anything in the power-off sequence
				 * (e.g. disconnecting devices) failed.
				 */
				hci_dev_do_close(hdev);
			}
		}
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
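
/* Illustrative sketch (added for context, not part of the original file):
 * these ops are typically wired up during device registration with the
 * standard rfkill API, roughly:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill)) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 */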

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Once the controller is configured, it is important
		 * to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->reset)
		hdev->reset(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
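
/* Descriptive note (added for clarity, derived from the branches above):
 * the resulting data->present encoding is
 *
 *	0x00 - neither P-192 nor P-256 OOB data available
 *	0x01 - only P-192 data (hash192/rand192)
 *	0x02 - only P-256 data (hash256/rand256)
 *	0x03 - both P-192 and P-256 data
 */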

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %u", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		disable_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;

		/* If the controller supports only one advertising set and
		 * the instance is set to 1, the only option is to use
		 * handle 0x00.
		 */
		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
			adv->handle = 0x00;
		else
			adv->handle = instance;

		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for instance %u", hdev->name, instance);

	return adv;
}
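
/* Illustrative sketch (added for context, not part of the original file):
 * a caller registering a simple, non-periodic instance would do roughly
 * the following while holding hdev->lock (adv_data/adv_data_len being the
 * caller's AD payload):
 *
 *	adv = hci_add_adv_instance(hdev, 0x01, flags,
 *				   adv_data_len, adv_data, 0, NULL,
 *				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 *
 * hci_add_per_instance() below is exactly this pattern plus the periodic
 * advertising fields.
 */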

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If the advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}

/* Assigns a handle to a monitor, and if offloading is supported and power
 * is on, also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
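
/* Illustrative sketch (added for context; the reject_list field name is
 * an assumption based on current hci_dev layouts): typical use pairs the
 * add and del helpers on one of the hdev address lists, e.g.:
 *
 *	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		// address was already on the list
 *
 *	hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 *
 * Passing BDADDR_ANY to the del helpers clears the whole list, as shown
 * in hci_bdaddr_list_del() below.
 */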

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2183
2184/* This function requires the caller holds hdev->lock */
2185struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2186 bdaddr_t *addr, u8 addr_type)
2187{
2188 struct hci_conn_params *params;
2189
2190 list_for_each_entry(params, &hdev->le_conn_params, list) {
2191 if (bacmp(&params->addr, addr) == 0 &&
2192 params->addr_type == addr_type) {
2193 return params;
2194 }
2195 }
2196
2197 return NULL;
2198}
2199
2200/* This function requires the caller holds hdev->lock or rcu_read_lock */
2201struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2202 bdaddr_t *addr, u8 addr_type)
2203{
2204 struct hci_conn_params *param;
2205
2206 rcu_read_lock();
2207
2208 list_for_each_entry_rcu(param, list, action) {
2209 if (bacmp(&param->addr, addr) == 0 &&
2210 param->addr_type == addr_type) {
2211 rcu_read_unlock();
2212 return param;
2213 }
2214 }
2215
2216 rcu_read_unlock();
2217
2218 return NULL;
2219}
2220
2221/* This function requires the caller holds hdev->lock */
2222void hci_pend_le_list_del_init(struct hci_conn_params *param)
2223{
2224 if (list_empty(&param->action))
2225 return;
2226
2227 list_del_rcu(&param->action);
2228 synchronize_rcu();
2229 INIT_LIST_HEAD(&param->action);
2230}
2231
2232/* This function requires the caller holds hdev->lock */
2233void hci_pend_le_list_add(struct hci_conn_params *param,
2234 struct list_head *list)
2235{
2236 list_add_rcu(&param->action, list);
2237}
2238
2239/* This function requires the caller holds hdev->lock */
2240struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2241 bdaddr_t *addr, u8 addr_type)
2242{
2243 struct hci_conn_params *params;
2244
2245 params = hci_conn_params_lookup(hdev, addr, addr_type);
2246 if (params)
2247 return params;
2248
2249 params = kzalloc(sizeof(*params), GFP_KERNEL);
2250 if (!params) {
2251 bt_dev_err(hdev, "out of memory");
2252 return NULL;
2253 }
2254
2255 bacpy(&params->addr, addr);
2256 params->addr_type = addr_type;
2257
2258 list_add(&params->list, &hdev->le_conn_params);
2259 INIT_LIST_HEAD(&params->action);
2260
2261 params->conn_min_interval = hdev->le_conn_min_interval;
2262 params->conn_max_interval = hdev->le_conn_max_interval;
2263 params->conn_latency = hdev->le_conn_latency;
2264 params->supervision_timeout = hdev->le_supv_timeout;
2265 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2266
2267 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2268
2269 return params;
2270}
2271
2272void hci_conn_params_free(struct hci_conn_params *params)
2273{
2274 hci_pend_le_list_del_init(params);
2275
2276 if (params->conn) {
2277 hci_conn_drop(params->conn);
2278 hci_conn_put(params->conn);
2279 }
2280
2281 list_del(&params->list);
2282 kfree(params);
2283}
2284
2285/* This function requires the caller holds hdev->lock */
2286void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2287{
2288 struct hci_conn_params *params;
2289
2290 params = hci_conn_params_lookup(hdev, addr, addr_type);
2291 if (!params)
2292 return;
2293
2294 hci_conn_params_free(params);
2295
2296 hci_update_passive_scan(hdev);
2297
2298 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2299}
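
/* Example (illustrative sketch, not in-tree code): adding connection
 * parameters for a peer and enabling autoconnect, with hdev->lock held
 * as the helpers above require. The example_* name is hypothetical; a
 * real caller would typically also call hci_update_passive_scan()
 * afterwards so the change takes effect.
 */
#if 0
static int example_enable_autoconnect(struct hci_dev *hdev,
                                      bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        hci_dev_lock(hdev);

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (params)
                params->auto_connect = HCI_AUTO_CONN_ALWAYS;

        hci_dev_unlock(hdev);

        return params ? 0 : -ENOMEM;
}
#endif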
2300
2301/* This function requires the caller holds hdev->lock */
2302void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2303{
2304 struct hci_conn_params *params, *tmp;
2305
2306 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2307 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2308 continue;
2309
2310 /* If trying to establish a one-time connection to a disabled
2311 * device, leave the params but mark them as explicit-connect only.
2312 */
2313 if (params->explicit_connect) {
2314 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2315 continue;
2316 }
2317
2318 hci_conn_params_free(params);
2319 }
2320
2321 BT_DBG("All LE disabled connection parameters were removed");
2322}
2323
2324/* This function requires the caller holds hdev->lock */
2325static void hci_conn_params_clear_all(struct hci_dev *hdev)
2326{
2327 struct hci_conn_params *params, *tmp;
2328
2329 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2330 hci_conn_params_free(params);
2331
2332 BT_DBG("All LE connection parameters were removed");
2333}
2334
2335/* Copy the Identity Address of the controller.
2336 *
2337 * If the controller has a public BD_ADDR, then by default use that one.
2338 * If this is an LE-only controller without a public address, default to
2339 * the static random address.
2340 *
2341 * For debugging purposes it is possible to force controllers with a
2342 * public address to use the static random address instead.
2343 *
2344 * In case BR/EDR has been disabled on a dual-mode controller and
2345 * userspace has configured a static address, then that address
2346 * becomes the identity address instead of the public BR/EDR address.
2347 */
2348void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2349 u8 *bdaddr_type)
2350{
2351 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2352 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2353 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2354 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2355 bacpy(bdaddr, &hdev->static_addr);
2356 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2357 } else {
2358 bacpy(bdaddr, &hdev->bdaddr);
2359 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2360 }
2361}
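
/* The selection logic above, in table form:
 *
 *   FORCE_STATIC | public addr | BR/EDR off + static set | identity used
 *   -------------+-------------+-------------------------+--------------
 *   yes          | any         | any                     | static random
 *   no           | none        | any                     | static random
 *   no           | yes         | yes                     | static random
 *   no           | yes         | no                      | public
 */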
2362
2363static void hci_clear_wake_reason(struct hci_dev *hdev)
2364{
2365 hci_dev_lock(hdev);
2366
2367 hdev->wake_reason = 0;
2368 bacpy(&hdev->wake_addr, BDADDR_ANY);
2369 hdev->wake_addr_type = 0;
2370
2371 hci_dev_unlock(hdev);
2372}
2373
2374static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2375 void *data)
2376{
2377 struct hci_dev *hdev =
2378 container_of(nb, struct hci_dev, suspend_notifier);
2379 int ret = 0;
2380
2381 /* Userspace has full control of this device. Do nothing. */
2382 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2383 return NOTIFY_DONE;
2384
2385 /* To avoid a potential race with hci_unregister_dev. */
2386 hci_dev_hold(hdev);
2387
2388 switch (action) {
2389 case PM_HIBERNATION_PREPARE:
2390 case PM_SUSPEND_PREPARE:
2391 ret = hci_suspend_dev(hdev);
2392 break;
2393 case PM_POST_HIBERNATION:
2394 case PM_POST_SUSPEND:
2395 ret = hci_resume_dev(hdev);
2396 break;
2397 }
2398
2399 if (ret)
2400 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2401 action, ret);
2402
2403 hci_dev_put(hdev);
2404 return NOTIFY_DONE;
2405}
2406
2407/* Alloc HCI device */
2408struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2409{
2410 struct hci_dev *hdev;
2411 unsigned int alloc_size;
2412
2413 alloc_size = sizeof(*hdev);
2414 if (sizeof_priv) {
2415 /* Fixme: May need ALIGN-ment? */
2416 alloc_size += sizeof_priv;
2417 }
2418
2419 hdev = kzalloc(alloc_size, GFP_KERNEL);
2420 if (!hdev)
2421 return NULL;
2422
2423 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2424 hdev->esco_type = (ESCO_HV1);
2425 hdev->link_mode = (HCI_LM_ACCEPT);
2426 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2427 hdev->io_capability = 0x03; /* No Input No Output */
2428 hdev->manufacturer = 0xffff; /* Default to internal use */
2429 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2430 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2431 hdev->adv_instance_cnt = 0;
2432 hdev->cur_adv_instance = 0x00;
2433 hdev->adv_instance_timeout = 0;
2434
2435 hdev->advmon_allowlist_duration = 300;
2436 hdev->advmon_no_filter_duration = 500;
2437 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2438
2439 hdev->sniff_max_interval = 800;
2440 hdev->sniff_min_interval = 80;
2441
2442 hdev->le_adv_channel_map = 0x07;
2443 hdev->le_adv_min_interval = 0x0800;
2444 hdev->le_adv_max_interval = 0x0800;
2445 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2446 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2447 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2448 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2449 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2450 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2451 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2452 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2453 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2454 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2455 hdev->le_conn_min_interval = 0x0018;
2456 hdev->le_conn_max_interval = 0x0028;
2457 hdev->le_conn_latency = 0x0000;
2458 hdev->le_supv_timeout = 0x002a;
2459 hdev->le_def_tx_len = 0x001b;
2460 hdev->le_def_tx_time = 0x0148;
2461 hdev->le_max_tx_len = 0x001b;
2462 hdev->le_max_tx_time = 0x0148;
2463 hdev->le_max_rx_len = 0x001b;
2464 hdev->le_max_rx_time = 0x0148;
2465 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2466 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2467 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2468 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2469 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2470 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2471 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2472 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2473 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2474
2475 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2476 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2477 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2478 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2479 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2480 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2481
2482 /* default 1.28 sec page scan */
2483 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2484 hdev->def_page_scan_int = 0x0800;
2485 hdev->def_page_scan_window = 0x0012;
2486
2487 mutex_init(&hdev->lock);
2488 mutex_init(&hdev->req_lock);
2489
2490 ida_init(&hdev->unset_handle_ida);
2491
2492 INIT_LIST_HEAD(&hdev->mesh_pending);
2493 INIT_LIST_HEAD(&hdev->mgmt_pending);
2494 INIT_LIST_HEAD(&hdev->reject_list);
2495 INIT_LIST_HEAD(&hdev->accept_list);
2496 INIT_LIST_HEAD(&hdev->uuids);
2497 INIT_LIST_HEAD(&hdev->link_keys);
2498 INIT_LIST_HEAD(&hdev->long_term_keys);
2499 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2500 INIT_LIST_HEAD(&hdev->remote_oob_data);
2501 INIT_LIST_HEAD(&hdev->le_accept_list);
2502 INIT_LIST_HEAD(&hdev->le_resolv_list);
2503 INIT_LIST_HEAD(&hdev->le_conn_params);
2504 INIT_LIST_HEAD(&hdev->pend_le_conns);
2505 INIT_LIST_HEAD(&hdev->pend_le_reports);
2506 INIT_LIST_HEAD(&hdev->conn_hash.list);
2507 INIT_LIST_HEAD(&hdev->adv_instances);
2508 INIT_LIST_HEAD(&hdev->blocked_keys);
2509 INIT_LIST_HEAD(&hdev->monitored_devices);
2510
2511 INIT_LIST_HEAD(&hdev->local_codecs);
2512 INIT_WORK(&hdev->rx_work, hci_rx_work);
2513 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2514 INIT_WORK(&hdev->tx_work, hci_tx_work);
2515 INIT_WORK(&hdev->power_on, hci_power_on);
2516 INIT_WORK(&hdev->error_reset, hci_error_reset);
2517
2518 hci_cmd_sync_init(hdev);
2519
2520 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2521
2522 skb_queue_head_init(&hdev->rx_q);
2523 skb_queue_head_init(&hdev->cmd_q);
2524 skb_queue_head_init(&hdev->raw_q);
2525
2526 init_waitqueue_head(&hdev->req_wait_q);
2527
2528 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2529 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2530
2531 hci_devcd_setup(hdev);
2532
2533 hci_init_sysfs(hdev);
2534 discovery_init(hdev);
2535
2536 return hdev;
2537}
2538EXPORT_SYMBOL(hci_alloc_dev_priv);
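
/* Example (illustrative sketch, not in-tree code): a driver allocating
 * an hci_dev with a private data area attached; hci_get_priv() returns
 * the region that follows struct hci_dev. The example_* names are
 * hypothetical.
 */
#if 0
struct example_drv_data {
        unsigned int fw_variant;
};

static struct hci_dev *example_alloc(void)
{
        struct hci_dev *hdev;
        struct example_drv_data *priv;

        hdev = hci_alloc_dev_priv(sizeof(*priv));
        if (!hdev)
                return NULL;

        priv = hci_get_priv(hdev);
        priv->fw_variant = 0;

        return hdev;
}
#endif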
2539
2540/* Free HCI device */
2541void hci_free_dev(struct hci_dev *hdev)
2542{
2543 /* Will be freed via the device release callback */
2544 put_device(&hdev->dev);
2545}
2546EXPORT_SYMBOL(hci_free_dev);
2547
2548/* Register HCI device */
2549int hci_register_dev(struct hci_dev *hdev)
2550{
2551 int id, error;
2552
2553 if (!hdev->open || !hdev->close || !hdev->send)
2554 return -EINVAL;
2555
2556 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2557 if (id < 0)
2558 return id;
2559
2560 error = dev_set_name(&hdev->dev, "hci%u", id);
2561 if (error)
2562 return error;
2563
2564 hdev->name = dev_name(&hdev->dev);
2565 hdev->id = id;
2566
2567 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2568
2569 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2570 if (!hdev->workqueue) {
2571 error = -ENOMEM;
2572 goto err;
2573 }
2574
2575 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2576 hdev->name);
2577 if (!hdev->req_workqueue) {
2578 destroy_workqueue(hdev->workqueue);
2579 error = -ENOMEM;
2580 goto err;
2581 }
2582
2583 if (!IS_ERR_OR_NULL(bt_debugfs))
2584 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2585
2586 error = device_add(&hdev->dev);
2587 if (error < 0)
2588 goto err_wqueue;
2589
2590 hci_leds_init(hdev);
2591
2592 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2593 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2594 hdev);
2595 if (hdev->rfkill) {
2596 if (rfkill_register(hdev->rfkill) < 0) {
2597 rfkill_destroy(hdev->rfkill);
2598 hdev->rfkill = NULL;
2599 }
2600 }
2601
2602 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2603 hci_dev_set_flag(hdev, HCI_RFKILLED);
2604
2605 hci_dev_set_flag(hdev, HCI_SETUP);
2606 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2607
2608 /* Assume BR/EDR support until proven otherwise (such as
2609 * through reading supported features during init).
2610 */
2611 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2612
2613 write_lock(&hci_dev_list_lock);
2614 list_add(&hdev->list, &hci_dev_list);
2615 write_unlock(&hci_dev_list_lock);
2616
2617 /* Devices that are marked for raw-only usage are unconfigured
2618 * and should not be included in normal operation.
2619 */
2620 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2621 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2622
2623 /* Mark the Remote Wakeup connection flag as supported if the
2624 * driver has a wakeup callback.
2625 */
2626 if (hdev->wakeup)
2627 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2628
2629 hci_sock_dev_event(hdev, HCI_DEV_REG);
2630 hci_dev_hold(hdev);
2631
2632 error = hci_register_suspend_notifier(hdev);
2633 if (error)
2634 BT_WARN("register suspend notifier failed error:%d\n", error);
2635
2636 queue_work(hdev->req_workqueue, &hdev->power_on);
2637
2638 idr_init(&hdev->adv_monitors_idr);
2639 msft_register(hdev);
2640
2641 return id;
2642
2643err_wqueue:
2644 debugfs_remove_recursive(hdev->debugfs);
2645 destroy_workqueue(hdev->workqueue);
2646 destroy_workqueue(hdev->req_workqueue);
2647err:
2648 ida_free(&hci_index_ida, hdev->id);
2649
2650 return error;
2651}
2652EXPORT_SYMBOL(hci_register_dev);
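
/* Example (illustrative sketch, not in-tree code): the minimal driver
 * contract enforced at the top of hci_register_dev() -- open, close and
 * send must be set before registering. The example_* names are
 * hypothetical.
 */
#if 0
static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}
#endif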
2653
2654/* Unregister HCI device */
2655void hci_unregister_dev(struct hci_dev *hdev)
2656{
2657 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2658
2659 mutex_lock(&hdev->unregister_lock);
2660 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2661 mutex_unlock(&hdev->unregister_lock);
2662
2663 write_lock(&hci_dev_list_lock);
2664 list_del(&hdev->list);
2665 write_unlock(&hci_dev_list_lock);
2666
2667 disable_work_sync(&hdev->rx_work);
2668 disable_work_sync(&hdev->cmd_work);
2669 disable_work_sync(&hdev->tx_work);
2670 disable_work_sync(&hdev->power_on);
2671 disable_work_sync(&hdev->error_reset);
2672
2673 hci_cmd_sync_clear(hdev);
2674
2675 hci_unregister_suspend_notifier(hdev);
2676
2677 hci_dev_do_close(hdev);
2678
2679 if (!test_bit(HCI_INIT, &hdev->flags) &&
2680 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2681 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2682 hci_dev_lock(hdev);
2683 mgmt_index_removed(hdev);
2684 hci_dev_unlock(hdev);
2685 }
2686
2687 /* mgmt_index_removed should take care of emptying the
2688 * pending list */
2689 BUG_ON(!list_empty(&hdev->mgmt_pending));
2690
2691 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2692
2693 if (hdev->rfkill) {
2694 rfkill_unregister(hdev->rfkill);
2695 rfkill_destroy(hdev->rfkill);
2696 }
2697
2698 device_del(&hdev->dev);
2699 /* Actual cleanup is deferred until hci_release_dev(). */
2700 hci_dev_put(hdev);
2701}
2702EXPORT_SYMBOL(hci_unregister_dev);
2703
2704/* Release HCI device */
2705void hci_release_dev(struct hci_dev *hdev)
2706{
2707 debugfs_remove_recursive(hdev->debugfs);
2708 kfree_const(hdev->hw_info);
2709 kfree_const(hdev->fw_info);
2710
2711 destroy_workqueue(hdev->workqueue);
2712 destroy_workqueue(hdev->req_workqueue);
2713
2714 hci_dev_lock(hdev);
2715 hci_bdaddr_list_clear(&hdev->reject_list);
2716 hci_bdaddr_list_clear(&hdev->accept_list);
2717 hci_uuids_clear(hdev);
2718 hci_link_keys_clear(hdev);
2719 hci_smp_ltks_clear(hdev);
2720 hci_smp_irks_clear(hdev);
2721 hci_remote_oob_data_clear(hdev);
2722 hci_adv_instances_clear(hdev);
2723 hci_adv_monitors_clear(hdev);
2724 hci_bdaddr_list_clear(&hdev->le_accept_list);
2725 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2726 hci_conn_params_clear_all(hdev);
2727 hci_discovery_filter_clear(hdev);
2728 hci_blocked_keys_clear(hdev);
2729 hci_codec_list_clear(&hdev->local_codecs);
2730 msft_release(hdev);
2731 hci_dev_unlock(hdev);
2732
2733 ida_destroy(&hdev->unset_handle_ida);
2734 ida_free(&hci_index_ida, hdev->id);
2735 kfree_skb(hdev->sent_cmd);
2736 kfree_skb(hdev->req_skb);
2737 kfree_skb(hdev->recv_event);
2738 kfree(hdev);
2739}
2740EXPORT_SYMBOL(hci_release_dev);
2741
2742int hci_register_suspend_notifier(struct hci_dev *hdev)
2743{
2744 int ret = 0;
2745
2746 if (!hdev->suspend_notifier.notifier_call &&
2747 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2748 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2749 ret = register_pm_notifier(&hdev->suspend_notifier);
2750 }
2751
2752 return ret;
2753}
2754
2755int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2756{
2757 int ret = 0;
2758
2759 if (hdev->suspend_notifier.notifier_call) {
2760 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2761 if (!ret)
2762 hdev->suspend_notifier.notifier_call = NULL;
2763 }
2764
2765 return ret;
2766}
2767
2768/* Cancel ongoing command synchronously:
2769 *
2770 * - Cancel command timer
2771 * - Reset command counter
2772 * - Cancel command request
2773 */
2774static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2775{
2776 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2777
2778 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2779 disable_delayed_work_sync(&hdev->cmd_timer);
2780 disable_delayed_work_sync(&hdev->ncmd_timer);
2781 } else {
2782 cancel_delayed_work_sync(&hdev->cmd_timer);
2783 cancel_delayed_work_sync(&hdev->ncmd_timer);
2784 }
2785
2786 atomic_set(&hdev->cmd_cnt, 1);
2787
2788 hci_cmd_sync_cancel_sync(hdev, err);
2789}
2790
2791/* Suspend HCI device */
2792int hci_suspend_dev(struct hci_dev *hdev)
2793{
2794 int ret;
2795
2796 bt_dev_dbg(hdev, "");
2797
2798 /* Suspend should only act when the controller is powered. */
2799 if (!hdev_is_powered(hdev) ||
2800 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2801 return 0;
2802
2803 /* If powering down don't attempt to suspend */
2804 if (mgmt_powering_down(hdev))
2805 return 0;
2806
2807 /* Cancel potentially blocking sync operation before suspend */
2808 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2809
2810 hci_req_sync_lock(hdev);
2811 ret = hci_suspend_sync(hdev);
2812 hci_req_sync_unlock(hdev);
2813
2814 hci_clear_wake_reason(hdev);
2815 mgmt_suspending(hdev, hdev->suspend_state);
2816
2817 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2818 return ret;
2819}
2820EXPORT_SYMBOL(hci_suspend_dev);
2821
2822/* Resume HCI device */
2823int hci_resume_dev(struct hci_dev *hdev)
2824{
2825 int ret;
2826
2827 bt_dev_dbg(hdev, "");
2828
2829 /* Resume should only act when the controller is powered. */
2830 if (!hdev_is_powered(hdev) ||
2831 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2832 return 0;
2833
2834 /* If powering down don't attempt to resume */
2835 if (mgmt_powering_down(hdev))
2836 return 0;
2837
2838 hci_req_sync_lock(hdev);
2839 ret = hci_resume_sync(hdev);
2840 hci_req_sync_unlock(hdev);
2841
2842 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2843 hdev->wake_addr_type);
2844
2845 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2846 return ret;
2847}
2848EXPORT_SYMBOL(hci_resume_dev);
2849
2850/* Reset HCI device */
2851int hci_reset_dev(struct hci_dev *hdev)
2852{
2853 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2854 struct sk_buff *skb;
2855
2856 skb = bt_skb_alloc(3, GFP_ATOMIC);
2857 if (!skb)
2858 return -ENOMEM;
2859
2860 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2861 skb_put_data(skb, hw_err, 3);
2862
2863 bt_dev_err(hdev, "Injecting HCI hardware error event");
2864
2865 /* Send Hardware Error to upper stack */
2866 return hci_recv_frame(hdev, skb);
2867}
2868EXPORT_SYMBOL(hci_reset_dev);
2869
2870static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2871{
2872 if (hdev->classify_pkt_type)
2873 return hdev->classify_pkt_type(hdev, skb);
2874
2875 return hci_skb_pkt_type(skb);
2876}
2877
2878/* Receive frame from HCI drivers */
2879int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2880{
2881 u8 dev_pkt_type;
2882
2883 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2884 && !test_bit(HCI_INIT, &hdev->flags))) {
2885 kfree_skb(skb);
2886 return -ENXIO;
2887 }
2888
2889 /* Check if the driver agrees with the packet type classification */
2890 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2891 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2892 hci_skb_pkt_type(skb) = dev_pkt_type;
2893 }
2894
2895 switch (hci_skb_pkt_type(skb)) {
2896 case HCI_EVENT_PKT:
2897 break;
2898 case HCI_ACLDATA_PKT:
2899 /* Detect if an ISO packet has been sent as ACL */
2900 if (hci_conn_num(hdev, ISO_LINK)) {
2901 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2902 __u8 type;
2903
2904 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2905 if (type == ISO_LINK)
2906 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2907 }
2908 break;
2909 case HCI_SCODATA_PKT:
2910 break;
2911 case HCI_ISODATA_PKT:
2912 break;
2913 default:
2914 kfree_skb(skb);
2915 return -EINVAL;
2916 }
2917
2918 /* Incoming skb */
2919 bt_cb(skb)->incoming = 1;
2920
2921 /* Time stamp */
2922 __net_timestamp(skb);
2923
2924 skb_queue_tail(&hdev->rx_q, skb);
2925 queue_work(hdev->workqueue, &hdev->rx_work);
2926
2927 return 0;
2928}
2929EXPORT_SYMBOL(hci_recv_frame);
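
/* Example (illustrative sketch, not in-tree code): how a driver hands a
 * received HCI event up to the core. The packet type must be set before
 * calling hci_recv_frame(), which consumes the skb on success and on
 * failure alike. The example_* name is hypothetical.
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev,
                                 const void *data, size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, data, len);

        return hci_recv_frame(hdev, skb);
}
#endif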
2930
2931/* Receive diagnostic message from HCI drivers */
2932int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2933{
2934 /* Mark as diagnostic packet */
2935 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2936
2937 /* Time stamp */
2938 __net_timestamp(skb);
2939
2940 skb_queue_tail(&hdev->rx_q, skb);
2941 queue_work(hdev->workqueue, &hdev->rx_work);
2942
2943 return 0;
2944}
2945EXPORT_SYMBOL(hci_recv_diag);
2946
2947void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2948{
2949 va_list vargs;
2950
2951 va_start(vargs, fmt);
2952 kfree_const(hdev->hw_info);
2953 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2954 va_end(vargs);
2955}
2956EXPORT_SYMBOL(hci_set_hw_info);
2957
2958void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2959{
2960 va_list vargs;
2961
2962 va_start(vargs, fmt);
2963 kfree_const(hdev->fw_info);
2964 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2965 va_end(vargs);
2966}
2967EXPORT_SYMBOL(hci_set_fw_info);
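
/* Example (illustrative): drivers typically record these strings once
 * during setup(), e.g.
 *
 *        hci_set_hw_info(hdev, "hw variant 0x%02x", hw_variant);
 *        hci_set_fw_info(hdev, "build %u", build);
 *
 * The format arguments are hypothetical; any printf-style format works,
 * and a repeated call replaces the previously stored string.
 */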
2968
2969/* ---- Interface to upper protocols ---- */
2970
2971int hci_register_cb(struct hci_cb *cb)
2972{
2973 BT_DBG("%p name %s", cb, cb->name);
2974
2975 list_add_tail_rcu(&cb->list, &hci_cb_list);
2976
2977 return 0;
2978}
2979EXPORT_SYMBOL(hci_register_cb);
2980
2981int hci_unregister_cb(struct hci_cb *cb)
2982{
2983 BT_DBG("%p name %s", cb, cb->name);
2984
2985 list_del_rcu(&cb->list);
2986 synchronize_rcu();
2987
2988 return 0;
2989}
2990EXPORT_SYMBOL(hci_unregister_cb);
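
/* Example (illustrative sketch, not in-tree code): an upper protocol
 * registering for connection callbacks. Only a small subset of struct
 * hci_cb is shown; the example_* names are hypothetical.
 */
#if 0
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name           = "example",
        .connect_cfm    = example_connect_cfm,
};

static int __init example_init(void)
{
        return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
        hci_unregister_cb(&example_cb);
}
#endif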
2991
2992static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2993{
2994 int err;
2995
2996 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2997 skb->len);
2998
2999 /* Time stamp */
3000 __net_timestamp(skb);
3001
3002 /* Send copy to monitor */
3003 hci_send_to_monitor(hdev, skb);
3004
3005 if (atomic_read(&hdev->promisc)) {
3006 /* Send copy to the sockets */
3007 hci_send_to_sock(hdev, skb);
3008 }
3009
3010 /* Get rid of skb owner, prior to sending to the driver. */
3011 skb_orphan(skb);
3012
3013 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3014 kfree_skb(skb);
3015 return -EINVAL;
3016 }
3017
3018 err = hdev->send(hdev, skb);
3019 if (err < 0) {
3020 bt_dev_err(hdev, "sending frame failed (%d)", err);
3021 kfree_skb(skb);
3022 return err;
3023 }
3024
3025 return 0;
3026}
3027
3028/* Send HCI command */
3029int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3030 const void *param)
3031{
3032 struct sk_buff *skb;
3033
3034 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3035
3036 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3037 if (!skb) {
3038 bt_dev_err(hdev, "no memory for command");
3039 return -ENOMEM;
3040 }
3041
3042 /* Stand-alone HCI commands must be flagged as
3043 * single-command requests.
3044 */
3045 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3046
3047 skb_queue_tail(&hdev->cmd_q, skb);
3048 queue_work(hdev->workqueue, &hdev->cmd_work);
3049
3050 return 0;
3051}
3052
3053int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3054 const void *param)
3055{
3056 struct sk_buff *skb;
3057
3058 if (hci_opcode_ogf(opcode) != 0x3f) {
3059 /* A controller receiving a command shall respond with either
3060 * a Command Status Event or a Command Complete Event.
3061 * Therefore, all standard HCI commands must be sent via the
3062 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3063 * Some vendors do not comply with this rule for vendor-specific
3064 * commands and do not return any event. We want to support
3065 * unresponded commands for such cases only.
3066 */
3067 bt_dev_err(hdev, "unresponded command not supported");
3068 return -EINVAL;
3069 }
3070
3071 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3072 if (!skb) {
3073 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3074 opcode);
3075 return -ENOMEM;
3076 }
3077
3078 hci_send_frame(hdev, skb);
3079
3080 return 0;
3081}
3082EXPORT_SYMBOL(__hci_cmd_send);
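
/* Example (illustrative sketch, not in-tree code): __hci_cmd_send() only
 * accepts vendor commands (OGF 0x3f), the one group a controller may
 * legitimately leave unanswered. The OCF and parameter below are
 * hypothetical.
 */
#if 0
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param = 0x01;

        /* hci_opcode_pack(0x3f, ocf) builds a vendor-specific opcode */
        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), &param);
}
#endif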
3083
3084/* Get data from the previously sent command */
3085static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3086{
3087 struct hci_command_hdr *hdr;
3088
3089 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3090 return NULL;
3091
3092 hdr = (void *)skb->data;
3093
3094 if (hdr->opcode != cpu_to_le16(opcode))
3095 return NULL;
3096
3097 return skb->data + HCI_COMMAND_HDR_SIZE;
3098}
3099
3100/* Get data from the previously sent command */
3101void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3102{
3103 void *data;
3104
3105 /* Check if opcode matches last sent command */
3106 data = hci_cmd_data(hdev->sent_cmd, opcode);
3107 if (!data)
3108 /* Check if opcode matches last request */
3109 data = hci_cmd_data(hdev->req_skb, opcode);
3110
3111 return data;
3112}
3113
3114/* Get data from last received event */
3115void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3116{
3117 struct hci_event_hdr *hdr;
3118 int offset;
3119
3120 if (!hdev->recv_event)
3121 return NULL;
3122
3123 hdr = (void *)hdev->recv_event->data;
3124 offset = sizeof(*hdr);
3125
3126 if (hdr->evt != event) {
3127 /* In case of an LE meta event, check whether the subevent matches */
3128 if (hdr->evt == HCI_EV_LE_META) {
3129 struct hci_ev_le_meta *ev;
3130
3131 ev = (void *)hdev->recv_event->data + offset;
3132 offset += sizeof(*ev);
3133 if (ev->subevent == event)
3134 goto found;
3135 }
3136 return NULL;
3137 }
3138
3139found:
3140 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3141
3142 return hdev->recv_event->data + offset;
3143}
3144
3145/* Send ACL data */
3146static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3147{
3148 struct hci_acl_hdr *hdr;
3149 int len = skb->len;
3150
3151 skb_push(skb, HCI_ACL_HDR_SIZE);
3152 skb_reset_transport_header(skb);
3153 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3154 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3155 hdr->dlen = cpu_to_le16(len);
3156}
3157
3158static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3159 struct sk_buff *skb, __u16 flags)
3160{
3161 struct hci_conn *conn = chan->conn;
3162 struct hci_dev *hdev = conn->hdev;
3163 struct sk_buff *list;
3164
3165 skb->len = skb_headlen(skb);
3166 skb->data_len = 0;
3167
3168 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3169
3170 hci_add_acl_hdr(skb, conn->handle, flags);
3171
3172 list = skb_shinfo(skb)->frag_list;
3173 if (!list) {
3174 /* Non fragmented */
3175 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3176
3177 skb_queue_tail(queue, skb);
3178 } else {
3179 /* Fragmented */
3180 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3181
3182 skb_shinfo(skb)->frag_list = NULL;
3183
3184 /* Queue all fragments atomically. We need to use spin_lock_bh
3185 * here because of 6LoWPAN links, as there this function is
3186 * called from softirq and using normal spin lock could cause
3187 * deadlocks.
3188 */
3189 spin_lock_bh(&queue->lock);
3190
3191 __skb_queue_tail(queue, skb);
3192
3193 flags &= ~ACL_START;
3194 flags |= ACL_CONT;
3195 do {
3196 skb = list; list = list->next;
3197
3198 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3199 hci_add_acl_hdr(skb, conn->handle, flags);
3200
3201 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3202
3203 __skb_queue_tail(queue, skb);
3204 } while (list);
3205
3206 spin_unlock_bh(&queue->lock);
3207 }
3208}
3209
3210void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3211{
3212 struct hci_dev *hdev = chan->conn->hdev;
3213
3214 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3215
3216 hci_queue_acl(chan, &chan->data_q, skb, flags);
3217
3218 queue_work(hdev->workqueue, &hdev->tx_work);
3219}
3220
3221/* Send SCO data */
3222void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3223{
3224 struct hci_dev *hdev = conn->hdev;
3225 struct hci_sco_hdr hdr;
3226
3227 BT_DBG("%s len %d", hdev->name, skb->len);
3228
3229 hdr.handle = cpu_to_le16(conn->handle);
3230 hdr.dlen = skb->len;
3231
3232 skb_push(skb, HCI_SCO_HDR_SIZE);
3233 skb_reset_transport_header(skb);
3234 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3235
3236 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3237
3238 skb_queue_tail(&conn->data_q, skb);
3239 queue_work(hdev->workqueue, &hdev->tx_work);
3240}
3241
3242/* Send ISO data */
3243static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3244{
3245 struct hci_iso_hdr *hdr;
3246 int len = skb->len;
3247
3248 skb_push(skb, HCI_ISO_HDR_SIZE);
3249 skb_reset_transport_header(skb);
3250 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3251 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3252 hdr->dlen = cpu_to_le16(len);
3253}
3254
3255static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3256 struct sk_buff *skb)
3257{
3258 struct hci_dev *hdev = conn->hdev;
3259 struct sk_buff *list;
3260 __u16 flags;
3261
3262 skb->len = skb_headlen(skb);
3263 skb->data_len = 0;
3264
3265 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3266
3267 list = skb_shinfo(skb)->frag_list;
3268
3269 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3270 hci_add_iso_hdr(skb, conn->handle, flags);
3271
3272 if (!list) {
3273 /* Non fragmented */
3274 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3275
3276 skb_queue_tail(queue, skb);
3277 } else {
3278 /* Fragmented */
3279 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3280
3281 skb_shinfo(skb)->frag_list = NULL;
3282
3283 __skb_queue_tail(queue, skb);
3284
3285 do {
3286 skb = list; list = list->next;
3287
3288 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3289 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3290 0x00);
3291 hci_add_iso_hdr(skb, conn->handle, flags);
3292
3293 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3294
3295 __skb_queue_tail(queue, skb);
3296 } while (list);
3297 }
3298}
3299
3300void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3301{
3302 struct hci_dev *hdev = conn->hdev;
3303
3304 BT_DBG("%s len %d", hdev->name, skb->len);
3305
3306 hci_queue_iso(conn, &conn->data_q, skb);
3307
3308 queue_work(hdev->workqueue, &hdev->tx_work);
3309}
3310
3311/* ---- HCI TX task (outgoing data) ---- */
3312
3313/* HCI Connection scheduler */
3314static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3315{
3316 struct hci_dev *hdev;
3317 int cnt, q;
3318
3319 if (!conn) {
3320 *quote = 0;
3321 return;
3322 }
3323
3324 hdev = conn->hdev;
3325
3326 switch (conn->type) {
3327 case ACL_LINK:
3328 cnt = hdev->acl_cnt;
3329 break;
3330 case SCO_LINK:
3331 case ESCO_LINK:
3332 cnt = hdev->sco_cnt;
3333 break;
3334 case LE_LINK:
3335 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3336 break;
3337 case ISO_LINK:
3338 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3339 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3340 break;
3341 default:
3342 cnt = 0;
3343 bt_dev_err(hdev, "unknown link type %d", conn->type);
3344 }
3345
3346 q = cnt / num;
3347 *quote = q ? q : 1;
3348}
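
/* Worked example: with 8 free controller buffers (cnt) shared by
 * num == 3 connections that have data queued, each connection gets a
 * quote of 8 / 3 = 2 packets per scheduling round; when cnt / num
 * rounds down to 0 the quote is clamped to 1 so no connection is
 * starved outright.
 */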
3349
3350static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3351 int *quote)
3352{
3353 struct hci_conn_hash *h = &hdev->conn_hash;
3354 struct hci_conn *conn = NULL, *c;
3355 unsigned int num = 0, min = ~0;
3356
3357 /* We don't have to lock device here. Connections are always
3358 * added and removed with TX task disabled. */
3359
3360 rcu_read_lock();
3361
3362 list_for_each_entry_rcu(c, &h->list, list) {
3363 if (c->type != type || skb_queue_empty(&c->data_q))
3364 continue;
3365
3366 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3367 continue;
3368
3369 num++;
3370
3371 if (c->sent < min) {
3372 min = c->sent;
3373 conn = c;
3374 }
3375
3376 if (hci_conn_num(hdev, type) == num)
3377 break;
3378 }
3379
3380 rcu_read_unlock();
3381
3382 hci_quote_sent(conn, num, quote);
3383
3384 BT_DBG("conn %p quote %d", conn, *quote);
3385 return conn;
3386}
3387
3388static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3389{
3390 struct hci_conn_hash *h = &hdev->conn_hash;
3391 struct hci_conn *c;
3392
3393 bt_dev_err(hdev, "link tx timeout");
3394
3395 rcu_read_lock();
3396
3397 /* Kill stalled connections */
3398 list_for_each_entry_rcu(c, &h->list, list) {
3399 if (c->type == type && c->sent) {
3400 bt_dev_err(hdev, "killing stalled connection %pMR",
3401 &c->dst);
3402 /* hci_disconnect might sleep, so, we have to release
3403 * the RCU read lock before calling it.
3404 */
3405 rcu_read_unlock();
3406 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3407 rcu_read_lock();
3408 }
3409 }
3410
3411 rcu_read_unlock();
3412}
3413
3414static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3415 int *quote)
3416{
3417 struct hci_conn_hash *h = &hdev->conn_hash;
3418 struct hci_chan *chan = NULL;
3419 unsigned int num = 0, min = ~0, cur_prio = 0;
3420 struct hci_conn *conn;
3421 int conn_num = 0;
3422
3423 BT_DBG("%s", hdev->name);
3424
3425 rcu_read_lock();
3426
3427 list_for_each_entry_rcu(conn, &h->list, list) {
3428 struct hci_chan *tmp;
3429
3430 if (conn->type != type)
3431 continue;
3432
3433 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3434 continue;
3435
3436 conn_num++;
3437
3438 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3439 struct sk_buff *skb;
3440
3441 if (skb_queue_empty(&tmp->data_q))
3442 continue;
3443
3444 skb = skb_peek(&tmp->data_q);
3445 if (skb->priority < cur_prio)
3446 continue;
3447
3448 if (skb->priority > cur_prio) {
3449 num = 0;
3450 min = ~0;
3451 cur_prio = skb->priority;
3452 }
3453
3454 num++;
3455
3456 if (conn->sent < min) {
3457 min = conn->sent;
3458 chan = tmp;
3459 }
3460 }
3461
3462 if (hci_conn_num(hdev, type) == conn_num)
3463 break;
3464 }
3465
3466 rcu_read_unlock();
3467
3468 if (!chan)
3469 return NULL;
3470
3471 hci_quote_sent(chan->conn, num, quote);
3472
3473 BT_DBG("chan %p quote %d", chan, *quote);
3474 return chan;
3475}
3476
3477static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3478{
3479 struct hci_conn_hash *h = &hdev->conn_hash;
3480 struct hci_conn *conn;
3481 int num = 0;
3482
3483 BT_DBG("%s", hdev->name);
3484
3485 rcu_read_lock();
3486
3487 list_for_each_entry_rcu(conn, &h->list, list) {
3488 struct hci_chan *chan;
3489
3490 if (conn->type != type)
3491 continue;
3492
3493 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3494 continue;
3495
3496 num++;
3497
3498 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3499 struct sk_buff *skb;
3500
3501 if (chan->sent) {
3502 chan->sent = 0;
3503 continue;
3504 }
3505
3506 if (skb_queue_empty(&chan->data_q))
3507 continue;
3508
3509 skb = skb_peek(&chan->data_q);
3510 if (skb->priority >= HCI_PRIO_MAX - 1)
3511 continue;
3512
3513 skb->priority = HCI_PRIO_MAX - 1;
3514
3515 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3516 skb->priority);
3517 }
3518
3519 if (hci_conn_num(hdev, type) == num)
3520 break;
3521 }
3522
3523 rcu_read_unlock();
3524
3525}
3526
3527static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3528{
3529 unsigned long last_tx;
3530
3531 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3532 return;
3533
3534 switch (type) {
3535 case LE_LINK:
3536 last_tx = hdev->le_last_tx;
3537 break;
3538 default:
3539 last_tx = hdev->acl_last_tx;
3540 break;
3541 }
3542
3543 /* tx timeout must be longer than maximum link supervision timeout
3544 * (40.9 seconds)
3545 */
3546 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3547 hci_link_tx_to(hdev, type);
3548}
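
/* Worked example: the largest possible link supervision timeout is
 * 0xffff slots * 0.625 ms/slot, roughly 40.9 seconds, so with
 * HCI_ACL_TX_TIMEOUT at 45 seconds (its value at the time of writing)
 * the supervision timeout always fires first and the stalled-link
 * handling above is only a backstop.
 */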
3549
3550/* Schedule SCO */
3551static void hci_sched_sco(struct hci_dev *hdev)
3552{
3553 struct hci_conn *conn;
3554 struct sk_buff *skb;
3555 int quote;
3556
3557 BT_DBG("%s", hdev->name);
3558
3559 if (!hci_conn_num(hdev, SCO_LINK))
3560 return;
3561
3562 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3563 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3564 BT_DBG("skb %p len %d", skb, skb->len);
3565 hci_send_frame(hdev, skb);
3566
3567 conn->sent++;
3568 if (conn->sent == ~0)
3569 conn->sent = 0;
3570 }
3571 }
3572}
3573
3574static void hci_sched_esco(struct hci_dev *hdev)
3575{
3576 struct hci_conn *conn;
3577 struct sk_buff *skb;
3578 int quote;
3579
3580 BT_DBG("%s", hdev->name);
3581
3582 if (!hci_conn_num(hdev, ESCO_LINK))
3583 return;
3584
3585 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3586 &quote))) {
3587 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3588 BT_DBG("skb %p len %d", skb, skb->len);
3589 hci_send_frame(hdev, skb);
3590
3591 conn->sent++;
3592 if (conn->sent == ~0)
3593 conn->sent = 0;
3594 }
3595 }
3596}
3597
3598static void hci_sched_acl_pkt(struct hci_dev *hdev)
3599{
3600 unsigned int cnt = hdev->acl_cnt;
3601 struct hci_chan *chan;
3602 struct sk_buff *skb;
3603 int quote;
3604
3605 __check_timeout(hdev, cnt, ACL_LINK);
3606
3607 while (hdev->acl_cnt &&
3608 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3609 u32 priority = (skb_peek(&chan->data_q))->priority;
3610 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3611 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3612 skb->len, skb->priority);
3613
3614 /* Stop if priority has changed */
3615 if (skb->priority < priority)
3616 break;
3617
3618 skb = skb_dequeue(&chan->data_q);
3619
3620 hci_conn_enter_active_mode(chan->conn,
3621 bt_cb(skb)->force_active);
3622
3623 hci_send_frame(hdev, skb);
3624 hdev->acl_last_tx = jiffies;
3625
3626 hdev->acl_cnt--;
3627 chan->sent++;
3628 chan->conn->sent++;
3629
3630 /* Send pending SCO packets right away */
3631 hci_sched_sco(hdev);
3632 hci_sched_esco(hdev);
3633 }
3634 }
3635
3636 if (cnt != hdev->acl_cnt)
3637 hci_prio_recalculate(hdev, ACL_LINK);
3638}
3639
3640static void hci_sched_acl(struct hci_dev *hdev)
3641{
3642 BT_DBG("%s", hdev->name);
3643
3644 /* Nothing to schedule if there are no ACL links */
3645 if (!hci_conn_num(hdev, ACL_LINK))
3646 return;
3647
3648 hci_sched_acl_pkt(hdev);
3649}
3650
3651static void hci_sched_le(struct hci_dev *hdev)
3652{
3653 struct hci_chan *chan;
3654 struct sk_buff *skb;
3655 int quote, *cnt, tmp;
3656
3657 BT_DBG("%s", hdev->name);
3658
3659 if (!hci_conn_num(hdev, LE_LINK))
3660 return;
3661
3662 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3663
3664 __check_timeout(hdev, *cnt, LE_LINK);
3665
3666 tmp = *cnt;
3667 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3668 u32 priority = (skb_peek(&chan->data_q))->priority;
3669 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3670 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3671 skb->len, skb->priority);
3672
3673 /* Stop if priority has changed */
3674 if (skb->priority < priority)
3675 break;
3676
3677 skb = skb_dequeue(&chan->data_q);
3678
3679 hci_send_frame(hdev, skb);
3680 hdev->le_last_tx = jiffies;
3681
3682 (*cnt)--;
3683 chan->sent++;
3684 chan->conn->sent++;
3685
3686 /* Send pending SCO packets right away */
3687 hci_sched_sco(hdev);
3688 hci_sched_esco(hdev);
3689 }
3690 }
3691
3692 if (*cnt != tmp)
3693 hci_prio_recalculate(hdev, LE_LINK);
3694}
3695
3696/* Schedule CIS */
3697static void hci_sched_iso(struct hci_dev *hdev)
3698{
3699 struct hci_conn *conn;
3700 struct sk_buff *skb;
3701 int quote, *cnt;
3702
3703 BT_DBG("%s", hdev->name);
3704
3705 if (!hci_conn_num(hdev, ISO_LINK))
3706 return;
3707
3708 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3709 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3710 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3711 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3712 BT_DBG("skb %p len %d", skb, skb->len);
3713 hci_send_frame(hdev, skb);
3714
3715 conn->sent++;
3716 if (conn->sent == ~0)
3717 conn->sent = 0;
3718 (*cnt)--;
3719 }
3720 }
3721}
3722
3723static void hci_tx_work(struct work_struct *work)
3724{
3725 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3726 struct sk_buff *skb;
3727
3728 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3729 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3730
3731 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3732 /* Schedule queues and send stuff to HCI driver */
3733 hci_sched_sco(hdev);
3734 hci_sched_esco(hdev);
3735 hci_sched_iso(hdev);
3736 hci_sched_acl(hdev);
3737 hci_sched_le(hdev);
3738 }
3739
3740 /* Send next queued raw (unknown type) packet */
3741 while ((skb = skb_dequeue(&hdev->raw_q)))
3742 hci_send_frame(hdev, skb);
3743}
3744
3745/* ----- HCI RX task (incoming data processing) ----- */
3746
3747/* ACL data packet */
3748static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3749{
3750 struct hci_acl_hdr *hdr;
3751 struct hci_conn *conn;
3752 __u16 handle, flags;
3753
3754 hdr = skb_pull_data(skb, sizeof(*hdr));
3755 if (!hdr) {
3756 bt_dev_err(hdev, "ACL packet too small");
3757 goto drop;
3758 }
3759
3760 handle = __le16_to_cpu(hdr->handle);
3761 flags = hci_flags(handle);
3762 handle = hci_handle(handle);
3763
3764 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3765 handle, flags);
3766
3767 hdev->stat.acl_rx++;
3768
3769 hci_dev_lock(hdev);
3770 conn = hci_conn_hash_lookup_handle(hdev, handle);
3771 hci_dev_unlock(hdev);
3772
3773 if (conn) {
3774 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3775
3776 /* Send to upper protocol */
3777 l2cap_recv_acldata(conn, skb, flags);
3778 return;
3779 } else {
3780 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3781 handle);
3782 }
3783
3784drop:
3785 kfree_skb(skb);
3786}
3787
3788/* SCO data packet */
3789static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3790{
3791 struct hci_sco_hdr *hdr;
3792 struct hci_conn *conn;
3793 __u16 handle, flags;
3794
3795 hdr = skb_pull_data(skb, sizeof(*hdr));
3796 if (!hdr) {
3797 bt_dev_err(hdev, "SCO packet too small");
3798 goto drop;
3799 }
3800
3801 handle = __le16_to_cpu(hdr->handle);
3802 flags = hci_flags(handle);
3803 handle = hci_handle(handle);
3804
3805 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3806 handle, flags);
3807
3808 hdev->stat.sco_rx++;
3809
3810 hci_dev_lock(hdev);
3811 conn = hci_conn_hash_lookup_handle(hdev, handle);
3812 hci_dev_unlock(hdev);
3813
3814 if (conn) {
3815 /* Send to upper protocol */
3816 hci_skb_pkt_status(skb) = flags & 0x03;
3817 sco_recv_scodata(conn, skb);
3818 return;
3819 } else {
3820 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3821 handle);
3822 }
3823
3824drop:
3825 kfree_skb(skb);
3826}
3827
3828static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3829{
3830 struct hci_iso_hdr *hdr;
3831 struct hci_conn *conn;
3832 __u16 handle, flags;
3833
3834 hdr = skb_pull_data(skb, sizeof(*hdr));
3835 if (!hdr) {
3836 bt_dev_err(hdev, "ISO packet too small");
3837 goto drop;
3838 }
3839
3840 handle = __le16_to_cpu(hdr->handle);
3841 flags = hci_flags(handle);
3842 handle = hci_handle(handle);
3843
3844 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3845 handle, flags);
3846
3847 hci_dev_lock(hdev);
3848 conn = hci_conn_hash_lookup_handle(hdev, handle);
3849 hci_dev_unlock(hdev);
3850
3851 if (!conn) {
3852 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3853 handle);
3854 goto drop;
3855 }
3856
3857 /* Send to upper protocol */
3858 iso_recv(conn, skb, flags);
3859 return;
3860
3861drop:
3862 kfree_skb(skb);
3863}
3864
3865static bool hci_req_is_complete(struct hci_dev *hdev)
3866{
3867 struct sk_buff *skb;
3868
3869 skb = skb_peek(&hdev->cmd_q);
3870 if (!skb)
3871 return true;
3872
3873 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3874}
3875
3876static void hci_resend_last(struct hci_dev *hdev)
3877{
3878 struct hci_command_hdr *sent;
3879 struct sk_buff *skb;
3880 u16 opcode;
3881
3882 if (!hdev->sent_cmd)
3883 return;
3884
3885 sent = (void *) hdev->sent_cmd->data;
3886 opcode = __le16_to_cpu(sent->opcode);
3887 if (opcode == HCI_OP_RESET)
3888 return;
3889
3890 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3891 if (!skb)
3892 return;
3893
3894 skb_queue_head(&hdev->cmd_q, skb);
3895 queue_work(hdev->workqueue, &hdev->cmd_work);
3896}
3897
3898void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3899 hci_req_complete_t *req_complete,
3900 hci_req_complete_skb_t *req_complete_skb)
3901{
3902 struct sk_buff *skb;
3903 unsigned long flags;
3904
3905 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3906
3907 /* If the completed command doesn't match the last one that was
3908 * sent, we need to handle it specially.
3909 */
3910 if (!hci_sent_cmd_data(hdev, opcode)) {
3911 /* Some CSR based controllers generate a spontaneous
3912 * reset complete event during init and any pending
3913 * command will never be completed. In such a case we
3914 * need to resend whatever was the last sent
3915 * command.
3916 */
3917 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3918 hci_resend_last(hdev);
3919
3920 return;
3921 }
3922
3923 /* If we reach this point this event matches the last command sent */
3924 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3925
3926 /* If the command succeeded and there are still more commands in
3927 * this request, the request is not yet complete.
3928 */
3929 if (!status && !hci_req_is_complete(hdev))
3930 return;
3931
3932 skb = hdev->req_skb;
3933
3934 /* If this was the last command in a request, the complete
3935 * callback would be found in hdev->req_skb instead of the
3936 * command queue (hdev->cmd_q).
3937 */
3938 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3939 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3940 return;
3941 }
3942
3943 if (skb && bt_cb(skb)->hci.req_complete) {
3944 *req_complete = bt_cb(skb)->hci.req_complete;
3945 return;
3946 }
3947
3948 /* Remove all pending commands belonging to this request */
3949 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3950 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3951 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3952 __skb_queue_head(&hdev->cmd_q, skb);
3953 break;
3954 }
3955
3956 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3957 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3958 else
3959 *req_complete = bt_cb(skb)->hci.req_complete;
3960 dev_kfree_skb_irq(skb);
3961 }
3962 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3963}
3964
3965static void hci_rx_work(struct work_struct *work)
3966{
3967 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3968 struct sk_buff *skb;
3969
3970 BT_DBG("%s", hdev->name);
3971
3972 /* The kcov_remote functions are used to collect packet parsing
3973 * coverage information from this background thread and to associate
3974 * the coverage with the thread of the syscall that originally
3975 * injected the packet. This helps with fuzzing the kernel.
3976 */
3977 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3978 kcov_remote_start_common(skb_get_kcov_handle(skb));
3979
3980 /* Send copy to monitor */
3981 hci_send_to_monitor(hdev, skb);
3982
3983 if (atomic_read(&hdev->promisc)) {
3984 /* Send copy to the sockets */
3985 hci_send_to_sock(hdev, skb);
3986 }
3987
3988 /* If the device has been opened in HCI_USER_CHANNEL,
3989 * userspace has exclusive access to the device.
3990 * While the device is in HCI_INIT we still need to
3991 * process the data packets so that the driver can
3992 * complete its setup().
3993 */
3994 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3995 !test_bit(HCI_INIT, &hdev->flags)) {
3996 kfree_skb(skb);
3997 continue;
3998 }
3999
4000 if (test_bit(HCI_INIT, &hdev->flags)) {
4001 /* Don't process data packets in this state. */
4002 switch (hci_skb_pkt_type(skb)) {
4003 case HCI_ACLDATA_PKT:
4004 case HCI_SCODATA_PKT:
4005 case HCI_ISODATA_PKT:
4006 kfree_skb(skb);
4007 continue;
4008 }
4009 }
4010
4011 /* Process frame */
4012 switch (hci_skb_pkt_type(skb)) {
4013 case HCI_EVENT_PKT:
4014 BT_DBG("%s Event packet", hdev->name);
4015 hci_event_packet(hdev, skb);
4016 break;
4017
4018 case HCI_ACLDATA_PKT:
4019 BT_DBG("%s ACL data packet", hdev->name);
4020 hci_acldata_packet(hdev, skb);
4021 break;
4022
4023 case HCI_SCODATA_PKT:
4024 BT_DBG("%s SCO data packet", hdev->name);
4025 hci_scodata_packet(hdev, skb);
4026 break;
4027
4028 case HCI_ISODATA_PKT:
4029 BT_DBG("%s ISO data packet", hdev->name);
4030 hci_isodata_packet(hdev, skb);
4031 break;
4032
4033 default:
4034 kfree_skb(skb);
4035 break;
4036 }
4037 }
4038}
4039
4040static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4041{
4042 int err;
4043
4044 bt_dev_dbg(hdev, "skb %p", skb);
4045
4046 kfree_skb(hdev->sent_cmd);
4047
4048 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4049 if (!hdev->sent_cmd) {
4050 skb_queue_head(&hdev->cmd_q, skb);
4051 queue_work(hdev->workqueue, &hdev->cmd_work);
4052 return;
4053 }
4054
4055 err = hci_send_frame(hdev, skb);
4056 if (err < 0) {
4057 hci_cmd_sync_cancel_sync(hdev, -err);
4058 return;
4059 }
4060
4061 if (hdev->req_status == HCI_REQ_PEND &&
4062 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4063 kfree_skb(hdev->req_skb);
4064 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4065 }
4066
4067 atomic_dec(&hdev->cmd_cnt);
4068}
4069
4070static void hci_cmd_work(struct work_struct *work)
4071{
4072 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4073 struct sk_buff *skb;
4074
4075 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4076 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4077
4078 /* Send queued commands */
4079 if (atomic_read(&hdev->cmd_cnt)) {
4080 skb = skb_dequeue(&hdev->cmd_q);
4081 if (!skb)
4082 return;
4083
4084 hci_send_cmd_sync(hdev, skb);
4085
4086 rcu_read_lock();
4087 if (test_bit(HCI_RESET, &hdev->flags) ||
4088 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4089 cancel_delayed_work(&hdev->cmd_timer);
4090 else
4091 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4092 HCI_CMD_TIMEOUT);
4093 rcu_read_unlock();
4094 }
4095}