/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
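
/* Illustrative usage sketch (mirrors how this file itself uses the API):
 * a successful hci_dev_get() takes a reference that the caller must
 * balance with hci_dev_put() once done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		// ... operate on hdev ...
 *		hci_dev_put(hdev);
 *	}
 */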

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses we use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
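
/* Illustrative sketch of the matching userspace side (hypothetical values;
 * this mirrors how BlueZ-style tools are commonly wired up): the ioctl
 * argument starts with struct hci_inquiry_req and must leave room behind
 * it for the inquiry_info entries filled in above.
 *
 *	buf = malloc(sizeof(struct hci_inquiry_req) +
 *		     255 * sizeof(struct inquiry_info));
 *	ir = (struct hci_inquiry_req *) buf;
 *	ir->dev_id  = 0;		// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;		// GIAC 0x9e8b33,
 *	ir->lap[1]  = 0x8b;		// stored little-endian
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		// inquiry duration
 *	ir->num_rsp = 255;		// 0 also means "up to 255"
 *	ioctl(dd, HCIINQUIRY, buf);	// dd: raw HCI socket fd
 */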

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
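
/* Illustrative sketch: hci_dev_open() backs the legacy HCIDEVUP ioctl, so
 * the typical userspace path to it is roughly:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(ctl, HCIDEVUP, dev_id);	// ends up in hci_dev_open(dev_id)
 */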

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
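
/* Illustrative note on the HCISETACLMTU/HCISETSCOMTU cases above: the two
 * __u16 values travel packed inside the single 32-bit dev_opt, packet
 * count in the first half-word in memory and MTU in the second. On a
 * little-endian machine a caller would therefore set up (hypothetical
 * sketch):
 *
 *	dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 */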

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send an Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
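
/* Worked example for the rules above (illustrative): an unauthenticated
 * combination key (0x04) negotiated while the local side required
 * dedicated bonding (auth_type 0x02) is stored persistently, whereas the
 * same key type with no-bonding requirements on both sides (auth_type
 * and remote_auth both 0x00) falls through every check and is dropped.
 */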

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
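
/* Background for the second pass above (summary of the Bluetooth Core
 * spec, hedged): a Resolvable Private Address consists of a 24-bit
 * random part (prand) and a 24-bit hash with hash = ah(IRK, prand), so
 * smp_irk_matches() can recompute the hash against every stored IRK and
 * map a freshly rotated RPA back to a known identity; the match is then
 * cached in irk->rpa so the first-pass lookup can take the fast path.
 */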

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
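
/* Summary of the data->present encoding established above, as the code
 * reads:
 *
 *	0x00	no valid OOB data
 *	0x01	only P-192 values (hash192/rand192)
 *	0x02	only P-256 values (hash256/rand256)
 *	0x03	both P-192 and P-256 values
 */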

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration, s8 tx_power,
			 u32 min_interval, u32 max_interval)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;
	adv_instance->min_interval = min_interval;
	adv_instance->max_interval = max_interval;
	adv_instance->tx_power = tx_power;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv_instance->duration = duration;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);

	BT_DBG("%s for %d", hdev->name, instance);

	return 0;
}
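
/* Illustrative sketch (hypothetical values) of registering an instance
 * with the helper above, with hdev->lock held as required: instance 1,
 * empty payloads, a 10 second timeout, the default rotation duration,
 * no tx-power preference and the controller's default interval range:
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, 0, NULL, 0, NULL,
 *				   10, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval);
 */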

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv_instance)
		return -ENOENT;

	if (adv_data_len) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
		adv_instance->adv_data_len = adv_data_len;
	}

	if (scan_rsp_len) {
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);
		adv_instance->scan_rsp_len = scan_rsp_len;
	}

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}

int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_add_adv_patterns_monitor_complete(hdev, status);
}

int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_remove_adv_monitor_complete(hdev, status);
}

/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * This function requires the caller holds hdev->lock.
 */
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
			 int *err)
{
	int min, max, handle;

	*err = 0;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);
	if (handle < 0) {
		*err = handle;
		return false;
	}

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return false;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		hci_update_passive_scan(hdev);
		bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
		/* Message was not forwarded to controller - not an error */
		return false;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
			   *err);
		break;
	}

	return (*err == 0);
}

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * This function requires the caller holds hdev->lock.
 */
static bool hci_remove_adv_monitor(struct hci_dev *hdev,
				   struct adv_monitor *monitor,
				   u16 handle, int *err)
{
	*err = 0;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		goto free_monitor;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_remove_monitor(hdev, monitor, handle);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (*err == -ENOENT)
		goto free_monitor;

	return (*err == 0);

free_monitor:
	if (*err == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	*err = 0;
	return false;
}

/* Returns true if request is forwarded (result is pending), false otherwise.
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
	bool pending;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
	if (!*err && !pending)
		hci_update_passive_scan(hdev);

	bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
		   hdev->name, handle, *err, pending ? "" : "not ");

	return pending;
}

/* Returns true if request is forwarded (result is pending), false otherwise.
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	bool pending = false;
	bool update = false;

	*err = 0;

	while (!*err && !pending) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		pending = hci_remove_adv_monitor(hdev, monitor, 0, err);

		if (!*err && !pending)
			update = true;
	}

	if (update)
		hci_update_passive_scan(hdev);

	bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
		   hdev->name, *err, pending ? "" : "not ");

	return pending;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
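
/* Illustrative sketch (hypothetical caller): these helpers back lists such
 * as the LE accept and reject lists; adding a BR/EDR address would look
 * like:
 *
 *	bdaddr_t addr = { .b = { 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 } };
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&hdev->reject_list, &addr, BDADDR_BREDR);
 *	// 0 on success, -EEXIST if already listed, -EBADF for BDADDR_ANY
 */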
2109
2110int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2111 u8 type, u8 *peer_irk, u8 *local_irk)
2112{
2113 struct bdaddr_list_with_irk *entry;
2114
2115 if (!bacmp(bdaddr, BDADDR_ANY))
2116 return -EBADF;
2117
2118 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2119 return -EEXIST;
2120
2121 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2122 if (!entry)
2123 return -ENOMEM;
2124
2125 bacpy(&entry->bdaddr, bdaddr);
2126 entry->bdaddr_type = type;
2127
2128 if (peer_irk)
2129 memcpy(entry->peer_irk, peer_irk, 16);
2130
2131 if (local_irk)
2132 memcpy(entry->local_irk, local_irk, 16);
2133
2134 list_add(&entry->list, list);
2135
2136 return 0;
2137}
2138
2139int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2140 u8 type, u32 flags)
2141{
2142 struct bdaddr_list_with_flags *entry;
2143
2144 if (!bacmp(bdaddr, BDADDR_ANY))
2145 return -EBADF;
2146
2147 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2148 return -EEXIST;
2149
2150 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2151 if (!entry)
2152 return -ENOMEM;
2153
2154 bacpy(&entry->bdaddr, bdaddr);
2155 entry->bdaddr_type = type;
2156 bitmap_from_u64(entry->flags, flags);
2157
2158 list_add(&entry->list, list);
2159
2160 return 0;
2161}
2162
2163int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2164{
2165 struct bdaddr_list *entry;
2166
2167 if (!bacmp(bdaddr, BDADDR_ANY)) {
2168 hci_bdaddr_list_clear(list);
2169 return 0;
2170 }
2171
2172 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2173 if (!entry)
2174 return -ENOENT;
2175
2176 list_del(&entry->list);
2177 kfree(entry);
2178
2179 return 0;
2180}
2181
2182int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2183 u8 type)
2184{
2185 struct bdaddr_list_with_irk *entry;
2186
2187 if (!bacmp(bdaddr, BDADDR_ANY)) {
2188 hci_bdaddr_list_clear(list);
2189 return 0;
2190 }
2191
2192 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2193 if (!entry)
2194 return -ENOENT;
2195
2196 list_del(&entry->list);
2197 kfree(entry);
2198
2199 return 0;
2200}
2201
2202int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2203 u8 type)
2204{
2205 struct bdaddr_list_with_flags *entry;
2206
2207 if (!bacmp(bdaddr, BDADDR_ANY)) {
2208 hci_bdaddr_list_clear(list);
2209 return 0;
2210 }
2211
2212 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2213 if (!entry)
2214 return -ENOENT;
2215
2216 list_del(&entry->list);
2217 kfree(entry);
2218
2219 return 0;
2220}
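
/* Hypothetical example (illustrative only): typical use of the bdaddr list
 * helpers above, here against hdev->reject_list. The error codes mirror
 * the implementations: -EBADF for BDADDR_ANY, -EEXIST on duplicates and
 * -ENOENT when deleting a missing entry.
 */
static int __maybe_unused example_reject_list_usage(struct hci_dev *hdev,
						    bdaddr_t *bdaddr, u8 type)
{
	int err;

	/* Add the address; duplicates are rejected with -EEXIST */
	err = hci_bdaddr_list_add(&hdev->reject_list, bdaddr, type);
	if (err && err != -EEXIST)
		return err;

	/* Lookups match on both the address and the address type */
	if (!hci_bdaddr_list_lookup(&hdev->reject_list, bdaddr, type))
		return -ENOENT;

	/* Deleting BDADDR_ANY would clear the whole list instead */
	return hci_bdaddr_list_del(&hdev->reject_list, bdaddr, type);
}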

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
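
/* Hypothetical example (illustrative only): the lookup/add/del helpers
 * above all require hdev->lock, so a caller brackets them with
 * hci_dev_lock()/hci_dev_unlock().
 */
static void __maybe_unused example_conn_params_usage(struct hci_dev *hdev,
						     bdaddr_t *addr,
						     u8 addr_type)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	/* Returns an existing entry, or allocates one preloaded with the
	 * controller's default LE connection parameters.
	 */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	/* Frees the entry again and updates passive scanning */
	hci_conn_params_del(hdev, addr, addr_type);

	hci_dev_unlock(hdev);
}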

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a
		 * disabled device, leave the params, but mark them as
		 * just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
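
/* Worked summary (illustrative, derived from the logic above):
 *
 *   HCI_FORCE_STATIC_ADDR set           -> static_addr, ADDR_LE_DEV_RANDOM
 *   public bdaddr is BDADDR_ANY         -> static_addr, ADDR_LE_DEV_RANDOM
 *   BR/EDR disabled and static_addr set -> static_addr, ADDR_LE_DEV_RANDOM
 *   otherwise                           -> bdaddr, ADDR_LE_DEV_PUBLIC
 */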

static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	return NOTIFY_DONE;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
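
/* Hypothetical example (illustrative only): a driver allocating an hci_dev
 * with trailing private data. hci_alloc_dev() is the sizeof_priv == 0
 * convenience wrapper, and hci_get_priv() (both from hci_core.h) is
 * assumed here to return a pointer just past struct hci_dev.
 */
struct example_drv_data {	/* hypothetical driver state */
	int irq;
};

static __maybe_unused struct hci_dev *example_alloc(void)
{
	struct hci_dev *hdev;
	struct example_drv_data *priv;

	hdev = hci_alloc_dev_priv(sizeof(struct example_drv_data));
	if (!hdev)
		return NULL;

	priv = hci_get_priv(hdev);	/* the trailing allocation */
	priv->irq = -1;

	return hdev;
}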

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		error = register_pm_notifier(&hdev->suspend_notifier);
		if (error)
			goto err_wqueue;
	}

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
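
/* Hypothetical example (illustrative only): the minimal driver contract
 * checked above -- open, close and send must be set before calling
 * hci_register_dev(). The callback bodies are placeholders.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver hands the skb to its transport */
	return 0;
}

static int __maybe_unused example_register(struct hci_dev *hdev)
{
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;
	hdev->dev_type = HCI_PRIMARY;	/* HCI_AMP would never get index 0 */

	return hci_register_dev(hdev);	/* returns the allocated hciN index */
}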

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
		unregister_pm_notifier(&hdev->suspend_notifier);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
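
/* Illustrative note (not in the original sources): the injected frame is a
 * complete HCI event packet, i.e. struct hci_event_hdr plus one parameter
 * byte:
 *
 *   0x10  event code (HCI_EV_HARDWARE_ERROR)
 *   0x01  parameter total length
 *   0x00  hardware code
 *
 * so the event processing path treats it exactly like a hardware error
 * reported by the controller itself.
 */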

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
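
/* Hypothetical example (illustrative only): how a transport driver feeds a
 * received packet into the core. The driver sets the packet type before
 * calling hci_recv_frame(); anything other than event, ACL, SCO or ISO
 * data is rejected with -EINVAL above.
 */
static int __maybe_unused example_rx(struct hci_dev *hdev,
				     const void *buf, unsigned int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;	/* assume event data here */
	skb_put_data(skb, buf, len);

	/* hci_recv_frame() consumes the skb even on failure */
	return hci_recv_frame(hdev, skb);
}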

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
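
/* Hypothetical example (illustrative only): drivers typically record the
 * hardware and firmware versions once during setup; the strings live until
 * hci_release_dev() frees them above. Format and values here are made up.
 */
static void __maybe_unused example_set_info(struct hci_dev *hdev)
{
	hci_set_hw_info(hdev, "rev=%u", 2);
	hci_set_fw_info(hdev, "build %s", "1.2.3");
}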

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
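
/* Hypothetical example (illustrative only): queueing a standard command
 * through the cmd_q/cmd_work machinery above, here enabling page and
 * inquiry scan.
 */
static int __maybe_unused example_send_scan_enable(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}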

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
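
/* Illustrative note (not in the original sources): hci_handle_pack() folds
 * the 12-bit connection handle and the 2-bit packet boundary and broadcast
 * flags into a single 16-bit field, roughly:
 *
 *   hdr->handle = cpu_to_le16((handle & 0x0fff) | (flags << 12));
 *
 * e.g. handle 0x0042 with ACL_START (0x02) goes on the wire as 0x2042.
 */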

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
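
/* Worked example (illustrative, derived from the code above): with
 * hdev->acl_cnt == 8 free ACL credits shared by num == 3 busy connections,
 * q = 8 / 3 = 2, so the least-sent connection may transmit two packets
 * this round. When cnt / num rounds down to zero the scheduler still
 * grants a quote of 1 so a connection is never starved completely.
 */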

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
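
/* Worked example (illustrative, derived from the code above): with a
 * hypothetical hdev->block_len of 672 bytes, a 1349 byte skb (including
 * the 4 byte ACL header) needs DIV_ROUND_UP(1349 - 4, 672) = 3 blocks.
 */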

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * When the device is in HCI_INIT, we still need to
		 * process the packets so the driver can complete its
		 * setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}