Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "hci_request.h"
34#include "hci_debugfs.h"
35#include "a2mp.h"
36#include "amp.h"
37#include "smp.h"
38#include "msft.h"
39
40#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
42
43/* Handle HCI Event packets */
44
/* Handle Command Complete for HCI_OP_INQUIRY_CANCEL.
 *
 * On success this clears the HCI_INQUIRY flag, wakes any task waiting on
 * that bit, and moves discovery to DISCOVERY_STOPPED unless an LE active
 * scan is still in progress.  The status may be rewritten (see below) and
 * the value actually used is reported back through @new_status.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	/* Report the (possibly rewritten) status back to the caller */
	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
84
85static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86{
87 __u8 status = *((__u8 *) skb->data);
88
89 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90
91 if (status)
92 return;
93
94 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
95}
96
97static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98{
99 __u8 status = *((__u8 *) skb->data);
100
101 BT_DBG("%s status 0x%2.2x", hdev->name, status);
102
103 if (status)
104 return;
105
106 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107
108 hci_conn_check_pending(hdev);
109}
110
111static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
112 struct sk_buff *skb)
113{
114 BT_DBG("%s", hdev->name);
115}
116
117static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118{
119 struct hci_rp_role_discovery *rp = (void *) skb->data;
120 struct hci_conn *conn;
121
122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123
124 if (rp->status)
125 return;
126
127 hci_dev_lock(hdev);
128
129 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 if (conn)
131 conn->role = rp->role;
132
133 hci_dev_unlock(hdev);
134}
135
136static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137{
138 struct hci_rp_read_link_policy *rp = (void *) skb->data;
139 struct hci_conn *conn;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 hci_dev_lock(hdev);
147
148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 if (conn)
150 conn->link_policy = __le16_to_cpu(rp->policy);
151
152 hci_dev_unlock(hdev);
153}
154
155static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156{
157 struct hci_rp_write_link_policy *rp = (void *) skb->data;
158 struct hci_conn *conn;
159 void *sent;
160
161 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162
163 if (rp->status)
164 return;
165
166 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
167 if (!sent)
168 return;
169
170 hci_dev_lock(hdev);
171
172 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 if (conn)
174 conn->link_policy = get_unaligned_le16(sent + 2);
175
176 hci_dev_unlock(hdev);
177}
178
179static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
180 struct sk_buff *skb)
181{
182 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183
184 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
185
186 if (rp->status)
187 return;
188
189 hdev->link_policy = __le16_to_cpu(rp->policy);
190}
191
192static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
193 struct sk_buff *skb)
194{
195 __u8 status = *((__u8 *) skb->data);
196 void *sent;
197
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
199
200 if (status)
201 return;
202
203 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
204 if (!sent)
205 return;
206
207 hdev->link_policy = get_unaligned_le16(sent);
208}
209
210static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211{
212 __u8 status = *((__u8 *) skb->data);
213
214 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215
216 clear_bit(HCI_RESET, &hdev->flags);
217
218 if (status)
219 return;
220
221 /* Reset all non-persistent flags */
222 hci_dev_clear_volatile_flags(hdev);
223
224 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225
226 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
227 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228
229 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
230 hdev->adv_data_len = 0;
231
232 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
233 hdev->scan_rsp_data_len = 0;
234
235 hdev->le_scan_type = LE_SCAN_PASSIVE;
236
237 hdev->ssp_debug_mode = 0;
238
239 hci_bdaddr_list_clear(&hdev->le_white_list);
240 hci_bdaddr_list_clear(&hdev->le_resolv_list);
241}
242
243static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
244 struct sk_buff *skb)
245{
246 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
247 struct hci_cp_read_stored_link_key *sent;
248
249 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
250
251 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
252 if (!sent)
253 return;
254
255 if (!rp->status && sent->read_all == 0x01) {
256 hdev->stored_max_keys = rp->max_keys;
257 hdev->stored_num_keys = rp->num_keys;
258 }
259}
260
261static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
262 struct sk_buff *skb)
263{
264 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
265
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
267
268 if (rp->status)
269 return;
270
271 if (rp->num_keys <= hdev->stored_num_keys)
272 hdev->stored_num_keys -= rp->num_keys;
273 else
274 hdev->stored_num_keys = 0;
275}
276
277static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278{
279 __u8 status = *((__u8 *) skb->data);
280 void *sent;
281
282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
285 if (!sent)
286 return;
287
288 hci_dev_lock(hdev);
289
290 if (hci_dev_test_flag(hdev, HCI_MGMT))
291 mgmt_set_local_name_complete(hdev, sent, status);
292 else if (!status)
293 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
294
295 hci_dev_unlock(hdev);
296}
297
298static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299{
300 struct hci_rp_read_local_name *rp = (void *) skb->data;
301
302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
303
304 if (rp->status)
305 return;
306
307 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
308 hci_dev_test_flag(hdev, HCI_CONFIG))
309 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
310}
311
312static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
313{
314 __u8 status = *((__u8 *) skb->data);
315 void *sent;
316
317 BT_DBG("%s status 0x%2.2x", hdev->name, status);
318
319 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
320 if (!sent)
321 return;
322
323 hci_dev_lock(hdev);
324
325 if (!status) {
326 __u8 param = *((__u8 *) sent);
327
328 if (param == AUTH_ENABLED)
329 set_bit(HCI_AUTH, &hdev->flags);
330 else
331 clear_bit(HCI_AUTH, &hdev->flags);
332 }
333
334 if (hci_dev_test_flag(hdev, HCI_MGMT))
335 mgmt_auth_enable_complete(hdev, status);
336
337 hci_dev_unlock(hdev);
338}
339
340static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341{
342 __u8 status = *((__u8 *) skb->data);
343 __u8 param;
344 void *sent;
345
346 BT_DBG("%s status 0x%2.2x", hdev->name, status);
347
348 if (status)
349 return;
350
351 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
352 if (!sent)
353 return;
354
355 param = *((__u8 *) sent);
356
357 if (param)
358 set_bit(HCI_ENCRYPT, &hdev->flags);
359 else
360 clear_bit(HCI_ENCRYPT, &hdev->flags);
361}
362
363static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
364{
365 __u8 status = *((__u8 *) skb->data);
366 __u8 param;
367 void *sent;
368
369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
372 if (!sent)
373 return;
374
375 param = *((__u8 *) sent);
376
377 hci_dev_lock(hdev);
378
379 if (status) {
380 hdev->discov_timeout = 0;
381 goto done;
382 }
383
384 if (param & SCAN_INQUIRY)
385 set_bit(HCI_ISCAN, &hdev->flags);
386 else
387 clear_bit(HCI_ISCAN, &hdev->flags);
388
389 if (param & SCAN_PAGE)
390 set_bit(HCI_PSCAN, &hdev->flags);
391 else
392 clear_bit(HCI_PSCAN, &hdev->flags);
393
394done:
395 hci_dev_unlock(hdev);
396}
397
398static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
399{
400 __u8 status = *((__u8 *)skb->data);
401 struct hci_cp_set_event_filter *cp;
402 void *sent;
403
404 BT_DBG("%s status 0x%2.2x", hdev->name, status);
405
406 if (status)
407 return;
408
409 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
410 if (!sent)
411 return;
412
413 cp = (struct hci_cp_set_event_filter *)sent;
414
415 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
416 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
417 else
418 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
419}
420
421static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
422{
423 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
424
425 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426
427 if (rp->status)
428 return;
429
430 memcpy(hdev->dev_class, rp->dev_class, 3);
431
432 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
433 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
434}
435
436static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
437{
438 __u8 status = *((__u8 *) skb->data);
439 void *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
444 if (!sent)
445 return;
446
447 hci_dev_lock(hdev);
448
449 if (status == 0)
450 memcpy(hdev->dev_class, sent, 3);
451
452 if (hci_dev_test_flag(hdev, HCI_MGMT))
453 mgmt_set_class_of_dev_complete(hdev, sent, status);
454
455 hci_dev_unlock(hdev);
456}
457
458static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
459{
460 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
461 __u16 setting;
462
463 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
464
465 if (rp->status)
466 return;
467
468 setting = __le16_to_cpu(rp->voice_setting);
469
470 if (hdev->voice_setting == setting)
471 return;
472
473 hdev->voice_setting = setting;
474
475 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
476
477 if (hdev->notify)
478 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
479}
480
481static void hci_cc_write_voice_setting(struct hci_dev *hdev,
482 struct sk_buff *skb)
483{
484 __u8 status = *((__u8 *) skb->data);
485 __u16 setting;
486 void *sent;
487
488 BT_DBG("%s status 0x%2.2x", hdev->name, status);
489
490 if (status)
491 return;
492
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
494 if (!sent)
495 return;
496
497 setting = get_unaligned_le16(sent);
498
499 if (hdev->voice_setting == setting)
500 return;
501
502 hdev->voice_setting = setting;
503
504 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
505
506 if (hdev->notify)
507 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
508}
509
510static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
511 struct sk_buff *skb)
512{
513 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
514
515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517 if (rp->status)
518 return;
519
520 hdev->num_iac = rp->num_iac;
521
522 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
523}
524
/* Command Complete for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirror the host SSP support bit in the cached extended
 * feature page.  mgmt is always told the outcome (including failures);
 * without mgmt, the HCI_SSP_ENABLED flag is updated directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Keep the host-features page in sync with what we wrote */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
556
557static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
558{
559 u8 status = *((u8 *) skb->data);
560 struct hci_cp_write_sc_support *sent;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, status);
563
564 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
565 if (!sent)
566 return;
567
568 hci_dev_lock(hdev);
569
570 if (!status) {
571 if (sent->support)
572 hdev->features[1][0] |= LMP_HOST_SC;
573 else
574 hdev->features[1][0] &= ~LMP_HOST_SC;
575 }
576
577 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
578 if (sent->support)
579 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
580 else
581 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
582 }
583
584 hci_dev_unlock(hdev);
585}
586
587static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
588{
589 struct hci_rp_read_local_version *rp = (void *) skb->data;
590
591 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592
593 if (rp->status)
594 return;
595
596 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
597 hci_dev_test_flag(hdev, HCI_CONFIG)) {
598 hdev->hci_ver = rp->hci_ver;
599 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
600 hdev->lmp_ver = rp->lmp_ver;
601 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
602 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
603 }
604}
605
606static void hci_cc_read_local_commands(struct hci_dev *hdev,
607 struct sk_buff *skb)
608{
609 struct hci_rp_read_local_commands *rp = (void *) skb->data;
610
611 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
612
613 if (rp->status)
614 return;
615
616 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
617 hci_dev_test_flag(hdev, HCI_CONFIG))
618 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
619}
620
621static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
622 struct sk_buff *skb)
623{
624 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
625 struct hci_conn *conn;
626
627 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
628
629 if (rp->status)
630 return;
631
632 hci_dev_lock(hdev);
633
634 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
635 if (conn)
636 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
637
638 hci_dev_unlock(hdev);
639}
640
641static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
642 struct sk_buff *skb)
643{
644 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
645 struct hci_conn *conn;
646 void *sent;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
654 if (!sent)
655 return;
656
657 hci_dev_lock(hdev);
658
659 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
660 if (conn)
661 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
662
663 hci_dev_unlock(hdev);
664}
665
/* Command Complete for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP feature page 0 and derives the supported ACL packet
 * types and (e)SCO packet types from the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* 3- and 5-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 packets */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO EV3 is implied by general eSCO capability */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
715
716static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
717 struct sk_buff *skb)
718{
719 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
720
721 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
722
723 if (rp->status)
724 return;
725
726 if (hdev->max_page < rp->max_page)
727 hdev->max_page = rp->max_page;
728
729 if (rp->page < HCI_MAX_PAGES)
730 memcpy(hdev->features[rp->page], rp->features, 8);
731}
732
733static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
734 struct sk_buff *skb)
735{
736 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 hdev->flow_ctl_mode = rp->mode;
744}
745
746static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
747{
748 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (rp->status)
753 return;
754
755 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
756 hdev->sco_mtu = rp->sco_mtu;
757 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
758 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
759
760 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
761 hdev->sco_mtu = 64;
762 hdev->sco_pkts = 8;
763 }
764
765 hdev->acl_cnt = hdev->acl_pkts;
766 hdev->sco_cnt = hdev->sco_pkts;
767
768 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
769 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
770}
771
772static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
773{
774 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
775
776 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
777
778 if (rp->status)
779 return;
780
781 if (test_bit(HCI_INIT, &hdev->flags))
782 bacpy(&hdev->bdaddr, &rp->bdaddr);
783
784 if (hci_dev_test_flag(hdev, HCI_SETUP))
785 bacpy(&hdev->setup_addr, &rp->bdaddr);
786}
787
788static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
789 struct sk_buff *skb)
790{
791 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
792
793 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794
795 if (rp->status)
796 return;
797
798 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
799 hci_dev_test_flag(hdev, HCI_CONFIG)) {
800 hdev->pairing_opts = rp->pairing_opts;
801 hdev->max_enc_key_size = rp->max_key_size;
802 }
803}
804
805static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
806 struct sk_buff *skb)
807{
808 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
809
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811
812 if (rp->status)
813 return;
814
815 if (test_bit(HCI_INIT, &hdev->flags)) {
816 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
817 hdev->page_scan_window = __le16_to_cpu(rp->window);
818 }
819}
820
821static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
822 struct sk_buff *skb)
823{
824 u8 status = *((u8 *) skb->data);
825 struct hci_cp_write_page_scan_activity *sent;
826
827 BT_DBG("%s status 0x%2.2x", hdev->name, status);
828
829 if (status)
830 return;
831
832 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
833 if (!sent)
834 return;
835
836 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
837 hdev->page_scan_window = __le16_to_cpu(sent->window);
838}
839
840static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
841 struct sk_buff *skb)
842{
843 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 if (test_bit(HCI_INIT, &hdev->flags))
851 hdev->page_scan_type = rp->type;
852}
853
854static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
855 struct sk_buff *skb)
856{
857 u8 status = *((u8 *) skb->data);
858 u8 *type;
859
860 BT_DBG("%s status 0x%2.2x", hdev->name, status);
861
862 if (status)
863 return;
864
865 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
866 if (type)
867 hdev->page_scan_type = *type;
868}
869
870static void hci_cc_read_data_block_size(struct hci_dev *hdev,
871 struct sk_buff *skb)
872{
873 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
874
875 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
876
877 if (rp->status)
878 return;
879
880 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
881 hdev->block_len = __le16_to_cpu(rp->block_len);
882 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
883
884 hdev->block_cnt = hdev->num_blocks;
885
886 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
887 hdev->block_cnt, hdev->block_len);
888}
889
/* Command Complete for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the sent command, stores either
 * the local clock in hdev or the piconet clock (plus its accuracy) on
 * the matching connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
924
/* Command Complete for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Transcribes the AMP controller capabilities (status, bandwidth limits,
 * latency, PDU sizes, flush timeouts) from the event into hdev, converting
 * multi-byte fields from little-endian wire order.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
946
947static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
948 struct sk_buff *skb)
949{
950 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
951
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
953
954 if (rp->status)
955 return;
956
957 hdev->inq_tx_power = rp->tx_power;
958}
959
960static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
961 struct sk_buff *skb)
962{
963 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
964
965 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
966
967 if (rp->status)
968 return;
969
970 hdev->err_data_reporting = rp->err_data_reporting;
971}
972
973static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
974 struct sk_buff *skb)
975{
976 __u8 status = *((__u8 *)skb->data);
977 struct hci_cp_write_def_err_data_reporting *cp;
978
979 BT_DBG("%s status 0x%2.2x", hdev->name, status);
980
981 if (status)
982 return;
983
984 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
985 if (!cp)
986 return;
987
988 hdev->err_data_reporting = cp->err_data_reporting;
989}
990
991static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
992{
993 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
994 struct hci_cp_pin_code_reply *cp;
995 struct hci_conn *conn;
996
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
998
999 hci_dev_lock(hdev);
1000
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1003
1004 if (rp->status)
1005 goto unlock;
1006
1007 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1008 if (!cp)
1009 goto unlock;
1010
1011 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1012 if (conn)
1013 conn->pin_length = cp->pin_len;
1014
1015unlock:
1016 hci_dev_unlock(hdev);
1017}
1018
1019static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1020{
1021 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1022
1023 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1024
1025 hci_dev_lock(hdev);
1026
1027 if (hci_dev_test_flag(hdev, HCI_MGMT))
1028 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1029 rp->status);
1030
1031 hci_dev_unlock(hdev);
1032}
1033
1034static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1035 struct sk_buff *skb)
1036{
1037 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1038
1039 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1040
1041 if (rp->status)
1042 return;
1043
1044 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1045 hdev->le_pkts = rp->le_max_pkt;
1046
1047 hdev->le_cnt = hdev->le_pkts;
1048
1049 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1050}
1051
1052static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1053 struct sk_buff *skb)
1054{
1055 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1056
1057 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1058
1059 if (rp->status)
1060 return;
1061
1062 memcpy(hdev->le_features, rp->features, 8);
1063}
1064
1065static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1066 struct sk_buff *skb)
1067{
1068 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1069
1070 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1071
1072 if (rp->status)
1073 return;
1074
1075 hdev->adv_tx_power = rp->tx_power;
1076}
1077
1078static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1079{
1080 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1081
1082 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1083
1084 hci_dev_lock(hdev);
1085
1086 if (hci_dev_test_flag(hdev, HCI_MGMT))
1087 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1088 rp->status);
1089
1090 hci_dev_unlock(hdev);
1091}
1092
1093static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1094 struct sk_buff *skb)
1095{
1096 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1097
1098 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1099
1100 hci_dev_lock(hdev);
1101
1102 if (hci_dev_test_flag(hdev, HCI_MGMT))
1103 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1104 ACL_LINK, 0, rp->status);
1105
1106 hci_dev_unlock(hdev);
1107}
1108
1109static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1110{
1111 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1112
1113 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1114
1115 hci_dev_lock(hdev);
1116
1117 if (hci_dev_test_flag(hdev, HCI_MGMT))
1118 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1119 0, rp->status);
1120
1121 hci_dev_unlock(hdev);
1122}
1123
1124static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1125 struct sk_buff *skb)
1126{
1127 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1128
1129 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1130
1131 hci_dev_lock(hdev);
1132
1133 if (hci_dev_test_flag(hdev, HCI_MGMT))
1134 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1135 ACL_LINK, 0, rp->status);
1136
1137 hci_dev_unlock(hdev);
1138}
1139
/* Command Complete for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * Only logs the status; the OOB hash/randomizer in @rp is not consumed
 * here (delivery to the requester happens elsewhere).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1147
/* Command Complete for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Only logs the status; the extended OOB data in @rp is not consumed
 * here (delivery to the requester happens elsewhere).
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1155
1156static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1157{
1158 __u8 status = *((__u8 *) skb->data);
1159 bdaddr_t *sent;
1160
1161 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1162
1163 if (status)
1164 return;
1165
1166 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1167 if (!sent)
1168 return;
1169
1170 hci_dev_lock(hdev);
1171
1172 bacpy(&hdev->random_addr, sent);
1173
1174 hci_dev_unlock(hdev);
1175}
1176
1177static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1178{
1179 __u8 status = *((__u8 *) skb->data);
1180 struct hci_cp_le_set_default_phy *cp;
1181
1182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183
1184 if (status)
1185 return;
1186
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1188 if (!cp)
1189 return;
1190
1191 hci_dev_lock(hdev);
1192
1193 hdev->le_tx_def_phys = cp->tx_phys;
1194 hdev->le_rx_def_phys = cp->rx_phys;
1195
1196 hci_dev_unlock(hdev);
1197}
1198
1199static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1200 struct sk_buff *skb)
1201{
1202 __u8 status = *((__u8 *) skb->data);
1203 struct hci_cp_le_set_adv_set_rand_addr *cp;
1204 struct adv_info *adv_instance;
1205
1206 if (status)
1207 return;
1208
1209 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1210 if (!cp)
1211 return;
1212
1213 hci_dev_lock(hdev);
1214
1215 if (!cp->handle) {
1216 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1217 bacpy(&hdev->random_addr, &cp->bdaddr);
1218 } else {
1219 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1220 if (adv_instance)
1221 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1222 }
1223
1224 hci_dev_unlock(hdev);
1225}
1226
/* Command Complete handler for LE Read Transmit Power.
 *
 * On success, cache the controller's minimum and maximum LE TX power
 * levels in hdev.
 */
static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}
1240
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, update the HCI_LE_ADV flag to match the enable value we
 * sent, and arm the connection timeout when advertising was enabled for
 * an outstanding LE connection attempt.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the enable byte of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1275
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_ENABLE.
 *
 * Extended-advertising counterpart of hci_cc_le_set_adv_enable(): on
 * success, sync the HCI_LE_ADV flag with the sent enable value and arm
 * the connection timeout for a pending LE connection, if any.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1309
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, remember the scan type (active/passive) we configured so
 * later scan-related decisions can consult hdev->le_scan_type.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1330
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS.
 *
 * Extended counterpart of hci_cc_le_set_scan_param(): records the scan
 * type from the first per-PHY parameter block of the command we sent.
 * Note only the first entry in cp->data is consulted here.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* Per-PHY parameter blocks follow the fixed header */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1355
1356static bool has_pending_adv_report(struct hci_dev *hdev)
1357{
1358 struct discovery_state *d = &hdev->discovery;
1359
1360 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1361}
1362
1363static void clear_pending_adv_report(struct hci_dev *hdev)
1364{
1365 struct discovery_state *d = &hdev->discovery;
1366
1367 bacpy(&d->last_adv_addr, BDADDR_ANY);
1368 d->last_adv_data_len = 0;
1369}
1370
/* Buffer a single advertising report in the discovery state so it can
 * be merged with a following scan response (or flushed when scanning
 * stops). Reports longer than HCI_MAX_AD_LENGTH are silently dropped
 * to avoid overflowing last_adv_data.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1387
/* Common completion logic for LE Set Scan Enable (legacy and extended).
 *
 * Updates the HCI_LE_SCAN flag, flushes any pending advertising report
 * when scanning stops, and adjusts discovery/advertising state
 * depending on why scanning was disabled.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* A stale pending report would belong to the previous
		 * active scan; drop it when starting a new one.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1444
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * On success, forward the enable value we sent to the shared
 * le_set_scan_enable_complete() logic.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1462
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE.
 *
 * Extended-scanning counterpart of hci_cc_le_set_scan_enable(); shares
 * the same completion logic.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1480
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: on success, cache the count in hdev->le_num_of_adv_sets.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
1494
/* Command Complete handler for LE Read White List Size: on success,
 * cache the controller's white list capacity in hdev.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1507
/* Command Complete handler for LE Clear White List: on success, drop
 * the host-side mirror of the controller's white list.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
1520
/* Command Complete handler for HCI_OP_LE_ADD_TO_WHITE_LIST.
 *
 * On success, add the address from the command we sent to the
 * host-side mirror of the controller's white list.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1539
/* Command Complete handler for HCI_OP_LE_DEL_FROM_WHITE_LIST.
 *
 * On success, remove the address from the command we sent from the
 * host-side mirror of the controller's white list.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1558
/* Command Complete handler for LE Read Supported States: on success,
 * cache the 8-byte LE states bitmask in hdev->le_states.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}
1571
/* Command Complete handler for LE Read Suggested Default Data Length:
 * on success, cache the suggested TX octets/time (little-endian on the
 * wire) in hdev.
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}
1585
/* Command Complete handler for HCI_OP_LE_WRITE_DEF_DATA_LEN.
 *
 * On success, cache the suggested default TX octets/time from the
 * command we sent (values are little-endian on the wire).
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
1604
/* Command Complete handler for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 *
 * On success, mirror the entry (address, type and both IRKs) from the
 * command we sent into the host-side resolving list.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}
1624
/* Command Complete handler for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 *
 * On success, remove the entry from the command we sent from the
 * host-side mirror of the resolving list.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}
1643
/* Command Complete handler for LE Clear Resolving List: on success,
 * drop the host-side mirror of the controller's resolving list.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
1656
/* Command Complete handler for LE Read Resolving List Size: on success,
 * cache the controller's resolving list capacity in hdev.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}
1669
/* Command Complete handler for HCI_OP_LE_SET_ADDR_RESOLV_ENABLE.
 *
 * On success, update the HCI_LL_RPA_RESOLUTION flag to match the
 * enable byte from the command we sent.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the enable byte of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1693
/* Command Complete handler for LE Read Maximum Data Length: on
 * success, cache the maximum TX/RX octets and times (little-endian on
 * the wire) in hdev.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}
1709
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, sync the host feature bits (LE and simultaneous
 * LE/BR-EDR) and the HCI_LE_ENABLED/HCI_ADVERTISING flags with the
 * values from the command we sent.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE also invalidates any LE advertising state */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1743
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM.
 *
 * On success, remember which own-address type advertising was
 * configured with (public/random/RPA variants).
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1762
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * On success, record the configured own-address type, store the
 * selected TX power (in hdev for handle 0, otherwise in the matching
 * advertising instance) and refresh the advertising data, which may
 * include the now-known TX power.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}
1793
/* Command Complete handler for Read RSSI: on success, store the
 * reported RSSI on the connection identified by the handle in the
 * reply.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1812
/* Command Complete handler for HCI_OP_READ_TX_POWER.
 *
 * On success, store the reported TX power on the connection: the
 * "type" field of the command we sent selects whether it was the
 * current (0x00) or maximum (0x01) level.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		/* Current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		/* Maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1846
/* Command Complete handler for HCI_OP_WRITE_SSP_DEBUG_MODE: on
 * success, remember the debug mode byte we sent in
 * hdev->ssp_debug_mode.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
1861
1862static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1863{
1864 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1865
1866 if (status) {
1867 hci_conn_check_pending(hdev);
1868 return;
1869 }
1870
1871 set_bit(HCI_INQUIRY, &hdev->flags);
1872}
1873
/* Command Status handler for Create Connection.
 *
 * On failure, either tear down the connection object or, for status
 * 0x0c (Command Disallowed) within the first two attempts, park it in
 * BT_CONNECT2 for a retry. On success, make sure a connection object
 * exists for the outgoing attempt.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1911
/* Command Status handler for Add SCO Connection.
 *
 * Only failures matter here: look up the ACL connection the SCO was
 * to be added on and tear down its linked SCO connection, notifying
 * the upper layers of the failure.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1946
/* Command Status handler for Authentication Requested.
 *
 * Only failures matter here: if the connection is still in BT_CONFIG,
 * report the failure upward and drop the reference taken for the
 * authentication attempt.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1973
/* Command Status handler for Set Connection Encryption.
 *
 * Only failures matter here: if the connection is still in BT_CONFIG,
 * report the failure upward and drop the reference taken for the
 * encryption attempt.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2000
2001static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2002 struct hci_conn *conn)
2003{
2004 if (conn->state != BT_CONFIG || !conn->out)
2005 return 0;
2006
2007 if (conn->pending_sec_level == BT_SECURITY_SDP)
2008 return 0;
2009
2010 /* Only request authentication for SSP connections or non-SSP
2011 * devices with sec_level MEDIUM or HIGH or if MITM protection
2012 * is requested.
2013 */
2014 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2015 conn->pending_sec_level != BT_SECURITY_FIPS &&
2016 conn->pending_sec_level != BT_SECURITY_HIGH &&
2017 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2018 return 0;
2019
2020 return 1;
2021}
2022
/* Issue a Remote Name Request for an inquiry cache entry, reusing the
 * page scan and clock offset details learned during inquiry.
 *
 * Returns the result of hci_send_cmd() (0 on success, negative errno
 * on failure).
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
2037
2038static bool hci_resolve_next_name(struct hci_dev *hdev)
2039{
2040 struct discovery_state *discov = &hdev->discovery;
2041 struct inquiry_entry *e;
2042
2043 if (list_empty(&discov->resolve))
2044 return false;
2045
2046 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2047 if (!e)
2048 return false;
2049
2050 if (hci_resolve_name(hdev, e) == 0) {
2051 e->name_state = NAME_PENDING;
2052 return true;
2053 }
2054
2055 return false;
2056}
2057
/* Process the outcome of a remote name request for the discovery
 * (name resolving) state machine.
 *
 * Notifies mgmt of the connection if needed, marks the matching
 * NAME_PENDING inquiry entry as known/not known, and either starts
 * resolving the next name or marks discovery as stopped.
 * @name may be NULL when the name request failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2106
/* Command Status handler for Remote Name Request.
 *
 * Only failures are handled here (success is handled by the Remote
 * Name Request Complete event): update pending-name bookkeeping and,
 * when an outgoing connection still needs it, fall back to issuing an
 * Authentication Requested command.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2149
/* Command Status handler for Read Remote Supported Features.
 *
 * Only failures matter here: if the connection is still in BT_CONFIG,
 * report the failure upward and drop the reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2176
/* Command Status handler for Read Remote Extended Features.
 *
 * Only failures matter here: if the connection is still in BT_CONFIG,
 * report the failure upward and drop the reference.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2203
/* Command Status handler for Setup Synchronous Connection.
 *
 * Only failures matter here: look up the ACL connection the sync
 * connection was to be set up on and tear down its linked SCO/eSCO
 * connection, notifying the upper layers of the failure.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2238
/* Command Status handler for Sniff Mode.
 *
 * Only failures matter here: clear the mode-change-pending flag and,
 * if a SCO setup was waiting on the mode change, continue it with the
 * failure status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2265
/* Command Status handler for Exit Sniff Mode.
 *
 * Mirror of hci_cs_sniff_mode(): on failure, clear the
 * mode-change-pending flag and continue any pending SCO setup with
 * the failure status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2292
2293static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2294{
2295 struct hci_cp_disconnect *cp;
2296 struct hci_conn *conn;
2297
2298 if (!status)
2299 return;
2300
2301 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2302 if (!cp)
2303 return;
2304
2305 hci_dev_lock(hdev);
2306
2307 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2308 if (conn) {
2309 u8 type = conn->type;
2310
2311 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2312 conn->dst_type, status);
2313
2314 /* If the disconnection failed for any reason, the upper layer
2315 * does not retry to disconnect in current implementation.
2316 * Hence, we need to do some basic cleanup here and re-enable
2317 * advertising if necessary.
2318 */
2319 hci_conn_del(conn);
2320 if (type == LE_LINK)
2321 hci_req_reenable_advertising(hdev);
2322 }
2323
2324 hci_dev_unlock(hdev);
2325}
2326
/* Common Command Status handling for LE Create Connection (legacy and
 * extended): record the initiator/responder address information on the
 * connection object for later SMP use, and arm a connection timeout
 * when the attempt does not use the white list.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
			break;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;
			break;
		}
	}

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2377
/* Command Status handler for HCI_OP_LE_CREATE_CONN: on success, pass
 * the sent parameters to the shared cs_le_create_conn() logic.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
2402
/* Command Status handler for HCI_OP_LE_EXT_CREATE_CONN: extended
 * counterpart of hci_cs_le_create_conn(), sharing cs_le_create_conn().
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
2427
/* Command Status handler for LE Read Remote Features.
 *
 * Only failures matter here: if the connection is still in BT_CONFIG,
 * report the failure upward and drop the reference.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2454
2455static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2456{
2457 struct hci_cp_le_start_enc *cp;
2458 struct hci_conn *conn;
2459
2460 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2461
2462 if (!status)
2463 return;
2464
2465 hci_dev_lock(hdev);
2466
2467 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2468 if (!cp)
2469 goto unlock;
2470
2471 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2472 if (!conn)
2473 goto unlock;
2474
2475 if (conn->state != BT_CONNECTED)
2476 goto unlock;
2477
2478 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2479 hci_conn_drop(conn);
2480
2481unlock:
2482 hci_dev_unlock(hdev);
2483}
2484
2485static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2486{
2487 struct hci_cp_switch_role *cp;
2488 struct hci_conn *conn;
2489
2490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2491
2492 if (!status)
2493 return;
2494
2495 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2496 if (!cp)
2497 return;
2498
2499 hci_dev_lock(hdev);
2500
2501 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2502 if (conn)
2503 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2504
2505 hci_dev_unlock(hdev);
2506}
2507
2508static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2509{
2510 __u8 status = *((__u8 *) skb->data);
2511 struct discovery_state *discov = &hdev->discovery;
2512 struct inquiry_entry *e;
2513
2514 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2515
2516 hci_conn_check_pending(hdev);
2517
2518 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2519 return;
2520
2521 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2522 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2523
2524 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2525 return;
2526
2527 hci_dev_lock(hdev);
2528
2529 if (discov->state != DISCOVERY_FINDING)
2530 goto unlock;
2531
2532 if (list_empty(&discov->resolve)) {
2533 /* When BR/EDR inquiry is active and no LE scanning is in
2534 * progress, then change discovery state to indicate completion.
2535 *
2536 * When running LE scanning and BR/EDR inquiry simultaneously
2537 * and the LE scan already finished, then change the discovery
2538 * state to indicate completion.
2539 */
2540 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2541 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2542 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2543 goto unlock;
2544 }
2545
2546 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2547 if (e && hci_resolve_name(hdev, e) == 0) {
2548 e->name_state = NAME_PENDING;
2549 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2550 } else {
2551 /* When BR/EDR inquiry is active and no LE scanning is in
2552 * progress, then change discovery state to indicate completion.
2553 *
2554 * When running LE scanning and BR/EDR inquiry simultaneously
2555 * and the LE scan already finished, then change the discovery
2556 * state to indicate completion.
2557 */
2558 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2559 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2560 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2561 }
2562
2563unlock:
2564 hci_dev_unlock(hdev);
2565}
2566
2567static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2568{
2569 struct inquiry_data data;
2570 struct inquiry_info *info = (void *) (skb->data + 1);
2571 int num_rsp = *((__u8 *) skb->data);
2572
2573 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2574
2575 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2576 return;
2577
2578 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2579 return;
2580
2581 hci_dev_lock(hdev);
2582
2583 for (; num_rsp; num_rsp--, info++) {
2584 u32 flags;
2585
2586 bacpy(&data.bdaddr, &info->bdaddr);
2587 data.pscan_rep_mode = info->pscan_rep_mode;
2588 data.pscan_period_mode = info->pscan_period_mode;
2589 data.pscan_mode = info->pscan_mode;
2590 memcpy(data.dev_class, info->dev_class, 3);
2591 data.clock_offset = info->clock_offset;
2592 data.rssi = HCI_RSSI_INVALID;
2593 data.ssp_mode = 0x00;
2594
2595 flags = hci_inquiry_cache_update(hdev, &data, false);
2596
2597 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2598 info->dev_class, HCI_RSSI_INVALID,
2599 flags, NULL, 0, NULL, 0);
2600 }
2601
2602 hci_dev_unlock(hdev);
2603}
2604
/* Handle HCI Connection Complete event (BR/EDR ACL and SCO links).
 *
 * Looks up (or, in specific auto-connect/SCO fallback cases, creates)
 * the hci_conn for the event, then either finalizes the connection
 * setup on success or tears the object down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			/* The request may have gone out as eSCO while the
			 * controller completed it as plain SCO; retarget the
			 * existing eSCO object to the SCO link type.
			 */
			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Without SSP and without a stored link key, a
			 * pairing is expected; keep the connection alive
			 * longer to allow it to happen.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Inherit device-wide auth/encrypt settings onto the link */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Notify upper layers of the failure and drop the object */
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2719
2720static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2721{
2722 struct hci_cp_reject_conn_req cp;
2723
2724 bacpy(&cp.bdaddr, bdaddr);
2725 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2726 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2727}
2728
/* Handle HCI Connection Request event: an incoming BR/EDR (ACL or SCO)
 * connection attempt from a remote device.  Depending on policy the
 * request is rejected, accepted immediately, or deferred to the upper
 * layer's accept/reject decision.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Give the protocol layers a chance to veto or defer the request */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Reject devices on the blocked (black) list outright */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event payload */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* Accept ACL immediately; also SCO when the controller has
		 * no eSCO support and the upper layer did not defer.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept a synchronous connection with default bandwidth
		 * and latency parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Upper layer deferred: report the request and wait for its
		 * explicit accept/reject.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2823
2824static u8 hci_to_mgmt_reason(u8 err)
2825{
2826 switch (err) {
2827 case HCI_ERROR_CONNECTION_TIMEOUT:
2828 return MGMT_DEV_DISCONN_TIMEOUT;
2829 case HCI_ERROR_REMOTE_USER_TERM:
2830 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2831 case HCI_ERROR_REMOTE_POWER_OFF:
2832 return MGMT_DEV_DISCONN_REMOTE;
2833 case HCI_ERROR_LOCAL_HOST_TERM:
2834 return MGMT_DEV_DISCONN_LOCAL_HOST;
2835 default:
2836 return MGMT_DEV_DISCONN_UNKNOWN;
2837 }
2838}
2839
/* Handle HCI Disconnection Complete event.
 *
 * Notifies mgmt of the disconnect, cleans up link keys and scan state
 * for ACL links, re-arms LE auto-connection where configured, deletes
 * the connection object, and wakes the suspend machinery / re-enables
 * advertising as needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A prior authentication failure takes precedence over the raw
	 * HCI reason when reporting to mgmt.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue devices configured for auto-connection so background
	 * scanning can pick them up again.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on link loss if the disconnect was
			 * actually a connection timeout.
			 */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before hci_conn_del() frees the object */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2931
/* Handle HCI Authentication Complete event.
 *
 * Updates the connection's auth/security flags, notifies mgmt of
 * failures, and continues the setup flow: for SSP connections still in
 * BT_CONFIG this means requesting encryption; likewise a pending
 * encryption request is issued once authentication succeeds.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			/* Legacy (non-SSP) links cannot be re-authenticated;
			 * leave the auth flag and security level untouched.
			 */
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested while authentication was still
	 * pending, start it now (or report the failure).
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3001
/* Handle HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or the failure) to the mgmt name
 * resolution machinery when mgmt is active, then kicks off outgoing
 * authentication on the connection if it is still required.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3043
3044static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3045 u16 opcode, struct sk_buff *skb)
3046{
3047 const struct hci_rp_read_enc_key_size *rp;
3048 struct hci_conn *conn;
3049 u16 handle;
3050
3051 BT_DBG("%s status 0x%02x", hdev->name, status);
3052
3053 if (!skb || skb->len < sizeof(*rp)) {
3054 bt_dev_err(hdev, "invalid read key size response");
3055 return;
3056 }
3057
3058 rp = (void *)skb->data;
3059 handle = le16_to_cpu(rp->handle);
3060
3061 hci_dev_lock(hdev);
3062
3063 conn = hci_conn_hash_lookup_handle(hdev, handle);
3064 if (!conn)
3065 goto unlock;
3066
3067 /* While unexpected, the read_enc_key_size command may fail. The most
3068 * secure approach is to then assume the key size is 0 to force a
3069 * disconnection.
3070 */
3071 if (rp->status) {
3072 bt_dev_err(hdev, "failed to read key size for handle %u",
3073 handle);
3074 conn->enc_key_size = 0;
3075 } else {
3076 conn->enc_key_size = rp->key_size;
3077 }
3078
3079 hci_encrypt_cfm(conn, 0);
3080
3081unlock:
3082 hci_dev_unlock(hdev);
3083}
3084
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption/auth flags, enforces the link's
 * security requirements (disconnecting on failure), reads the actual
 * encryption key size on encrypted ACL links, optionally programs the
 * authenticated payload timeout, and finally notifies upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 (AES-CCM for BR/EDR); LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete() */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3198
3199static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3200 struct sk_buff *skb)
3201{
3202 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3203 struct hci_conn *conn;
3204
3205 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3206
3207 hci_dev_lock(hdev);
3208
3209 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3210 if (conn) {
3211 if (!ev->status)
3212 set_bit(HCI_CONN_SECURE, &conn->flags);
3213
3214 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3215
3216 hci_key_change_cfm(conn, ev->status);
3217 }
3218
3219 hci_dev_unlock(hdev);
3220}
3221
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0 and continues connection setup:
 * request extended features when both sides support them, otherwise
 * resolve the remote name (or notify mgmt) and finish the connection
 * unless outgoing authentication is still required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides support extended features: fetch page 1 and let its
	 * completion continue the setup flow.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3270
/* Handle HCI Command Complete event.
 *
 * Extracts the completed opcode and status, dispatches to the matching
 * per-command completion handler, then performs the common bookkeeping:
 * cancelling the command timeout, refreshing the command credit count,
 * and resolving any HCI request waiting on this command.
 *
 * @opcode/@status are out-parameters for the caller's event processing;
 * @req_complete/@req_complete_skb are filled in by hci_req_cmd_complete().
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The status byte is the first byte of the return parameters */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_filter(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_AUTH_PAYLOAD_TO:
		hci_cc_read_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
		hci_cc_write_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
		hci_cc_read_local_pairing_opts(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
		hci_cc_read_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
		hci_cc_write_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
		hci_cc_le_add_to_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
		hci_cc_le_del_from_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_READ_TRANSMIT_POWER:
		hci_cc_le_read_transmit_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real (non-NOP) command has completed; stop its timeout timer */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Controller signals it can accept another command (unless a
	 * reset is in flight).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Kick the command queue if more commands are waiting */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3651
/* Handle the HCI Command Status event.
 *
 * Extracts the opcode and status of the command the controller is
 * reporting on, dispatches to the matching hci_cs_* handler and then
 * updates command flow control state (cmd_timer, cmd_cnt) before
 * possibly completing a pending request built on this command.
 *
 * @opcode and @status are output parameters consumed by the caller
 * together with @req_complete/@req_complete_skb.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A response to a real command proves the controller is alive,
	 * so the command timeout watchdog can be stopped.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-open the command window unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* An event while a command is still outstanding is unexpected;
	 * do not kick the command queue in that case.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3764
3765static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3766{
3767 struct hci_ev_hardware_error *ev = (void *) skb->data;
3768
3769 hdev->hw_error_code = ev->code;
3770
3771 queue_work(hdev->req_workqueue, &hdev->error_reset);
3772}
3773
3774static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3775{
3776 struct hci_ev_role_change *ev = (void *) skb->data;
3777 struct hci_conn *conn;
3778
3779 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3780
3781 hci_dev_lock(hdev);
3782
3783 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3784 if (conn) {
3785 if (!ev->status)
3786 conn->role = ev->role;
3787
3788 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3789
3790 hci_role_switch_cfm(conn, ev->status, ev->role);
3791 }
3792
3793 hci_dev_unlock(hdev);
3794}
3795
/* Handle the Number Of Completed Packets event.
 *
 * Returns transmit credits for each reported connection handle and
 * re-kicks the TX work item so queued traffic can make use of them.
 * Only valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb really carries num_hndl entries before
	 * walking the variable-length handle array.
	 */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the pool matching the link type,
		 * clamping at the controller-advertised buffer count.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL buffer pool for LE traffic.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3862
3863static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3864 __u16 handle)
3865{
3866 struct hci_chan *chan;
3867
3868 switch (hdev->dev_type) {
3869 case HCI_PRIMARY:
3870 return hci_conn_hash_lookup_handle(hdev, handle);
3871 case HCI_AMP:
3872 chan = hci_chan_lookup_handle(hdev, handle);
3873 if (chan)
3874 return chan->conn;
3875 break;
3876 default:
3877 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3878 break;
3879 }
3880
3881 return NULL;
3882}
3883
/* Handle the Number Of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): returns data
 * block credits per handle and re-kicks TX work. Only valid in
 * block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Bounds-check the variable-length handle array before use */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to an AMP logical channel, hence the
		 * device-type aware lookup helper.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp at the controller-advertised block count */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3934
/* Handle the Mode Change event (active/hold/sniff transitions).
 *
 * Updates the cached power-save state of the connection and finishes
 * any SCO setup that was deferred until the mode change completed.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only track POWER_SAVE for mode changes the host did
		 * not itself request (no MODE_CHANGE_PEND set).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* A SCO setup may have been waiting for this transition */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3962
/* Handle the PIN Code Request event (legacy pairing).
 *
 * Rejects the request when the device is not bondable and we did not
 * initiate authentication; otherwise forwards it to user space via
 * the management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress;
	 * the hold/drop pair only refreshes the timer.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* A 16-digit PIN is required for high security levels */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4000
4001static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4002{
4003 if (key_type == HCI_LK_CHANGED_COMBINATION)
4004 return;
4005
4006 conn->pin_length = pin_len;
4007 conn->key_type = key_type;
4008
4009 switch (key_type) {
4010 case HCI_LK_LOCAL_UNIT:
4011 case HCI_LK_REMOTE_UNIT:
4012 case HCI_LK_DEBUG_COMBINATION:
4013 return;
4014 case HCI_LK_COMBINATION:
4015 if (pin_len == 16)
4016 conn->pending_sec_level = BT_SECURITY_HIGH;
4017 else
4018 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4019 break;
4020 case HCI_LK_UNAUTH_COMBINATION_P192:
4021 case HCI_LK_UNAUTH_COMBINATION_P256:
4022 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4023 break;
4024 case HCI_LK_AUTH_COMBINATION_P192:
4025 conn->pending_sec_level = BT_SECURITY_HIGH;
4026 break;
4027 case HCI_LK_AUTH_COMBINATION_P256:
4028 conn->pending_sec_level = BT_SECURITY_FIPS;
4029 break;
4030 }
4031}
4032
/* Handle the Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key is too weak for the security level the connection requires,
 * in which case a negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in control */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject unauthenticated keys when MITM protection is
		 * required (auth_type bit 0) and known (not 0xff).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN cannot satisfy
		 * high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4092
/* Handle the Link Key Notification event (a new key was created).
 *
 * Records the key on the connection, stores it in the key list (when
 * mgmt is active), notifies user space, and decides whether the key
 * is kept persistently.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Refresh the disconnect timeout now that pairing finished */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys must be flushed when the link goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4152
4153static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4154{
4155 struct hci_ev_clock_offset *ev = (void *) skb->data;
4156 struct hci_conn *conn;
4157
4158 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4159
4160 hci_dev_lock(hdev);
4161
4162 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4163 if (conn && !ev->status) {
4164 struct inquiry_entry *ie;
4165
4166 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4167 if (ie) {
4168 ie->data.clock_offset = ev->clock_offset;
4169 ie->timestamp = jiffies;
4170 }
4171 }
4172
4173 hci_dev_unlock(hdev);
4174}
4175
4176static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4177{
4178 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4179 struct hci_conn *conn;
4180
4181 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4182
4183 hci_dev_lock(hdev);
4184
4185 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4186 if (conn && !ev->status)
4187 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4188
4189 hci_dev_unlock(hdev);
4190}
4191
4192static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4193{
4194 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4195 struct inquiry_entry *ie;
4196
4197 BT_DBG("%s", hdev->name);
4198
4199 hci_dev_lock(hdev);
4200
4201 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4202 if (ie) {
4203 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4204 ie->timestamp = jiffies;
4205 }
4206
4207 hci_dev_unlock(hdev);
4208}
4209
/* Handle the Inquiry Result with RSSI event.
 *
 * Two wire formats exist for this event (with and without the page
 * scan mode field); they are distinguished by dividing the payload
 * size by the response count. Each result is folded into the inquiry
 * cache and forwarded to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported upwards */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		/* Guard against truncated events before iterating */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		/* Guard against truncated events before iterating */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
4280
/* Handle the Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page, derives SSP/SC state from page 1
 * host features and, while still in BT_CONFIG, continues connection
 * setup (remote name request, mgmt notification, authentication).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Fetch the remote name before telling mgmt about the device */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required, setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4344
/* Handle the Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection becomes BT_CONNECTED; for a set of
 * retry-able failure codes an outgoing eSCO attempt is downgraded and
 * retried before giving up. Finishes by notifying the SCO driver of
 * the air mode and confirming the connection to upper layers.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	/* Failure codes for which an outgoing attempt is retried with
	 * a reduced (non-EDR eSCO) packet type selection.
	 */
	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);

	/* Let the driver prepare its transport for the negotiated codec */
	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_CVSD:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
		break;
	case SCO_AIRMODE_TRANSP:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4425
4426static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4427{
4428 size_t parsed = 0;
4429
4430 while (parsed < eir_len) {
4431 u8 field_len = eir[0];
4432
4433 if (field_len == 0)
4434 return parsed;
4435
4436 parsed += field_len + 1;
4437 eir += field_len + 1;
4438 }
4439
4440 return eir_len;
4441}
4442
/* Handle the Extended Inquiry Result event.
 *
 * Each response carries EIR data; the device is added to the inquiry
 * cache and reported to the management interface together with the
 * significant portion of its EIR blob.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Reject empty or truncated events up front */
	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
		return;

	/* Results of a periodic inquiry are not reported upwards */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* A complete-name EIR field spares a name request later */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
4492
/* Handle the Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links (BR/EDR is handled via the auth
 * complete event): promotes the pending security level on success,
 * or disconnects on failure of an established connection.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live link is treated as an auth failure */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4542
4543static u8 hci_get_auth_req(struct hci_conn *conn)
4544{
4545 /* If remote requests no-bonding follow that lead */
4546 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4547 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4548 return conn->remote_auth | (conn->auth_type & 0x01);
4549
4550 /* If both remote and local have enough IO capabilities, require
4551 * MITM protection
4552 */
4553 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4554 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4555 return conn->remote_auth | 0x01;
4556
4557 /* No MITM protection possible so ignore remote requirement */
4558 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4559}
4560
/* Report whether usable out-of-band pairing data exists for @conn.
 *
 * Returns 0x00 when no (usable) OOB data is stored, 0x01 when P-192
 * data is present, 0x02 when P-256 data is present, or the stored
 * present value when Secure Connections is enabled outside SC-only
 * mode.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
4602
/* Handle the IO Capability Request event.
 *
 * Replies with our IO capability and derived authentication
 * requirements when pairing is allowed, or with a negative reply
 * (pairing not allowed) otherwise.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive across the pairing exchange */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4671
4672static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4673{
4674 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4675 struct hci_conn *conn;
4676
4677 BT_DBG("%s", hdev->name);
4678
4679 hci_dev_lock(hdev);
4680
4681 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4682 if (!conn)
4683 goto unlock;
4684
4685 conn->remote_cap = ev->capability;
4686 conn->remote_auth = ev->authentication;
4687
4688unlock:
4689 hci_dev_unlock(hdev);
4690}
4691
/* Handle the User Confirmation Request event (SSP numeric comparison).
 *
 * Decides between rejecting (MITM required but remote cannot provide
 * it), auto-accepting (no MITM needed anywhere), or asking user space
 * to confirm the passkey via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the accept to mitigate fast probing */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4776
4777static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4778 struct sk_buff *skb)
4779{
4780 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4781
4782 BT_DBG("%s", hdev->name);
4783
4784 if (hci_dev_test_flag(hdev, HCI_MGMT))
4785 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4786}
4787
4788static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4789 struct sk_buff *skb)
4790{
4791 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4792 struct hci_conn *conn;
4793
4794 BT_DBG("%s", hdev->name);
4795
4796 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4797 if (!conn)
4798 return;
4799
4800 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4801 conn->passkey_entered = 0;
4802
4803 if (hci_dev_test_flag(hdev, HCI_MGMT))
4804 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4805 conn->dst_type, conn->passkey_notify,
4806 conn->passkey_entered);
4807}
4808
4809static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4810{
4811 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4812 struct hci_conn *conn;
4813
4814 BT_DBG("%s", hdev->name);
4815
4816 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4817 if (!conn)
4818 return;
4819
4820 switch (ev->type) {
4821 case HCI_KEYPRESS_STARTED:
4822 conn->passkey_entered = 0;
4823 return;
4824
4825 case HCI_KEYPRESS_ENTERED:
4826 conn->passkey_entered++;
4827 break;
4828
4829 case HCI_KEYPRESS_ERASED:
4830 conn->passkey_entered--;
4831 break;
4832
4833 case HCI_KEYPRESS_CLEARED:
4834 conn->passkey_entered = 0;
4835 break;
4836
4837 case HCI_KEYPRESS_COMPLETED:
4838 return;
4839 }
4840
4841 if (hci_dev_test_flag(hdev, HCI_MGMT))
4842 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4843 conn->dst_type, conn->passkey_notify,
4844 conn->passkey_entered);
4845}
4846
4847static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4848 struct sk_buff *skb)
4849{
4850 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4851 struct hci_conn *conn;
4852
4853 BT_DBG("%s", hdev->name);
4854
4855 hci_dev_lock(hdev);
4856
4857 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4858 if (!conn)
4859 goto unlock;
4860
4861 /* Reset the authentication requirement to unknown */
4862 conn->remote_auth = 0xff;
4863
4864 /* To avoid duplicate auth_failed events to user space we check
4865 * the HCI_CONN_AUTH_PEND flag which will be set if we
4866 * initiated the authentication. A traditional auth_complete
4867 * event gets always produced as initiator and is also mapped to
4868 * the mgmt_auth_failed event */
4869 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4870 mgmt_auth_failed(conn, ev->status);
4871
4872 hci_conn_drop(conn);
4873
4874unlock:
4875 hci_dev_unlock(hdev);
4876}
4877
4878static void hci_remote_host_features_evt(struct hci_dev *hdev,
4879 struct sk_buff *skb)
4880{
4881 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4882 struct inquiry_entry *ie;
4883 struct hci_conn *conn;
4884
4885 BT_DBG("%s", hdev->name);
4886
4887 hci_dev_lock(hdev);
4888
4889 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4890 if (conn)
4891 memcpy(conn->features[1], ev->features, 8);
4892
4893 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4894 if (ie)
4895 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4896
4897 hci_dev_unlock(hdev);
4898}
4899
4900static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4901 struct sk_buff *skb)
4902{
4903 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4904 struct oob_data *data;
4905
4906 BT_DBG("%s", hdev->name);
4907
4908 hci_dev_lock(hdev);
4909
4910 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4911 goto unlock;
4912
4913 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4914 if (!data) {
4915 struct hci_cp_remote_oob_data_neg_reply cp;
4916
4917 bacpy(&cp.bdaddr, &ev->bdaddr);
4918 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4919 sizeof(cp), &cp);
4920 goto unlock;
4921 }
4922
4923 if (bredr_sc_enabled(hdev)) {
4924 struct hci_cp_remote_oob_ext_data_reply cp;
4925
4926 bacpy(&cp.bdaddr, &ev->bdaddr);
4927 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4928 memset(cp.hash192, 0, sizeof(cp.hash192));
4929 memset(cp.rand192, 0, sizeof(cp.rand192));
4930 } else {
4931 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4932 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4933 }
4934 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4935 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4936
4937 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4938 sizeof(cp), &cp);
4939 } else {
4940 struct hci_cp_remote_oob_data_reply cp;
4941
4942 bacpy(&cp.bdaddr, &ev->bdaddr);
4943 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4944 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4945
4946 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4947 sizeof(cp), &cp);
4948 }
4949
4950unlock:
4951 hci_dev_unlock(hdev);
4952}
4953
4954#if IS_ENABLED(CONFIG_BT_HS)
4955static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4956{
4957 struct hci_ev_channel_selected *ev = (void *)skb->data;
4958 struct hci_conn *hcon;
4959
4960 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4961
4962 skb_pull(skb, sizeof(*ev));
4963
4964 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4965 if (!hcon)
4966 return;
4967
4968 amp_read_loc_assoc_final_data(hdev, hcon);
4969}
4970
/* Handle the AMP Physical Link Complete event. On success the AMP
 * connection is marked connected and wired up with the BR/EDR
 * connection driving the AMP manager; on error the half-created
 * connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only connections that belong to an AMP manager are handled */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* NOTE(review): the hold/drop pair around setting disc_timeout
	 * appears intended to (re)arm the idle disconnect timer with the
	 * regular timeout via hci_conn_drop — confirm against hci_conn.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5011
/* Handle the AMP Logical Link Complete event: create the hchan for the
 * new logical link and, when a BR/EDR L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5050
5051static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5052 struct sk_buff *skb)
5053{
5054 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5055 struct hci_chan *hchan;
5056
5057 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5058 le16_to_cpu(ev->handle), ev->status);
5059
5060 if (ev->status)
5061 return;
5062
5063 hci_dev_lock(hdev);
5064
5065 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5066 if (!hchan || !hchan->amp)
5067 goto unlock;
5068
5069 amp_destroy_logical_link(hchan, ev->reason);
5070
5071unlock:
5072 hci_dev_unlock(hdev);
5073}
5074
5075static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5076 struct sk_buff *skb)
5077{
5078 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5079 struct hci_conn *hcon;
5080
5081 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5082
5083 if (ev->status)
5084 return;
5085
5086 hci_dev_lock(hdev);
5087
5088 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5089 if (hcon) {
5090 hcon->state = BT_CLOSED;
5091 hci_conn_del(hcon);
5092 }
5093
5094 hci_dev_unlock(hdev);
5095}
5096#endif
5097
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete. Creates or finalizes the hci_conn object, records the
 * initiator/responder addresses, resolves the peer identity via IRK,
 * and either fails the connection or transitions it towards the
 * connected state (optionally reading remote features first).
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
	bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
	u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The pending connection attempt succeeded or failed, so
		 * its timeout no longer applies.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* A successful connection consumes any pending auto-connect
	 * action and releases the reference the params held on it.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5259
5260static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5261{
5262 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5263
5264 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5265
5266 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5267 ev->role, le16_to_cpu(ev->handle),
5268 le16_to_cpu(ev->interval),
5269 le16_to_cpu(ev->latency),
5270 le16_to_cpu(ev->supervision_timeout));
5271}
5272
5273static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5274 struct sk_buff *skb)
5275{
5276 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5277
5278 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5279
5280 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5281 ev->role, le16_to_cpu(ev->handle),
5282 le16_to_cpu(ev->interval),
5283 le16_to_cpu(ev->latency),
5284 le16_to_cpu(ev->supervision_timeout));
5285
5286 if (use_ll_privacy(hdev) &&
5287 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5288 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5289 hci_req_disable_address_resolution(hdev);
5290}
5291
5292static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5293{
5294 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5295 struct hci_conn *conn;
5296
5297 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5298
5299 if (ev->status)
5300 return;
5301
5302 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5303 if (conn) {
5304 struct adv_info *adv_instance;
5305
5306 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
5307 return;
5308
5309 if (!ev->handle) {
5310 bacpy(&conn->resp_addr, &hdev->random_addr);
5311 return;
5312 }
5313
5314 adv_instance = hci_find_adv_instance(hdev, ev->handle);
5315 if (adv_instance)
5316 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5317 }
5318}
5319
5320static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5321 struct sk_buff *skb)
5322{
5323 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5324 struct hci_conn *conn;
5325
5326 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5327
5328 if (ev->status)
5329 return;
5330
5331 hci_dev_lock(hdev);
5332
5333 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5334 if (conn) {
5335 conn->le_conn_interval = le16_to_cpu(ev->interval);
5336 conn->le_conn_latency = le16_to_cpu(ev->latency);
5337 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5338 }
5339
5340 hci_dev_unlock(hdev);
5341}
5342
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on an incoming advertising report, whether a pending
 * LE connection should be attempted to @addr. Returns the new
 * hci_conn on success or NULL when no connection attempt is made
 * (not connectable, blocked, busy, or no matching pending params).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* Explicit connects bypass the auto-connect policy below */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5433
/* Process a single (possibly extended) advertising report: validate
 * type and length, resolve the peer identity, kick off pending LE
 * connections, and generate or merge mgmt device-found events.
 * ADV_IND/ADV_SCAN_IND reports may be cached so a following SCAN_RSP
 * can be merged into one device-found event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
				       len, real_len);
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
				     direct_addr);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5639
5640static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5641{
5642 u8 num_reports = skb->data[0];
5643 void *ptr = &skb->data[1];
5644
5645 hci_dev_lock(hdev);
5646
5647 while (num_reports--) {
5648 struct hci_ev_le_advertising_info *ev = ptr;
5649 s8 rssi;
5650
5651 if (ev->length <= HCI_MAX_AD_LENGTH) {
5652 rssi = ev->data[ev->length];
5653 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5654 ev->bdaddr_type, NULL, 0, rssi,
5655 ev->data, ev->length, false);
5656 } else {
5657 bt_dev_err(hdev, "Dropping invalid advertising data");
5658 }
5659
5660 ptr += sizeof(*ev) + ev->length + 1;
5661 }
5662
5663 hci_dev_unlock(hdev);
5664}
5665
5666static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5667{
5668 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5669 switch (evt_type) {
5670 case LE_LEGACY_ADV_IND:
5671 return LE_ADV_IND;
5672 case LE_LEGACY_ADV_DIRECT_IND:
5673 return LE_ADV_DIRECT_IND;
5674 case LE_LEGACY_ADV_SCAN_IND:
5675 return LE_ADV_SCAN_IND;
5676 case LE_LEGACY_NONCONN_IND:
5677 return LE_ADV_NONCONN_IND;
5678 case LE_LEGACY_SCAN_RSP_ADV:
5679 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5680 return LE_ADV_SCAN_RSP;
5681 }
5682
5683 goto invalid;
5684 }
5685
5686 if (evt_type & LE_EXT_ADV_CONN_IND) {
5687 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5688 return LE_ADV_DIRECT_IND;
5689
5690 return LE_ADV_IND;
5691 }
5692
5693 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5694 return LE_ADV_SCAN_RSP;
5695
5696 if (evt_type & LE_EXT_ADV_SCAN_IND)
5697 return LE_ADV_SCAN_IND;
5698
5699 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5700 evt_type & LE_EXT_ADV_DIRECT_IND)
5701 return LE_ADV_NONCONN_IND;
5702
5703invalid:
5704 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5705 evt_type);
5706
5707 return LE_ADV_INVALID;
5708}
5709
5710static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5711{
5712 u8 num_reports = skb->data[0];
5713 void *ptr = &skb->data[1];
5714
5715 hci_dev_lock(hdev);
5716
5717 while (num_reports--) {
5718 struct hci_ev_le_ext_adv_report *ev = ptr;
5719 u8 legacy_evt_type;
5720 u16 evt_type;
5721
5722 evt_type = __le16_to_cpu(ev->evt_type);
5723 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5724 if (legacy_evt_type != LE_ADV_INVALID) {
5725 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5726 ev->bdaddr_type, NULL, 0, ev->rssi,
5727 ev->data, ev->length,
5728 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5729 }
5730
5731 ptr += sizeof(*ev) + ev->length;
5732 }
5733
5734 hci_dev_unlock(hdev);
5735}
5736
/* Handle LE Read Remote Features Complete: store the remote feature
 * bits and, when the connection is still in BT_CONFIG, complete its
 * setup towards the connected state.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			/* Drop the reference taken when the feature read
			 * was issued during connection setup.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
5778
/* Handle the LE Long Term Key Request event: look up a stored LTK for
 * the connection, validate EDiv/Rand against the key type, and reply
 * with the key material or a negative reply when no usable key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it out to the full reply size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5842
5843static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5844 u8 reason)
5845{
5846 struct hci_cp_le_conn_param_req_neg_reply cp;
5847
5848 cp.handle = cpu_to_le16(handle);
5849 cp.reason = reason;
5850
5851 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5852 &cp);
5853}
5854
/* Handle the LE Remote Connection Parameter Request event.
 *
 * The remote peer asked to change the connection parameters.  Validate
 * the request and either accept it with a Connection Parameter Request
 * Reply or reject it with a negative reply carrying an error code.  In
 * the master role the accepted values are also stored in the per-peer
 * connection parameters and reported to mgmt.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Unknown or not-yet-established connection: reject */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Out-of-range parameter combination: reject */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		/* Notify userspace so it can persist the new parameters;
		 * store_hint tells it whether the kernel kept a copy.
		 */
		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept: echo the peer's little-endian values back unchanged */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
5912
5913static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5914 struct sk_buff *skb)
5915{
5916 u8 num_reports = skb->data[0];
5917 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
5918
5919 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5920 return;
5921
5922 hci_dev_lock(hdev);
5923
5924 for (; num_reports; num_reports--, ev++)
5925 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5926 ev->bdaddr_type, &ev->direct_addr,
5927 ev->direct_addr_type, ev->rssi, NULL, 0,
5928 false);
5929
5930 hci_dev_unlock(hdev);
5931}
5932
5933static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5934{
5935 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5936 struct hci_conn *conn;
5937
5938 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5939
5940 if (ev->status)
5941 return;
5942
5943 hci_dev_lock(hdev);
5944
5945 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5946 if (!conn)
5947 goto unlock;
5948
5949 conn->le_tx_phy = ev->tx_phy;
5950 conn->le_rx_phy = ev->rx_phy;
5951
5952unlock:
5953 hci_dev_unlock(hdev);
5954}
5955
5956static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5957{
5958 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5959
5960 skb_pull(skb, sizeof(*le_ev));
5961
5962 switch (le_ev->subevent) {
5963 case HCI_EV_LE_CONN_COMPLETE:
5964 hci_le_conn_complete_evt(hdev, skb);
5965 break;
5966
5967 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5968 hci_le_conn_update_complete_evt(hdev, skb);
5969 break;
5970
5971 case HCI_EV_LE_ADVERTISING_REPORT:
5972 hci_le_adv_report_evt(hdev, skb);
5973 break;
5974
5975 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5976 hci_le_remote_feat_complete_evt(hdev, skb);
5977 break;
5978
5979 case HCI_EV_LE_LTK_REQ:
5980 hci_le_ltk_request_evt(hdev, skb);
5981 break;
5982
5983 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5984 hci_le_remote_conn_param_req_evt(hdev, skb);
5985 break;
5986
5987 case HCI_EV_LE_DIRECT_ADV_REPORT:
5988 hci_le_direct_adv_report_evt(hdev, skb);
5989 break;
5990
5991 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
5992 hci_le_phy_update_evt(hdev, skb);
5993 break;
5994
5995 case HCI_EV_LE_EXT_ADV_REPORT:
5996 hci_le_ext_adv_report_evt(hdev, skb);
5997 break;
5998
5999 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6000 hci_le_enh_conn_complete_evt(hdev, skb);
6001 break;
6002
6003 case HCI_EV_LE_EXT_ADV_SET_TERM:
6004 hci_le_ext_adv_term_evt(hdev, skb);
6005 break;
6006
6007 default:
6008 break;
6009 }
6010}
6011
/* Check whether @skb (the last received event for a request) is the
 * Command Complete event for @opcode, or — when @event is non-zero —
 * whether it is that specific event.
 *
 * On success the event (and cmd_complete) headers are pulled off @skb
 * so that only the return parameters remain.  Returns true when the
 * skb matches and may be handed to a req_complete_skb callback.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* When the request waits for a specific event, only the event
	 * code has to match.
	 */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	/* The completed command must be the one the request sent */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
6063
6064static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6065 struct sk_buff *skb)
6066{
6067 struct hci_ev_le_advertising_info *adv;
6068 struct hci_ev_le_direct_adv_info *direct_adv;
6069 struct hci_ev_le_ext_adv_report *ext_adv;
6070 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6071 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6072
6073 hci_dev_lock(hdev);
6074
6075 /* If we are currently suspended and this is the first BT event seen,
6076 * save the wake reason associated with the event.
6077 */
6078 if (!hdev->suspended || hdev->wake_reason)
6079 goto unlock;
6080
6081 /* Default to remote wake. Values for wake_reason are documented in the
6082 * Bluez mgmt api docs.
6083 */
6084 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6085
6086 /* Once configured for remote wakeup, we should only wake up for
6087 * reconnections. It's useful to see which device is waking us up so
6088 * keep track of the bdaddr of the connection event that woke us up.
6089 */
6090 if (event == HCI_EV_CONN_REQUEST) {
6091 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6092 hdev->wake_addr_type = BDADDR_BREDR;
6093 } else if (event == HCI_EV_CONN_COMPLETE) {
6094 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6095 hdev->wake_addr_type = BDADDR_BREDR;
6096 } else if (event == HCI_EV_LE_META) {
6097 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6098 u8 subevent = le_ev->subevent;
6099 u8 *ptr = &skb->data[sizeof(*le_ev)];
6100 u8 num_reports = *ptr;
6101
6102 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6103 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6104 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6105 num_reports) {
6106 adv = (void *)(ptr + 1);
6107 direct_adv = (void *)(ptr + 1);
6108 ext_adv = (void *)(ptr + 1);
6109
6110 switch (subevent) {
6111 case HCI_EV_LE_ADVERTISING_REPORT:
6112 bacpy(&hdev->wake_addr, &adv->bdaddr);
6113 hdev->wake_addr_type = adv->bdaddr_type;
6114 break;
6115 case HCI_EV_LE_DIRECT_ADV_REPORT:
6116 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6117 hdev->wake_addr_type = direct_adv->bdaddr_type;
6118 break;
6119 case HCI_EV_LE_EXT_ADV_REPORT:
6120 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6121 hdev->wake_addr_type = ext_adv->bdaddr_type;
6122 break;
6123 }
6124 }
6125 } else {
6126 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6127 }
6128
6129unlock:
6130 hci_dev_unlock(hdev);
6131}
6132
/* Main HCI event entry point.
 *
 * Parses the event header, dispatches the event to its handler, and
 * completes any pending request that was waiting for this event.  The
 * skb (and the pristine clone taken for req_complete_skb) are always
 * consumed here.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the currently outstanding command waits for exactly this
	 * event, resolve its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status may themselves resolve the pending
	 * request; they fill in opcode/status and the callbacks.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP/High Speed events are only handled when BT_HS is built in */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Run whichever request-completion callback was resolved.  For
	 * the skb variant, hand over the pristine clone only when it
	 * really is the matching Command Complete (or expected event);
	 * otherwise pass NULL.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}