Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/export.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/mgmt.h>
33
34/* Handle HCI Event packets */
35
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * The event payload is a single status byte.  On failure, tell the
 * mgmt layer that stopping discovery failed.  On success, clear the
 * inquiry flag, move the discovery state machine to STOPPED and then
 * resume any connection attempts that were held off while inquiring.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}
59
60static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
61{
62 __u8 status = *((__u8 *) skb->data);
63
64 BT_DBG("%s status 0x%2.2x", hdev->name, status);
65
66 if (status)
67 return;
68
69 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
70}
71
72static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{
74 __u8 status = *((__u8 *) skb->data);
75
76 BT_DBG("%s status 0x%2.2x", hdev->name, status);
77
78 if (status)
79 return;
80
81 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82
83 hci_conn_check_pending(hdev);
84}
85
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * No local state to update; only emit a debug trace.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
91
92static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
93{
94 struct hci_rp_role_discovery *rp = (void *) skb->data;
95 struct hci_conn *conn;
96
97 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
98
99 if (rp->status)
100 return;
101
102 hci_dev_lock(hdev);
103
104 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
105 if (conn) {
106 if (rp->role)
107 conn->link_mode &= ~HCI_LM_MASTER;
108 else
109 conn->link_mode |= HCI_LM_MASTER;
110 }
111
112 hci_dev_unlock(hdev);
113}
114
115static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116{
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
119
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121
122 if (rp->status)
123 return;
124
125 hci_dev_lock(hdev);
126
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 if (conn)
129 conn->link_policy = __le16_to_cpu(rp->policy);
130
131 hci_dev_unlock(hdev);
132}
133
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, mirror the policy we asked for into the matching
 * connection object.  The value comes from the sent command because
 * the Command Complete event only echoes handle and status.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit connection handle at the start
		 * of the command parameters; the policy field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
157
158static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 struct sk_buff *skb)
160{
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169}
170
171static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
172 struct sk_buff *skb)
173{
174 __u8 status = *((__u8 *) skb->data);
175 void *sent;
176
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
178
179 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
180 if (!sent)
181 return;
182
183 if (!status)
184 hdev->link_policy = get_unaligned_le16(sent);
185
186 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
187}
188
/* Command Complete handler for HCI_OP_RESET. */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset command itself has finished, regardless of status. */
	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
			     BIT(HCI_PERIODIC_INQ));

	/* Any discovery in flight is implicitly cancelled by the reset. */
	hdev->discovery.state = DISCOVERY_STOPPED;
}
205
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * The name we attempted to set is recovered from the sent command,
 * since the event itself carries only a status byte.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* With mgmt in use, notify it either way (it sees the status);
	 * otherwise cache the name only on success.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
228
229static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
230{
231 struct hci_rp_read_local_name *rp = (void *) skb->data;
232
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234
235 if (rp->status)
236 return;
237
238 if (test_bit(HCI_SETUP, &hdev->dev_flags))
239 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
240}
241
242static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
243{
244 __u8 status = *((__u8 *) skb->data);
245 void *sent;
246
247 BT_DBG("%s status 0x%2.2x", hdev->name, status);
248
249 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
250 if (!sent)
251 return;
252
253 if (!status) {
254 __u8 param = *((__u8 *) sent);
255
256 if (param == AUTH_ENABLED)
257 set_bit(HCI_AUTH, &hdev->flags);
258 else
259 clear_bit(HCI_AUTH, &hdev->flags);
260 }
261
262 if (test_bit(HCI_MGMT, &hdev->dev_flags))
263 mgmt_auth_enable_complete(hdev, status);
264
265 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
266}
267
268static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269{
270 __u8 status = *((__u8 *) skb->data);
271 void *sent;
272
273 BT_DBG("%s status 0x%2.2x", hdev->name, status);
274
275 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
276 if (!sent)
277 return;
278
279 if (!status) {
280 __u8 param = *((__u8 *) sent);
281
282 if (param)
283 set_bit(HCI_ENCRYPT, &hdev->flags);
284 else
285 clear_bit(HCI_ENCRYPT, &hdev->flags);
286 }
287
288 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
289}
290
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Updates the ISCAN/PSCAN flags to match the scan mode we requested
 * and emits the corresponding mgmt discoverable/connectable events
 * only on actual transitions.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The scan mode we asked for is the command's only parameter. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		/* Failed to enable: forget any pending discoverable timeout */
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Clear both flags up front and remember the old values so the
	 * mgmt events below fire only on real state changes.
	 */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* A non-zero timeout means discoverable mode is temporary;
		 * schedule the delayed work that will turn it off again.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
339
340static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
341{
342 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
343
344 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
345
346 if (rp->status)
347 return;
348
349 memcpy(hdev->dev_class, rp->dev_class, 3);
350
351 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
352 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
353}
354
355static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
356{
357 __u8 status = *((__u8 *) skb->data);
358 void *sent;
359
360 BT_DBG("%s status 0x%2.2x", hdev->name, status);
361
362 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 if (!sent)
364 return;
365
366 hci_dev_lock(hdev);
367
368 if (status == 0)
369 memcpy(hdev->dev_class, sent, 3);
370
371 if (test_bit(HCI_MGMT, &hdev->dev_flags))
372 mgmt_set_class_of_dev_complete(hdev, sent, status);
373
374 hci_dev_unlock(hdev);
375}
376
377static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
378{
379 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
380 __u16 setting;
381
382 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
383
384 if (rp->status)
385 return;
386
387 setting = __le16_to_cpu(rp->voice_setting);
388
389 if (hdev->voice_setting == setting)
390 return;
391
392 hdev->voice_setting = setting;
393
394 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
395
396 if (hdev->notify)
397 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
398}
399
400static void hci_cc_write_voice_setting(struct hci_dev *hdev,
401 struct sk_buff *skb)
402{
403 __u8 status = *((__u8 *) skb->data);
404 __u16 setting;
405 void *sent;
406
407 BT_DBG("%s status 0x%2.2x", hdev->name, status);
408
409 if (status)
410 return;
411
412 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
413 if (!sent)
414 return;
415
416 setting = get_unaligned_le16(sent);
417
418 if (hdev->voice_setting == setting)
419 return;
420
421 hdev->voice_setting = setting;
422
423 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
424
425 if (hdev->notify)
426 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
427}
428
429static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
430{
431 __u8 status = *((__u8 *) skb->data);
432
433 BT_DBG("%s status 0x%2.2x", hdev->name, status);
434
435 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
436}
437
438static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
439{
440 __u8 status = *((__u8 *) skb->data);
441 void *sent;
442
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
444
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
446 if (!sent)
447 return;
448
449 if (test_bit(HCI_MGMT, &hdev->dev_flags))
450 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
451 else if (!status) {
452 if (*((u8 *) sent))
453 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
454 else
455 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
456 }
457}
458
/* Pick the best inquiry result mode the controller supports:
 * 2 = inquiry with extended results, 1 = inquiry with RSSI,
 * 0 = standard inquiry.
 */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	/* The checks below match specific controllers by manufacturer id,
	 * HCI revision and LMP subversion that handle RSSI inquiry
	 * despite not advertising the feature bit — presumably quirks
	 * determined empirically; do not extend without hardware testing.
	 */
	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}
486
487static void hci_setup_inquiry_mode(struct hci_dev *hdev)
488{
489 u8 mode;
490
491 mode = hci_get_inquiry_mode(hdev);
492
493 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
494}
495
/* Build and send the HCI event mask, enabling optional events only
 * when the corresponding LMP feature is supported.  The byte/bit
 * layout follows the HCI Set Event Mask command.
 */
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	/* Events always enabled on 1.2+ controllers */
	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		/* All Secure Simple Pairing related events */
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
550
/* Version-dependent controller initialization, run once the local
 * version information is known (called from the Read Local Version
 * completion path during HCI_INIT).  BR/EDR only.
 */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	/* Read Local Supported Commands only exists on 1.2+ controllers */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any cached EIR data and wipe
			 * the EIR stored in the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		/* Fetch extended feature page 1 (host features) */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
			     &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			     &enable);
	}
}
596
597static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
598{
599 struct hci_rp_read_local_version *rp = (void *) skb->data;
600
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
602
603 if (rp->status)
604 goto done;
605
606 hdev->hci_ver = rp->hci_ver;
607 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
608 hdev->lmp_ver = rp->lmp_ver;
609 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
610 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
611
612 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
613 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
614
615 if (test_bit(HCI_INIT, &hdev->flags))
616 hci_setup(hdev);
617
618done:
619 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
620}
621
622static void hci_setup_link_policy(struct hci_dev *hdev)
623{
624 struct hci_cp_write_def_link_policy cp;
625 u16 link_policy = 0;
626
627 if (lmp_rswitch_capable(hdev))
628 link_policy |= HCI_LP_RSWITCH;
629 if (hdev->features[0] & LMP_HOLD)
630 link_policy |= HCI_LP_HOLD;
631 if (lmp_sniff_capable(hdev))
632 link_policy |= HCI_LP_SNIFF;
633 if (hdev->features[1] & LMP_PARK)
634 link_policy |= HCI_LP_PARK;
635
636 cp.policy = cpu_to_le16(link_policy);
637 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
638}
639
640static void hci_cc_read_local_commands(struct hci_dev *hdev,
641 struct sk_buff *skb)
642{
643 struct hci_rp_read_local_commands *rp = (void *) skb->data;
644
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
646
647 if (rp->status)
648 goto done;
649
650 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
651
652 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
653 hci_setup_link_policy(hdev);
654
655done:
656 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
657}
658
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 * Caches the 8-byte LMP feature mask and derives the ACL packet
 * types and eSCO link types the controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
714
715static void hci_set_le_support(struct hci_dev *hdev)
716{
717 struct hci_cp_write_le_host_supported cp;
718
719 memset(&cp, 0, sizeof(cp));
720
721 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
722 cp.le = 1;
723 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
724 }
725
726 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
727 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
728 &cp);
729}
730
731static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
732 struct sk_buff *skb)
733{
734 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
735
736 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
737
738 if (rp->status)
739 goto done;
740
741 switch (rp->page) {
742 case 0:
743 memcpy(hdev->features, rp->features, 8);
744 break;
745 case 1:
746 memcpy(hdev->host_features, rp->features, 8);
747 break;
748 }
749
750 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
751 hci_set_le_support(hdev);
752
753done:
754 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
755}
756
757static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
758 struct sk_buff *skb)
759{
760 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
761
762 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
763
764 if (rp->status)
765 return;
766
767 hdev->flow_ctl_mode = rp->mode;
768
769 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
770}
771
772static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
773{
774 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
775
776 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
777
778 if (rp->status)
779 return;
780
781 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
782 hdev->sco_mtu = rp->sco_mtu;
783 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
784 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
785
786 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
787 hdev->sco_mtu = 64;
788 hdev->sco_pkts = 8;
789 }
790
791 hdev->acl_cnt = hdev->acl_pkts;
792 hdev->sco_cnt = hdev->sco_pkts;
793
794 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
795 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
796}
797
798static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
799{
800 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
801
802 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
803
804 if (!rp->status)
805 bacpy(&hdev->bdaddr, &rp->bdaddr);
806
807 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
808}
809
810static void hci_cc_read_data_block_size(struct hci_dev *hdev,
811 struct sk_buff *skb)
812{
813 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
814
815 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
816
817 if (rp->status)
818 return;
819
820 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
821 hdev->block_len = __le16_to_cpu(rp->block_len);
822 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
823
824 hdev->block_cnt = hdev->num_blocks;
825
826 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
827 hdev->block_cnt, hdev->block_len);
828
829 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
830}
831
832static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
833{
834 __u8 status = *((__u8 *) skb->data);
835
836 BT_DBG("%s status 0x%2.2x", hdev->name, status);
837
838 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
839}
840
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 * Caches the AMP controller's capabilities (bandwidth, latency,
 * PDU size, flush timeouts, ...) reported by the event.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}
864
865static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
866 struct sk_buff *skb)
867{
868 __u8 status = *((__u8 *) skb->data);
869
870 BT_DBG("%s status 0x%2.2x", hdev->name, status);
871
872 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
873}
874
875static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
876{
877 __u8 status = *((__u8 *) skb->data);
878
879 BT_DBG("%s status 0x%2.2x", hdev->name, status);
880
881 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
882}
883
884static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
885 struct sk_buff *skb)
886{
887 __u8 status = *((__u8 *) skb->data);
888
889 BT_DBG("%s status 0x%2.2x", hdev->name, status);
890
891 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
892}
893
894static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
895 struct sk_buff *skb)
896{
897 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
898
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
900
901 if (!rp->status)
902 hdev->inq_tx_power = rp->tx_power;
903
904 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
905}
906
907static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
908{
909 __u8 status = *((__u8 *) skb->data);
910
911 BT_DBG("%s status 0x%2.2x", hdev->name, status);
912
913 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
914}
915
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always informs mgmt of the outcome.  On success, the PIN length
 * from the sent command is stored on the matching connection (it is
 * needed later for link key handling).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
943
944static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
945{
946 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
949
950 hci_dev_lock(hdev);
951
952 if (test_bit(HCI_MGMT, &hdev->dev_flags))
953 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
954 rp->status);
955
956 hci_dev_unlock(hdev);
957}
958
959static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
960 struct sk_buff *skb)
961{
962 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
963
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
965
966 if (rp->status)
967 return;
968
969 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
970 hdev->le_pkts = rp->le_max_pkt;
971
972 hdev->le_cnt = hdev->le_pkts;
973
974 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
975
976 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
977}
978
979static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
980{
981 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
982
983 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984
985 hci_dev_lock(hdev);
986
987 if (test_bit(HCI_MGMT, &hdev->dev_flags))
988 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
989 rp->status);
990
991 hci_dev_unlock(hdev);
992}
993
994static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
995 struct sk_buff *skb)
996{
997 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
998
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1000
1001 hci_dev_lock(hdev);
1002
1003 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1004 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1005 ACL_LINK, 0, rp->status);
1006
1007 hci_dev_unlock(hdev);
1008}
1009
1010static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1011{
1012 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1013
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015
1016 hci_dev_lock(hdev);
1017
1018 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1019 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1020 0, rp->status);
1021
1022 hci_dev_unlock(hdev);
1023}
1024
1025static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1026 struct sk_buff *skb)
1027{
1028 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1029
1030 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1031
1032 hci_dev_lock(hdev);
1033
1034 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1035 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1036 ACL_LINK, 0, rp->status);
1037
1038 hci_dev_unlock(hdev);
1039}
1040
1041static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1042 struct sk_buff *skb)
1043{
1044 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1045
1046 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1047
1048 hci_dev_lock(hdev);
1049 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1050 rp->randomizer, rp->status);
1051 hci_dev_unlock(hdev);
1052}
1053
1054static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1055{
1056 __u8 status = *((__u8 *) skb->data);
1057
1058 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1059
1060 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1061
1062 if (status) {
1063 hci_dev_lock(hdev);
1064 mgmt_start_discovery_failed(hdev, status);
1065 hci_dev_unlock(hdev);
1066 return;
1067 }
1068}
1069
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * The direction of the state change has to be taken from the sent
 * command since the event only carries a status byte.  Enabling moves
 * discovery to FINDING; disabling either continues an interleaved
 * discovery with the BR/EDR phase or moves discovery to STOPPED.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			/* Scan did not start; report discovery failure */
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			/* Scan did not stop; report stop-discovery failure */
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* For interleaved discovery the LE phase is followed by a
		 * BR/EDR inquiry; otherwise discovery is now over.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1126
1127static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1128{
1129 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1130
1131 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1132
1133 if (rp->status)
1134 return;
1135
1136 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1137}
1138
1139static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1140{
1141 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1142
1143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1144
1145 if (rp->status)
1146 return;
1147
1148 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1149}
1150
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Keeps the cached host LE feature bit in sync with what the
 * controller accepted and reports the result to mgmt, except during
 * controller init where the command was not triggered by userspace.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
1176
1177static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1178{
1179 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1180
1181 if (status) {
1182 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1183 hci_conn_check_pending(hdev);
1184 hci_dev_lock(hdev);
1185 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1186 mgmt_start_discovery_failed(hdev, status);
1187 hci_dev_unlock(hdev);
1188 return;
1189 }
1190
1191 set_bit(HCI_INQUIRY, &hdev->flags);
1192
1193 hci_dev_lock(hdev);
1194 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1195 hci_dev_unlock(hdev);
1196}
1197
/* Command Status handler for HCI_OP_CREATE_CONN: track the connection
 * object for the outgoing ACL link, cleaning up or deferring on error
 * and creating the hash entry if the command was accepted.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c is the HCI "Command Disallowed" error;
			 * presumably a transient condition here, so keep
			 * the connection around (BT_CONNECT2) for up to
			 * two attempts before tearing it down -- TODO
			 * confirm against the Core Specification.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure a connection object exists
		 * for the upcoming Connection Complete event.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1237
1238static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1239{
1240 struct hci_cp_add_sco *cp;
1241 struct hci_conn *acl, *sco;
1242 __u16 handle;
1243
1244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1245
1246 if (!status)
1247 return;
1248
1249 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1250 if (!cp)
1251 return;
1252
1253 handle = __le16_to_cpu(cp->handle);
1254
1255 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1256
1257 hci_dev_lock(hdev);
1258
1259 acl = hci_conn_hash_lookup_handle(hdev, handle);
1260 if (acl) {
1261 sco = acl->link;
1262 if (sco) {
1263 sco->state = BT_CLOSED;
1264
1265 hci_proto_connect_cfm(sco, status);
1266 hci_conn_del(sco);
1267 }
1268 }
1269
1270 hci_dev_unlock(hdev);
1271}
1272
1273static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1274{
1275 struct hci_cp_auth_requested *cp;
1276 struct hci_conn *conn;
1277
1278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1279
1280 if (!status)
1281 return;
1282
1283 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1284 if (!cp)
1285 return;
1286
1287 hci_dev_lock(hdev);
1288
1289 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1290 if (conn) {
1291 if (conn->state == BT_CONFIG) {
1292 hci_proto_connect_cfm(conn, status);
1293 hci_conn_put(conn);
1294 }
1295 }
1296
1297 hci_dev_unlock(hdev);
1298}
1299
1300static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1301{
1302 struct hci_cp_set_conn_encrypt *cp;
1303 struct hci_conn *conn;
1304
1305 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1306
1307 if (!status)
1308 return;
1309
1310 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1311 if (!cp)
1312 return;
1313
1314 hci_dev_lock(hdev);
1315
1316 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1317 if (conn) {
1318 if (conn->state == BT_CONFIG) {
1319 hci_proto_connect_cfm(conn, status);
1320 hci_conn_put(conn);
1321 }
1322 }
1323
1324 hci_dev_unlock(hdev);
1325}
1326
1327static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1328 struct hci_conn *conn)
1329{
1330 if (conn->state != BT_CONFIG || !conn->out)
1331 return 0;
1332
1333 if (conn->pending_sec_level == BT_SECURITY_SDP)
1334 return 0;
1335
1336 /* Only request authentication for SSP connections or non-SSP
1337 * devices with sec_level HIGH or if MITM protection is requested */
1338 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1339 conn->pending_sec_level != BT_SECURITY_HIGH)
1340 return 0;
1341
1342 return 1;
1343}
1344
1345static int hci_resolve_name(struct hci_dev *hdev,
1346 struct inquiry_entry *e)
1347{
1348 struct hci_cp_remote_name_req cp;
1349
1350 memset(&cp, 0, sizeof(cp));
1351
1352 bacpy(&cp.bdaddr, &e->data.bdaddr);
1353 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1354 cp.pscan_mode = e->data.pscan_mode;
1355 cp.clock_offset = e->data.clock_offset;
1356
1357 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1358}
1359
1360static bool hci_resolve_next_name(struct hci_dev *hdev)
1361{
1362 struct discovery_state *discov = &hdev->discovery;
1363 struct inquiry_entry *e;
1364
1365 if (list_empty(&discov->resolve))
1366 return false;
1367
1368 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1369 if (!e)
1370 return false;
1371
1372 if (hci_resolve_name(hdev, e) == 0) {
1373 e->name_state = NAME_PENDING;
1374 return true;
1375 }
1376
1377 return false;
1378}
1379
/* Process the outcome of a remote-name lookup for @bdaddr: report the
 * device as connected to mgmt if needed, update the inquiry cache
 * entry's name state, and drive the discovery state machine (continue
 * resolving the next name or declare discovery stopped).
 * A NULL @name means the lookup failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First connected-event for this link: forward it to mgmt */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: stay in DISCOVERY_RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1422
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.  Only failures
 * are handled here; a success is resolved later by the Remote Name
 * Request Complete event.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Tell the discovery machinery the name lookup failed (NULL name) */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		/* NOTE(review): this inner cp shadows the outer cp pointer;
		 * harmless here but worth renaming in a cleanup.
		 */
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1461
1462static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1463{
1464 struct hci_cp_read_remote_features *cp;
1465 struct hci_conn *conn;
1466
1467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1468
1469 if (!status)
1470 return;
1471
1472 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1473 if (!cp)
1474 return;
1475
1476 hci_dev_lock(hdev);
1477
1478 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1479 if (conn) {
1480 if (conn->state == BT_CONFIG) {
1481 hci_proto_connect_cfm(conn, status);
1482 hci_conn_put(conn);
1483 }
1484 }
1485
1486 hci_dev_unlock(hdev);
1487}
1488
1489static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1490{
1491 struct hci_cp_read_remote_ext_features *cp;
1492 struct hci_conn *conn;
1493
1494 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1495
1496 if (!status)
1497 return;
1498
1499 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1500 if (!cp)
1501 return;
1502
1503 hci_dev_lock(hdev);
1504
1505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1506 if (conn) {
1507 if (conn->state == BT_CONFIG) {
1508 hci_proto_connect_cfm(conn, status);
1509 hci_conn_put(conn);
1510 }
1511 }
1512
1513 hci_dev_unlock(hdev);
1514}
1515
1516static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1517{
1518 struct hci_cp_setup_sync_conn *cp;
1519 struct hci_conn *acl, *sco;
1520 __u16 handle;
1521
1522 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1523
1524 if (!status)
1525 return;
1526
1527 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1528 if (!cp)
1529 return;
1530
1531 handle = __le16_to_cpu(cp->handle);
1532
1533 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1534
1535 hci_dev_lock(hdev);
1536
1537 acl = hci_conn_hash_lookup_handle(hdev, handle);
1538 if (acl) {
1539 sco = acl->link;
1540 if (sco) {
1541 sco->state = BT_CLOSED;
1542
1543 hci_proto_connect_cfm(sco, status);
1544 hci_conn_del(sco);
1545 }
1546 }
1547
1548 hci_dev_unlock(hdev);
1549}
1550
1551static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1552{
1553 struct hci_cp_sniff_mode *cp;
1554 struct hci_conn *conn;
1555
1556 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1557
1558 if (!status)
1559 return;
1560
1561 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1562 if (!cp)
1563 return;
1564
1565 hci_dev_lock(hdev);
1566
1567 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1568 if (conn) {
1569 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1570
1571 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1572 hci_sco_setup(conn, status);
1573 }
1574
1575 hci_dev_unlock(hdev);
1576}
1577
1578static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1579{
1580 struct hci_cp_exit_sniff_mode *cp;
1581 struct hci_conn *conn;
1582
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584
1585 if (!status)
1586 return;
1587
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1589 if (!cp)
1590 return;
1591
1592 hci_dev_lock(hdev);
1593
1594 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1595 if (conn) {
1596 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1597
1598 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1599 hci_sco_setup(conn, status);
1600 }
1601
1602 hci_dev_unlock(hdev);
1603}
1604
1605static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1606{
1607 struct hci_cp_disconnect *cp;
1608 struct hci_conn *conn;
1609
1610 if (!status)
1611 return;
1612
1613 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1614 if (!cp)
1615 return;
1616
1617 hci_dev_lock(hdev);
1618
1619 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1620 if (conn)
1621 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1622 conn->dst_type, status);
1623
1624 hci_dev_unlock(hdev);
1625}
1626
/* Command Status handler for HCI_OP_LE_CREATE_CONN.  Only failures
 * are handled: the pending LE connection (looked up by state, since
 * only one LE connect can be outstanding) is failed and deleted.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
		       conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}
1654
/* Command Status handler for HCI_OP_LE_START_ENC; only logs the
 * status, no further action is taken here.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
1659
/* Handle the Inquiry Complete event: finish the inquiry request and,
 * under mgmt, either move discovery on to name resolution or mark it
 * stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);

	/* Nothing more to do if no inquiry was actually in progress */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* No names left to resolve: discovery is done */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names of the devices found during inquiry */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1699
1700static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1701{
1702 struct inquiry_data data;
1703 struct inquiry_info *info = (void *) (skb->data + 1);
1704 int num_rsp = *((__u8 *) skb->data);
1705
1706 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1707
1708 if (!num_rsp)
1709 return;
1710
1711 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1712 return;
1713
1714 hci_dev_lock(hdev);
1715
1716 for (; num_rsp; num_rsp--, info++) {
1717 bool name_known, ssp;
1718
1719 bacpy(&data.bdaddr, &info->bdaddr);
1720 data.pscan_rep_mode = info->pscan_rep_mode;
1721 data.pscan_period_mode = info->pscan_period_mode;
1722 data.pscan_mode = info->pscan_mode;
1723 memcpy(data.dev_class, info->dev_class, 3);
1724 data.clock_offset = info->clock_offset;
1725 data.rssi = 0x00;
1726 data.ssp_mode = 0x00;
1727
1728 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1729 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1730 info->dev_class, 0, !name_known, ssp, NULL,
1731 0);
1732 }
1733
1734 hci_dev_unlock(hdev);
1735}
1736
/* Handle the Connection Complete event: bind the new handle to the
 * connection object, set up link state, kick off remote feature
 * discovery for ACL links, and notify the upper protocol layers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to a connection that was
		 * requested as eSCO; retry the lookup under ESCO_LINK
		 * and downgrade the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming non-SSP link without a stored key:
			 * presumably pairing will follow, so use the
			 * longer pairing disconnect timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure confirm to the protocols and drop the connection;
	 * successful non-ACL links are confirmed immediately (ACL links
	 * are confirmed later, once configuration completes).
	 */
	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1819
/* Handle the Connection Request event: accept the incoming link
 * (sending Accept Connection Request or Accept Synchronous Connection
 * Request as appropriate) unless the peer is blacklisted or no
 * protocol is willing to take it, in which case reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
	       ev->link_type);

	/* Let the protocol layers veto or adjust the link mode */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class for this peer */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency    = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			/* 0xff: don't care about retransmission effort */
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1894
1895static u8 hci_to_mgmt_reason(u8 err)
1896{
1897 switch (err) {
1898 case HCI_ERROR_CONNECTION_TIMEOUT:
1899 return MGMT_DEV_DISCONN_TIMEOUT;
1900 case HCI_ERROR_REMOTE_USER_TERM:
1901 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1902 case HCI_ERROR_REMOTE_POWER_OFF:
1903 return MGMT_DEV_DISCONN_REMOTE;
1904 case HCI_ERROR_LOCAL_HOST_TERM:
1905 return MGMT_DEV_DISCONN_LOCAL_HOST;
1906 default:
1907 return MGMT_DEV_DISCONN_UNKNOWN;
1908 }
1909}
1910
/* Handle the Disconnection Complete event: notify mgmt, drop the
 * stored link key if requested, confirm the disconnect to protocols
 * and delete the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only links previously reported connected to mgmt get a
	 * disconnect notification.
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1950
/* Handle the Authentication Complete event: update link security
 * state, continue connection setup (requesting encryption for SSP
 * links), and resolve any pending encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) links cannot be re-authenticated, so a
		 * successful re-auth attempt must not upgrade sec_level.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links, continue setup by enabling encryption;
		 * otherwise setup is done and can be confirmed now.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* An encryption change was also requested on this link */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2016
/* Handle the Remote Name Request Complete event: feed the result (or
 * failure) into the pending-name machinery and, for outgoing links
 * still in config, continue with an authentication request.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* A NULL name tells hci_check_pending_name the lookup failed */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2055
/* Handle the Encryption Change event: update the link mode, abort the
 * connection on failure, and complete setup or confirm the change to
 * the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* Failing to change encryption on an established link is
		 * treated as an authentication failure: disconnect.
		 */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2098
2099static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2101{
2102 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2103 struct hci_conn *conn;
2104
2105 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2106
2107 hci_dev_lock(hdev);
2108
2109 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2110 if (conn) {
2111 if (!ev->status)
2112 conn->link_mode |= HCI_LM_SECURE;
2113
2114 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2115
2116 hci_key_change_cfm(conn, ev->status);
2117 }
2118
2119 hci_dev_unlock(hdev);
2120}
2121
/* Handle the Read Remote Supported Features Complete event: store the
 * features, optionally continue with extended features (for SSP) or a
 * remote name request, and finish setup when no auth is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch extended features page 1
	 * before continuing; setup resumes in that event's handler.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the device connected,
	 * unless mgmt already knows about this connection.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2171
/* Handle the Read Remote Version Information Complete event; the
 * result is currently ignored, only the event is logged.
 */
static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2176
/* Handle the QoS Setup Complete event; the result is currently
 * ignored, only the event is logged.
 */
static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2182
/* Handle the Command Complete event: strip the event header and
 * dispatch to the per-opcode hci_cc_* handler, then cancel the
 * command timeout and restart the command queue if the controller
 * indicated it can accept more commands (ncmd).
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	/* Leave only the command return parameters in the skb */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed: stop the command timeout timer */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Controller can take more commands: kick the command queue */
	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2403
2404static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2405{
2406 struct hci_ev_cmd_status *ev = (void *) skb->data;
2407 __u16 opcode;
2408
2409 skb_pull(skb, sizeof(*ev));
2410
2411 opcode = __le16_to_cpu(ev->opcode);
2412
2413 switch (opcode) {
2414 case HCI_OP_INQUIRY:
2415 hci_cs_inquiry(hdev, ev->status);
2416 break;
2417
2418 case HCI_OP_CREATE_CONN:
2419 hci_cs_create_conn(hdev, ev->status);
2420 break;
2421
2422 case HCI_OP_ADD_SCO:
2423 hci_cs_add_sco(hdev, ev->status);
2424 break;
2425
2426 case HCI_OP_AUTH_REQUESTED:
2427 hci_cs_auth_requested(hdev, ev->status);
2428 break;
2429
2430 case HCI_OP_SET_CONN_ENCRYPT:
2431 hci_cs_set_conn_encrypt(hdev, ev->status);
2432 break;
2433
2434 case HCI_OP_REMOTE_NAME_REQ:
2435 hci_cs_remote_name_req(hdev, ev->status);
2436 break;
2437
2438 case HCI_OP_READ_REMOTE_FEATURES:
2439 hci_cs_read_remote_features(hdev, ev->status);
2440 break;
2441
2442 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2443 hci_cs_read_remote_ext_features(hdev, ev->status);
2444 break;
2445
2446 case HCI_OP_SETUP_SYNC_CONN:
2447 hci_cs_setup_sync_conn(hdev, ev->status);
2448 break;
2449
2450 case HCI_OP_SNIFF_MODE:
2451 hci_cs_sniff_mode(hdev, ev->status);
2452 break;
2453
2454 case HCI_OP_EXIT_SNIFF_MODE:
2455 hci_cs_exit_sniff_mode(hdev, ev->status);
2456 break;
2457
2458 case HCI_OP_DISCONNECT:
2459 hci_cs_disconnect(hdev, ev->status);
2460 break;
2461
2462 case HCI_OP_LE_CREATE_CONN:
2463 hci_cs_le_create_conn(hdev, ev->status);
2464 break;
2465
2466 case HCI_OP_LE_START_ENC:
2467 hci_cs_le_start_enc(hdev, ev->status);
2468 break;
2469
2470 default:
2471 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2472 break;
2473 }
2474
2475 if (ev->opcode != HCI_OP_NOP)
2476 del_timer(&hdev->cmd_timer);
2477
2478 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2479 atomic_set(&hdev->cmd_cnt, 1);
2480 if (!skb_queue_empty(&hdev->cmd_q))
2481 queue_work(hdev->workqueue, &hdev->cmd_work);
2482 }
2483}
2484
2485static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2486{
2487 struct hci_ev_role_change *ev = (void *) skb->data;
2488 struct hci_conn *conn;
2489
2490 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2491
2492 hci_dev_lock(hdev);
2493
2494 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2495 if (conn) {
2496 if (!ev->status) {
2497 if (ev->role)
2498 conn->link_mode &= ~HCI_LM_MASTER;
2499 else
2500 conn->link_mode |= HCI_LM_MASTER;
2501 }
2502
2503 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2504
2505 hci_role_switch_cfm(conn, ev->status, ev->role);
2506 }
2507
2508 hci_dev_unlock(hdev);
2509}
2510
/* Number of Completed Packets event: returns transmit quota to the
 * per-link-type counters so queued traffic can be sent. */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure every advertised handle entry fits in the payload
	 * (num_hndl is only read once the fixed header is known to fit) */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the completed packets to the matching quota,
		 * clamping at the controller's advertised maximum. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota instead. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed-up quota may allow queued packets to be transmitted */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2576
/* Number of Completed Data Blocks event: returns transmit blocks to the
 * shared block pool (block-based flow control, used e.g. by AMP). */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure every advertised handle entry fits in the payload
	 * (num_hndl is only read once the fixed header is known to fit) */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Return completed blocks to the pool, clamping at the
		 * controller's advertised maximum. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed-up blocks may allow queued packets to be transmitted */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2625
/* Mode Change event: a link moved between active and sniff/hold mode. */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only track the power-save state when the mode change was
		 * not locally requested (no mode-change pending flag set;
		 * the flag is consumed here either way). */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* A SCO setup may have been deferred until the ACL link
		 * left power-save mode; resume it now. */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2654
/* PIN Code Request event: legacy pairing asks for a PIN. */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress;
	 * the hold/put pair brackets the disc_timeout update. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	/* Reject the request outright when not pairable; otherwise let
	 * user space supply the PIN via the management interface. */
	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a secure (16 digit) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2691
/* Link Key Request event: reply with a stored link key if one exists
 * and satisfies the security requirements of the connection. */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without kernel-side link key storage there is nothing to
	 * reply with here. */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %s", hdev->name,
		       batostr(&ev->bdaddr));
		goto not_found;
	}

	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
	       batostr(&ev->bdaddr));

	/* Debug keys are only usable when explicitly enabled */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key must not be used when the
		 * connection requires MITM protection. */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys derived from short (<16 digit) PINs
		 * do not qualify for high security. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2754
/* Link Key Notification event: the controller created or changed a
 * link key; record it on the connection and optionally persist it. */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* hold/put brackets the disconnect timeout reset */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* Persist the key when kernel-side storage is enabled. Note that
	 * conn may be NULL here and is passed through as-is. */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2783
2784static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2785{
2786 struct hci_ev_clock_offset *ev = (void *) skb->data;
2787 struct hci_conn *conn;
2788
2789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2790
2791 hci_dev_lock(hdev);
2792
2793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2794 if (conn && !ev->status) {
2795 struct inquiry_entry *ie;
2796
2797 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2798 if (ie) {
2799 ie->data.clock_offset = ev->clock_offset;
2800 ie->timestamp = jiffies;
2801 }
2802 }
2803
2804 hci_dev_unlock(hdev);
2805}
2806
2807static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{
2809 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2810 struct hci_conn *conn;
2811
2812 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2813
2814 hci_dev_lock(hdev);
2815
2816 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2817 if (conn && !ev->status)
2818 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2819
2820 hci_dev_unlock(hdev);
2821}
2822
2823static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2824{
2825 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2826 struct inquiry_entry *ie;
2827
2828 BT_DBG("%s", hdev->name);
2829
2830 hci_dev_lock(hdev);
2831
2832 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2833 if (ie) {
2834 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2835 ie->timestamp = jiffies;
2836 }
2837
2838 hci_dev_unlock(hdev);
2839}
2840
2841static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2842 struct sk_buff *skb)
2843{
2844 struct inquiry_data data;
2845 int num_rsp = *((__u8 *) skb->data);
2846 bool name_known, ssp;
2847
2848 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2849
2850 if (!num_rsp)
2851 return;
2852
2853 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2854 return;
2855
2856 hci_dev_lock(hdev);
2857
2858 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2859 struct inquiry_info_with_rssi_and_pscan_mode *info;
2860 info = (void *) (skb->data + 1);
2861
2862 for (; num_rsp; num_rsp--, info++) {
2863 bacpy(&data.bdaddr, &info->bdaddr);
2864 data.pscan_rep_mode = info->pscan_rep_mode;
2865 data.pscan_period_mode = info->pscan_period_mode;
2866 data.pscan_mode = info->pscan_mode;
2867 memcpy(data.dev_class, info->dev_class, 3);
2868 data.clock_offset = info->clock_offset;
2869 data.rssi = info->rssi;
2870 data.ssp_mode = 0x00;
2871
2872 name_known = hci_inquiry_cache_update(hdev, &data,
2873 false, &ssp);
2874 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2875 info->dev_class, info->rssi,
2876 !name_known, ssp, NULL, 0);
2877 }
2878 } else {
2879 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2880
2881 for (; num_rsp; num_rsp--, info++) {
2882 bacpy(&data.bdaddr, &info->bdaddr);
2883 data.pscan_rep_mode = info->pscan_rep_mode;
2884 data.pscan_period_mode = info->pscan_period_mode;
2885 data.pscan_mode = 0x00;
2886 memcpy(data.dev_class, info->dev_class, 3);
2887 data.clock_offset = info->clock_offset;
2888 data.rssi = info->rssi;
2889 data.ssp_mode = 0x00;
2890 name_known = hci_inquiry_cache_update(hdev, &data,
2891 false, &ssp);
2892 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2893 info->dev_class, info->rssi,
2894 !name_known, ssp, NULL, 0);
2895 }
2896 }
2897
2898 hci_dev_unlock(hdev);
2899}
2900
/* Read Remote Extended Features Complete event. */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 0x01 carries the host feature bits; record remote SSP
	 * support both in the inquiry cache and on the connection. */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	/* The rest only applies while the connection is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the device as
	 * connected to the management layer; otherwise report now. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no outgoing authentication is needed, setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2949
/* Synchronous Connection Complete event: a SCO/eSCO link finished
 * setting up (or failed to). */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller may have downgraded an eSCO attempt to
		 * plain SCO; look the connection up under its original
		 * type and fix the type up. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
		/* For these errors retry once with a less demanding
		 * packet type before giving up. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3005
/* Synchronous Connection Changed event: no state is tracked for the
 * renegotiated parameters, so the event is only logged. */
static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
3010
/* Sniff Subrating event: no subrating state is tracked by the stack,
 * so only the status is logged for debugging. */
static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
}
3017
3018static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3019 struct sk_buff *skb)
3020{
3021 struct inquiry_data data;
3022 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3023 int num_rsp = *((__u8 *) skb->data);
3024 size_t eir_len;
3025
3026 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3027
3028 if (!num_rsp)
3029 return;
3030
3031 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3032 return;
3033
3034 hci_dev_lock(hdev);
3035
3036 for (; num_rsp; num_rsp--, info++) {
3037 bool name_known, ssp;
3038
3039 bacpy(&data.bdaddr, &info->bdaddr);
3040 data.pscan_rep_mode = info->pscan_rep_mode;
3041 data.pscan_period_mode = info->pscan_period_mode;
3042 data.pscan_mode = 0x00;
3043 memcpy(data.dev_class, info->dev_class, 3);
3044 data.clock_offset = info->clock_offset;
3045 data.rssi = info->rssi;
3046 data.ssp_mode = 0x01;
3047
3048 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3049 name_known = eir_has_data_type(info->data,
3050 sizeof(info->data),
3051 EIR_NAME_COMPLETE);
3052 else
3053 name_known = true;
3054
3055 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3056 &ssp);
3057 eir_len = eir_get_length(info->data, sizeof(info->data));
3058 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3059 info->dev_class, info->rssi, !name_known,
3060 ssp, info->data, eir_len);
3061 }
3062
3063 hci_dev_unlock(hdev);
3064}
3065
/* Encryption Key Refresh Complete event. */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* A successful refresh raises the link to the pending level */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is treated as an
	 * authentication failure and the link is torn down. */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* During setup, a successful refresh completes the
		 * connection; the result is confirmed either way. */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold/put brackets the disconnect timeout reset */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3109
3110static u8 hci_get_auth_req(struct hci_conn *conn)
3111{
3112 /* If remote requests dedicated bonding follow that lead */
3113 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3114 /* If both remote and local IO capabilities allow MITM
3115 * protection then require it, otherwise don't */
3116 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3117 return 0x02;
3118 else
3119 return 0x03;
3120 }
3121
3122 /* If remote requests no-bonding follow that lead */
3123 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3124 return conn->remote_auth | (conn->auth_type & 0x01);
3125
3126 return conn->auth_type;
3127}
3128
/* IO Capability Request event: SSP pairing asks for our capabilities. */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection referenced for the duration of pairing */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Reply when we are pairable, or for plain no-bonding requests */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we have it stored for this
		 * device and it is usable for this pairing direction. */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Not pairable: reject the pairing attempt */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3180
3181static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3182{
3183 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3184 struct hci_conn *conn;
3185
3186 BT_DBG("%s", hdev->name);
3187
3188 hci_dev_lock(hdev);
3189
3190 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3191 if (!conn)
3192 goto unlock;
3193
3194 conn->remote_cap = ev->capability;
3195 conn->remote_auth = ev->authentication;
3196 if (ev->oob_data)
3197 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3198
3199unlock:
3200 hci_dev_unlock(hdev);
3201}
3202
/* User Confirmation Request event: SSP numeric comparison / just-works
 * pairing asks us to confirm a passkey. */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Does either side insist on MITM protection? */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept so user space has a
		 * chance to observe the pairing first. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Forward the request to user space for an explicit decision */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3270
3271static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3272 struct sk_buff *skb)
3273{
3274 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3275
3276 BT_DBG("%s", hdev->name);
3277
3278 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3279 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3280}
3281
3282static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3283 struct sk_buff *skb)
3284{
3285 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3286 struct hci_conn *conn;
3287
3288 BT_DBG("%s", hdev->name);
3289
3290 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3291 if (!conn)
3292 return;
3293
3294 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3295 conn->passkey_entered = 0;
3296
3297 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3298 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3299 conn->dst_type, conn->passkey_notify,
3300 conn->passkey_entered);
3301}
3302
3303static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3304{
3305 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3306 struct hci_conn *conn;
3307
3308 BT_DBG("%s", hdev->name);
3309
3310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3311 if (!conn)
3312 return;
3313
3314 switch (ev->type) {
3315 case HCI_KEYPRESS_STARTED:
3316 conn->passkey_entered = 0;
3317 return;
3318
3319 case HCI_KEYPRESS_ENTERED:
3320 conn->passkey_entered++;
3321 break;
3322
3323 case HCI_KEYPRESS_ERASED:
3324 conn->passkey_entered--;
3325 break;
3326
3327 case HCI_KEYPRESS_CLEARED:
3328 conn->passkey_entered = 0;
3329 break;
3330
3331 case HCI_KEYPRESS_COMPLETED:
3332 return;
3333 }
3334
3335 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3336 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3337 conn->dst_type, conn->passkey_notify,
3338 conn->passkey_entered);
3339}
3340
/* Simple Pairing Complete event. */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Release the reference held across the pairing procedure */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3369
3370static void hci_remote_host_features_evt(struct hci_dev *hdev,
3371 struct sk_buff *skb)
3372{
3373 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3374 struct inquiry_entry *ie;
3375
3376 BT_DBG("%s", hdev->name);
3377
3378 hci_dev_lock(hdev);
3379
3380 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3381 if (ie)
3382 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3383
3384 hci_dev_unlock(hdev);
3385}
3386
/* Remote OOB Data Request event: the controller asks for the hash and
 * randomizer received out-of-band from the remote device. */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Reply with the stored OOB data for this device, or send a
	 * negative reply when none is known. */
	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3421
/* LE Connection Complete event (LE meta subevent). */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* Reuse the pending outgoing LE connection object if there is
	 * one; otherwise this is incoming and a new object is needed. */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	/* On failure report it to the management layer and clean up */
	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3472
3473static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3474{
3475 u8 num_reports = skb->data[0];
3476 void *ptr = &skb->data[1];
3477 s8 rssi;
3478
3479 hci_dev_lock(hdev);
3480
3481 while (num_reports--) {
3482 struct hci_ev_le_advertising_info *ev = ptr;
3483
3484 rssi = ev->data[ev->length];
3485 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3486 NULL, rssi, 0, 1, ev->data, ev->length);
3487
3488 ptr += sizeof(*ev) + ev->length + 1;
3489 }
3490
3491 hci_dev_unlock(hdev);
3492}
3493
/* LE Long Term Key Request event (LE meta subevent): the controller
 * asks for the LTK to start or resume encryption on a link. */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Look the key up by the EDIV/Rand pair the remote provided */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* An authenticated key raises the link to high security */
	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Short term keys are single-use; drop them once consumed */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No key available: reject so the remote can re-pair */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3536
3537static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3538{
3539 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3540
3541 skb_pull(skb, sizeof(*le_ev));
3542
3543 switch (le_ev->subevent) {
3544 case HCI_EV_LE_CONN_COMPLETE:
3545 hci_le_conn_complete_evt(hdev, skb);
3546 break;
3547
3548 case HCI_EV_LE_ADVERTISING_REPORT:
3549 hci_le_adv_report_evt(hdev, skb);
3550 break;
3551
3552 case HCI_EV_LE_LTK_REQ:
3553 hci_le_ltk_request_evt(hdev, skb);
3554 break;
3555
3556 default:
3557 break;
3558 }
3559}
3560
3561void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3562{
3563 struct hci_event_hdr *hdr = (void *) skb->data;
3564 __u8 event = hdr->evt;
3565
3566 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3567
3568 switch (event) {
3569 case HCI_EV_INQUIRY_COMPLETE:
3570 hci_inquiry_complete_evt(hdev, skb);
3571 break;
3572
3573 case HCI_EV_INQUIRY_RESULT:
3574 hci_inquiry_result_evt(hdev, skb);
3575 break;
3576
3577 case HCI_EV_CONN_COMPLETE:
3578 hci_conn_complete_evt(hdev, skb);
3579 break;
3580
3581 case HCI_EV_CONN_REQUEST:
3582 hci_conn_request_evt(hdev, skb);
3583 break;
3584
3585 case HCI_EV_DISCONN_COMPLETE:
3586 hci_disconn_complete_evt(hdev, skb);
3587 break;
3588
3589 case HCI_EV_AUTH_COMPLETE:
3590 hci_auth_complete_evt(hdev, skb);
3591 break;
3592
3593 case HCI_EV_REMOTE_NAME:
3594 hci_remote_name_evt(hdev, skb);
3595 break;
3596
3597 case HCI_EV_ENCRYPT_CHANGE:
3598 hci_encrypt_change_evt(hdev, skb);
3599 break;
3600
3601 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3602 hci_change_link_key_complete_evt(hdev, skb);
3603 break;
3604
3605 case HCI_EV_REMOTE_FEATURES:
3606 hci_remote_features_evt(hdev, skb);
3607 break;
3608
3609 case HCI_EV_REMOTE_VERSION:
3610 hci_remote_version_evt(hdev, skb);
3611 break;
3612
3613 case HCI_EV_QOS_SETUP_COMPLETE:
3614 hci_qos_setup_complete_evt(hdev, skb);
3615 break;
3616
3617 case HCI_EV_CMD_COMPLETE:
3618 hci_cmd_complete_evt(hdev, skb);
3619 break;
3620
3621 case HCI_EV_CMD_STATUS:
3622 hci_cmd_status_evt(hdev, skb);
3623 break;
3624
3625 case HCI_EV_ROLE_CHANGE:
3626 hci_role_change_evt(hdev, skb);
3627 break;
3628
3629 case HCI_EV_NUM_COMP_PKTS:
3630 hci_num_comp_pkts_evt(hdev, skb);
3631 break;
3632
3633 case HCI_EV_MODE_CHANGE:
3634 hci_mode_change_evt(hdev, skb);
3635 break;
3636
3637 case HCI_EV_PIN_CODE_REQ:
3638 hci_pin_code_request_evt(hdev, skb);
3639 break;
3640
3641 case HCI_EV_LINK_KEY_REQ:
3642 hci_link_key_request_evt(hdev, skb);
3643 break;
3644
3645 case HCI_EV_LINK_KEY_NOTIFY:
3646 hci_link_key_notify_evt(hdev, skb);
3647 break;
3648
3649 case HCI_EV_CLOCK_OFFSET:
3650 hci_clock_offset_evt(hdev, skb);
3651 break;
3652
3653 case HCI_EV_PKT_TYPE_CHANGE:
3654 hci_pkt_type_change_evt(hdev, skb);
3655 break;
3656
3657 case HCI_EV_PSCAN_REP_MODE:
3658 hci_pscan_rep_mode_evt(hdev, skb);
3659 break;
3660
3661 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3662 hci_inquiry_result_with_rssi_evt(hdev, skb);
3663 break;
3664
3665 case HCI_EV_REMOTE_EXT_FEATURES:
3666 hci_remote_ext_features_evt(hdev, skb);
3667 break;
3668
3669 case HCI_EV_SYNC_CONN_COMPLETE:
3670 hci_sync_conn_complete_evt(hdev, skb);
3671 break;
3672
3673 case HCI_EV_SYNC_CONN_CHANGED:
3674 hci_sync_conn_changed_evt(hdev, skb);
3675 break;
3676
3677 case HCI_EV_SNIFF_SUBRATE:
3678 hci_sniff_subrate_evt(hdev, skb);
3679 break;
3680
3681 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3682 hci_extended_inquiry_result_evt(hdev, skb);
3683 break;
3684
3685 case HCI_EV_KEY_REFRESH_COMPLETE:
3686 hci_key_refresh_complete_evt(hdev, skb);
3687 break;
3688
3689 case HCI_EV_IO_CAPA_REQUEST:
3690 hci_io_capa_request_evt(hdev, skb);
3691 break;
3692
3693 case HCI_EV_IO_CAPA_REPLY:
3694 hci_io_capa_reply_evt(hdev, skb);
3695 break;
3696
3697 case HCI_EV_USER_CONFIRM_REQUEST:
3698 hci_user_confirm_request_evt(hdev, skb);
3699 break;
3700
3701 case HCI_EV_USER_PASSKEY_REQUEST:
3702 hci_user_passkey_request_evt(hdev, skb);
3703 break;
3704
3705 case HCI_EV_USER_PASSKEY_NOTIFY:
3706 hci_user_passkey_notify_evt(hdev, skb);
3707 break;
3708
3709 case HCI_EV_KEYPRESS_NOTIFY:
3710 hci_keypress_notify_evt(hdev, skb);
3711 break;
3712
3713 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3714 hci_simple_pair_complete_evt(hdev, skb);
3715 break;
3716
3717 case HCI_EV_REMOTE_HOST_FEATURES:
3718 hci_remote_host_features_evt(hdev, skb);
3719 break;
3720
3721 case HCI_EV_LE_META:
3722 hci_le_meta_evt(hdev, skb);
3723 break;
3724
3725 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3726 hci_remote_oob_data_request_evt(hdev, skb);
3727 break;
3728
3729 case HCI_EV_NUM_COMP_BLOCKS:
3730 hci_num_comp_blocks_evt(hdev, skb);
3731 break;
3732
3733 default:
3734 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3735 break;
3736 }
3737
3738 kfree_skb(skb);
3739 hdev->stat.evt_rx++;
3740}