Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/hci_sock.h>
33#include <net/bluetooth/l2cap.h>
34#include <net/bluetooth/mgmt.h>
35
36#include "smp.h"
37#include "mgmt_util.h"
38#include "mgmt_config.h"
39#include "msft.h"
40#include "eir.h"
41#include "aosp.h"
42
43#define MGMT_VERSION 1
44#define MGMT_REVISION 23
45
/* Management commands supported on the HCI control channel. This list
 * is reported verbatim by MGMT_OP_READ_COMMANDS to trusted sockets
 * (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
136
/* Management events supported on the HCI control channel. This list
 * is reported by MGMT_OP_READ_COMMANDS to trusted sockets
 * (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
183
/* Read-only subset of commands available to untrusted (non-privileged)
 * control sockets (see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
196
/* Subset of events delivered to untrusted (non-privileged) control
 * sockets (see read_commands()).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
211
212#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
213
214#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
215 "\x00\x00\x00\x00\x00\x00\x00\x00"
216
/* HCI to MGMT error code conversion table, indexed by HCI status code.
 * mgmt_status() falls back to MGMT_STATUS_FAILED for status codes past
 * the end of this table.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
284
285static u8 mgmt_errno_status(int err)
286{
287 switch (err) {
288 case 0:
289 return MGMT_STATUS_SUCCESS;
290 case -EPERM:
291 return MGMT_STATUS_REJECTED;
292 case -EINVAL:
293 return MGMT_STATUS_INVALID_PARAMS;
294 case -EOPNOTSUPP:
295 return MGMT_STATUS_NOT_SUPPORTED;
296 case -EBUSY:
297 return MGMT_STATUS_BUSY;
298 case -ETIMEDOUT:
299 return MGMT_STATUS_AUTH_FAILED;
300 case -ENOMEM:
301 return MGMT_STATUS_NO_RESOURCES;
302 case -EISCONN:
303 return MGMT_STATUS_ALREADY_CONNECTED;
304 case -ENOTCONN:
305 return MGMT_STATUS_DISCONNECTED;
306 }
307
308 return MGMT_STATUS_FAILED;
309}
310
311static u8 mgmt_status(int err)
312{
313 if (err < 0)
314 return mgmt_errno_status(err);
315
316 if (err < ARRAY_SIZE(mgmt_status_table))
317 return mgmt_status_table[err];
318
319 return MGMT_STATUS_FAILED;
320}
321
/* Send an index-related event on the control channel to all sockets
 * matching @flag.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
328
/* Send an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
335
/* Send an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
342
/* Send a pre-built event skb on the control channel to trusted sockets,
 * excluding @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
348
349static u8 le_addr_type(u8 mgmt_addr_type)
350{
351 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
352 return ADDR_LE_DEV_PUBLIC;
353 else
354 return ADDR_LE_DEV_RANDOM;
355}
356
/* Fill a mgmt_rp_read_version reply with the compiled-in MGMT version
 * and revision. @ver must point to at least sizeof(struct
 * mgmt_rp_read_version) bytes.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
364
/* Handler for MGMT_OP_READ_VERSION: reply with the MGMT interface
 * version and revision.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
377
378static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
379 u16 data_len)
380{
381 struct mgmt_rp_read_commands *rp;
382 u16 num_commands, num_events;
383 size_t rp_size;
384 int i, err;
385
386 bt_dev_dbg(hdev, "sock %p", sk);
387
388 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
389 num_commands = ARRAY_SIZE(mgmt_commands);
390 num_events = ARRAY_SIZE(mgmt_events);
391 } else {
392 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
393 num_events = ARRAY_SIZE(mgmt_untrusted_events);
394 }
395
396 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
397
398 rp = kmalloc(rp_size, GFP_KERNEL);
399 if (!rp)
400 return -ENOMEM;
401
402 rp->num_commands = cpu_to_le16(num_commands);
403 rp->num_events = cpu_to_le16(num_events);
404
405 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
406 __le16 *opcode = rp->opcodes;
407
408 for (i = 0; i < num_commands; i++, opcode++)
409 put_unaligned_le16(mgmt_commands[i], opcode);
410
411 for (i = 0; i < num_events; i++, opcode++)
412 put_unaligned_le16(mgmt_events[i], opcode);
413 } else {
414 __le16 *opcode = rp->opcodes;
415
416 for (i = 0; i < num_commands; i++, opcode++)
417 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
418
419 for (i = 0; i < num_events; i++, opcode++)
420 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
421 }
422
423 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
424 rp, rp_size);
425 kfree(rp);
426
427 return err;
428}
429
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the ids of all
 * configured controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count candidates to size the reply. This is an
	 * upper bound; the second pass applies additional filters.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, skipping devices still in
	 * setup/config and those bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute rp_len since fewer entries may have been added than
	 * were counted in the first pass.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
487
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: reply with the ids of
 * all unconfigured controllers. Mirrors read_index_list() with the
 * HCI_UNCONFIGURED test inverted.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper-bound count for sizing; the fill loop filters further */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute rp_len in case the fill loop added fewer entries */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
545
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with id, bus type and
 * configured/unconfigured state for every usable controller. Also
 * switches this socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Size for every device; the fill loop below may skip some */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x01 = unconfigured, 0x00 = configured */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
611
612static bool is_configured(struct hci_dev *hdev)
613{
614 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
615 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
616 return false;
617
618 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
619 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
620 !bacmp(&hdev->public_addr, BDADDR_ANY))
621 return false;
622
623 return true;
624}
625
/* Build the bitmask of configuration options that are still required
 * before the controller counts as configured (mirrors the checks in
 * is_configured()).
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
641
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the currently missing options
 * to sockets watching option events, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
649
/* Complete @opcode with a reply carrying the currently missing
 * configuration options.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
657
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address is only configurable if the driver provides
	 * a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
685
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always available on BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M support is nested under EDR 2M capability */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE-capable controllers */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
737
/* Build the bitmask of currently selected PHYs. For BR/EDR this is
 * derived from the packet-type mask; note that the EDR HCI_2DHx/HCI_3DHx
 * bits are treated as inverted flags here (an EDR PHY is selected when
 * its pkt_type bit is clear). For LE it comes from the default TX/RX
 * PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
800
801static u32 get_configurable_phys(struct hci_dev *hdev)
802{
803 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
804 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
805}
806
807static u32 get_supported_settings(struct hci_dev *hdev)
808{
809 u32 settings = 0;
810
811 settings |= MGMT_SETTING_POWERED;
812 settings |= MGMT_SETTING_BONDABLE;
813 settings |= MGMT_SETTING_DEBUG_KEYS;
814 settings |= MGMT_SETTING_CONNECTABLE;
815 settings |= MGMT_SETTING_DISCOVERABLE;
816
817 if (lmp_bredr_capable(hdev)) {
818 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
819 settings |= MGMT_SETTING_FAST_CONNECTABLE;
820 settings |= MGMT_SETTING_BREDR;
821 settings |= MGMT_SETTING_LINK_SECURITY;
822
823 if (lmp_ssp_capable(hdev)) {
824 settings |= MGMT_SETTING_SSP;
825 }
826
827 if (lmp_sc_capable(hdev))
828 settings |= MGMT_SETTING_SECURE_CONN;
829
830 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
831 &hdev->quirks))
832 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 }
834
835 if (lmp_le_capable(hdev)) {
836 settings |= MGMT_SETTING_LE;
837 settings |= MGMT_SETTING_SECURE_CONN;
838 settings |= MGMT_SETTING_PRIVACY;
839 settings |= MGMT_SETTING_STATIC_ADDRESS;
840 settings |= MGMT_SETTING_ADVERTISING;
841 }
842
843 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
844 hdev->set_bdaddr)
845 settings |= MGMT_SETTING_CONFIGURATION;
846
847 if (cis_central_capable(hdev))
848 settings |= MGMT_SETTING_CIS_CENTRAL;
849
850 if (cis_peripheral_capable(hdev))
851 settings |= MGMT_SETTING_CIS_PERIPHERAL;
852
853 settings |= MGMT_SETTING_PHY_CONFIGURATION;
854
855 return settings;
856}
857
/* Build the bitmask of settings currently active on the controller,
 * derived from the hdev runtime flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
937
/* Look up a pending mgmt command with @opcode for @hdev on the control
 * channel.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
942
943u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
944{
945 struct mgmt_pending_cmd *cmd;
946
947 /* If there's a pending mgmt command the flags will not yet have
948 * their final values, so check for this first.
949 */
950 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
951 if (cmd) {
952 struct mgmt_mode *cp = cmd->param;
953 if (cp->val == 0x01)
954 return LE_AD_GENERAL;
955 else if (cp->val == 0x02)
956 return LE_AD_LIMITED;
957 } else {
958 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
959 return LE_AD_LIMITED;
960 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
961 return LE_AD_GENERAL;
962 }
963
964 return 0;
965}
966
967bool mgmt_get_connectable(struct hci_dev *hdev)
968{
969 struct mgmt_pending_cmd *cmd;
970
971 /* If there's a pending mgmt command the flag will not yet have
972 * it's final value, so check for this first.
973 */
974 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
975 if (cmd) {
976 struct mgmt_mode *cp = cmd->param;
977
978 return cp->val;
979 }
980
981 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
982}
983
/* hci_cmd_sync work: refresh the EIR data and class of device, queued
 * by service_cache_off() when the service cache is disabled.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
991
/* Delayed-work handler that turns the service cache off. Only queues
 * the sync work if HCI_SERVICE_CACHE was actually set (test-and-clear
 * makes this a no-op otherwise).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1002
1003static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1004{
1005 /* The generation of a new RPA and programming it into the
1006 * controller happens in the hci_req_enable_advertising()
1007 * function.
1008 */
1009 if (ext_adv_capable(hdev))
1010 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1011 else
1012 return hci_enable_advertising_sync(hdev);
1013}
1014
/* Delayed-work handler for RPA expiry: mark the RPA as expired and, if
 * advertising is currently enabled, queue the sync work that restarts
 * it (which refreshes the RPA).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1029
1030static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1031
/* Delayed-work handler for the discoverable timeout: clear the
 * discoverable flags, queue the sync work that updates the controller
 * and notify user space of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1056
1057static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1058
/* Finish a mesh transmit: notify user space with
 * MGMT_EV_MESH_PACKET_CMPLT (unless @silent) and remove the pending TX
 * entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1070
/* hci_cmd_sync work run when a mesh send window ends: clear the sending
 * flag, stop advertising and complete the next queued mesh TX, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1084
1085static int mesh_send_sync(struct hci_dev *hdev, void *data);
1086static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1087static void mesh_next(struct hci_dev *hdev, void *data, int err)
1088{
1089 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1090
1091 if (!mesh_tx)
1092 return;
1093
1094 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1095 mesh_send_start_complete);
1096
1097 if (err < 0)
1098 mesh_send_complete(hdev, mesh_tx, false);
1099 else
1100 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1101}
1102
1103static void mesh_send_done(struct work_struct *work)
1104{
1105 struct hci_dev *hdev = container_of(work, struct hci_dev,
1106 mesh_send_done.work);
1107
1108 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1109 return;
1110
1111 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1112}
1113
/* One-time switch of a controller into mgmt-controlled mode, done on
 * the first mgmt command addressed to it: install the delayed work
 * handlers used by this file and adjust the default flags.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already under mgmt control; nothing to initialize */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1135
/* MGMT_OP_READ_INFO handler: snapshot the controller's address,
 * version, settings, class and names under the hdev lock and return
 * them in a Read Controller Information response.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of Device is a fixed 3-byte field */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1165
1166static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1167{
1168 u16 eir_len = 0;
1169 size_t name_len;
1170
1171 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1172 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1173 hdev->dev_class, 3);
1174
1175 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1176 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1177 hdev->appearance);
1178
1179 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1180 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1181 hdev->dev_name, name_len);
1182
1183 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1184 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1185 hdev->short_name, name_len);
1186
1187 return eir_len;
1188}
1189
1190static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1191 void *data, u16 data_len)
1192{
1193 char buf[512];
1194 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1195 u16 eir_len;
1196
1197 bt_dev_dbg(hdev, "sock %p", sk);
1198
1199 memset(&buf, 0, sizeof(buf));
1200
1201 hci_dev_lock(hdev);
1202
1203 bacpy(&rp->bdaddr, &hdev->bdaddr);
1204
1205 rp->version = hdev->hci_ver;
1206 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1207
1208 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1209 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1210
1211
1212 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1213 rp->eir_len = cpu_to_le16(eir_len);
1214
1215 hci_dev_unlock(hdev);
1216
1217 /* If this command is called at least once, then the events
1218 * for class of device and local name changes are disabled
1219 * and only the new extended controller information event
1220 * is used.
1221 */
1222 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1223 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1224 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1225
1226 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1227 sizeof(*rp) + eir_len);
1228}
1229
/* Emit an Extended Controller Information Changed event carrying the
 * current EIR payload to all sockets that opted in via READ_EXT_INFO,
 * except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	/* Only sockets with HCI_MGMT_EXT_INFO_EVENTS set receive this */
	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1245
1246static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1247{
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1249
1250 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251 sizeof(settings));
1252}
1253
1254void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1255{
1256 struct mgmt_ev_advertising_added ev;
1257
1258 ev.instance = instance;
1259
1260 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1261}
1262
1263void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1264 u8 instance)
1265{
1266 struct mgmt_ev_advertising_removed ev;
1267
1268 ev.instance = instance;
1269
1270 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1271}
1272
1273static void cancel_adv_timeout(struct hci_dev *hdev)
1274{
1275 if (hdev->adv_instance_timeout) {
1276 hdev->adv_instance_timeout = 0;
1277 cancel_delayed_work(&hdev->adv_instance_expire);
1278 }
1279}
1280
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every stored connection parameter entry back onto the
	 * pending list that matches its auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			/* Other policies stay off both pending lists */
			break;
		}
	}
}
1305
1306static int new_settings(struct hci_dev *hdev, struct sock *skip)
1307{
1308 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1309
1310 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1311 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1312}
1313
/* Completion handler for MGMT_OP_SET_POWERED: respond to the requester
 * and, on a successful power-on, restore LE auto-connect state and
 * broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power-on path: rebuild pending LE connection
			 * lists and restart passive scanning.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1349
/* cmd_sync callback for MGMT_OP_SET_POWERED: apply the requested power
 * state (cp->val) to the controller.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1359
/* MGMT_OP_SET_POWERED handler: validate the request, reject it while a
 * conflicting operation is in flight, short-circuit no-op changes and
 * otherwise queue the actual power transition on the cmd_sync queue.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Power-off while a power-down is already in progress is busy */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	/* Only one SET_POWERED may be pending at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1418
/* Broadcast a New Settings event to all mgmt sockets (no socket skipped) */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1423
/* Shared context for mgmt_pending_foreach() response callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket; settings_rsp()
				 * takes a reference that the caller must
				 * sock_put() */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1429
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings and free the pending entry. The first
 * socket seen is stashed (with a reference) in match->sk so the caller
 * can later skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; mgmt_pending_free() does not remove
	 * the entry from the pending list itself.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1445
1446static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1447{
1448 u8 *status = data;
1449
1450 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1451 mgmt_pending_remove(cmd);
1452}
1453
1454static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455{
1456 if (cmd->cmd_complete) {
1457 u8 *status = data;
1458
1459 cmd->cmd_complete(cmd, *status);
1460 mgmt_pending_remove(cmd);
1461
1462 return;
1463 }
1464
1465 cmd_status_rsp(cmd, data);
1466}
1467
/* cmd_complete handler that echoes the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1473
/* cmd_complete handler for address-based commands: replies with just
 * the leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1479
1480static u8 mgmt_bredr_support(struct hci_dev *hdev)
1481{
1482 if (!lmp_bredr_capable(hdev))
1483 return MGMT_STATUS_NOT_SUPPORTED;
1484 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1485 return MGMT_STATUS_REJECTED;
1486 else
1487 return MGMT_STATUS_SUCCESS;
1488}
1489
1490static u8 mgmt_le_support(struct hci_dev *hdev)
1491{
1492 if (!lmp_le_capable(hdev))
1493 return MGMT_STATUS_NOT_SUPPORTED;
1494 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1495 return MGMT_STATUS_REJECTED;
1496 else
1497 return MGMT_STATUS_SUCCESS;
1498}
1499
/* Completion handler for MGMT_OP_SET_DISCOVERABLE: report the outcome
 * to the requester and arm the discoverable timeout when one was set.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistic flag set in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout only now that the controller state matches */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1533
/* cmd_sync callback for MGMT_OP_SET_DISCOVERABLE; the desired state
 * was already recorded in the HCI_DISCOVERABLE/HCI_LIMITED_DISCOVERABLE
 * flags before this was queued.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1540
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable (limited requires a timeout, off forbids
 * one). Fast paths handle the powered-off case and pure timeout
 * updates; otherwise the flags are set optimistically and the HCI
 * update is queued, with the timeout armed in the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Flags are set optimistically; the completion handler rolls
	 * the limited flag back on failure.
	 */
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1673
/* Completion handler for MGMT_OP_SET_CONNECTABLE: report the outcome
 * to the requester and broadcast the new settings on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1701
1702static int set_connectable_update_settings(struct hci_dev *hdev,
1703 struct sock *sk, u8 val)
1704{
1705 bool changed = false;
1706 int err;
1707
1708 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1709 changed = true;
1710
1711 if (val) {
1712 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1713 } else {
1714 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1715 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1716 }
1717
1718 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1719 if (err < 0)
1720 return err;
1721
1722 if (changed) {
1723 hci_update_scan(hdev);
1724 hci_update_passive_scan(hdev);
1725 return new_settings(hdev, sk);
1726 }
1727
1728 return 0;
1729}
1730
/* cmd_sync callback for MGMT_OP_SET_CONNECTABLE; the desired state was
 * already recorded in the flags before this was queued.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1737
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, take the
 * settings-only shortcut when powered off, otherwise set the flags
 * optimistically and queue the HCI update via cmd_sync.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag bookkeeping only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode and
		 * cancels any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1797
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. Purely a
 * host-side setting, so no HCI command is needed; on an actual change
 * the advertising/discoverable state is refreshed and New Settings is
 * broadcast.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test_and_* report whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1835
/* MGMT_OP_SET_LINK_SECURITY handler: BR/EDR only. When powered off the
 * change is flag bookkeeping; when powered, it is pushed down via
 * HCI_OP_WRITE_AUTH_ENABLE (the response is handled elsewhere — no
 * reply is sent from the success path here).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just track the desired state in the flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no-op reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1904
/* Completion handler for MGMT_OP_SET_SSP: on failure roll back the
 * flag and fail all pending SET_SSP commands; on success answer them
 * all, broadcast New Settings if the flag changed, and refresh EIR.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Undo the optimistic enable done in set_ssp_sync() and
		 * let everyone know the setting reverted.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first responder */
	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1946
/* cmd_sync callback for MGMT_OP_SET_SSP: optimistically set
 * HCI_SSP_ENABLED before issuing the HCI write and roll the flag back
 * if the write fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Only undo a flag change this call actually made */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1964
/* MGMT_OP_SET_SSP handler: validate BR/EDR + SSP capability, take the
 * flag-only shortcut when powered off or when the value is unchanged,
 * otherwise queue the HCI write via cmd_sync.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just track the desired state in the flag */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: no-op reply */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2039
/* MGMT_OP_SET_HS handler: High Speed support is not provided, so the
 * command is always rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2047
/* Completion handler for MGMT_OP_SET_LE: fail all pending SET_LE
 * commands on error, otherwise answer them and broadcast New Settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first responder */
	if (match.sk)
		sock_put(match.sk);
}
2068
/* cmd_sync callback for MGMT_OP_SET_LE: tear down advertising when
 * disabling LE, write LE host support, and refresh advertising data
 * and passive scanning when LE ends up enabled.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: remove all advertising instances first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2112
2113static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2114{
2115 struct mgmt_pending_cmd *cmd = data;
2116 u8 status = mgmt_status(err);
2117 struct sock *sk = cmd->sk;
2118
2119 if (status) {
2120 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2121 cmd_status_rsp, &status);
2122 return;
2123 }
2124
2125 mgmt_pending_remove(cmd);
2126 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2127}
2128
/* cmd_sync callback for MGMT_OP_SET_MESH_RECEIVER: record the mesh
 * enable flag and the requested AD-type filter list, then refresh
 * passive scanning.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes are the variable-length ad_types list.
	 * NOTE(review): assumes param_len >= sizeof(*cp), presumably
	 * guaranteed by the mgmt handler-table size check — confirm.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2151
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE plus the mesh
 * experimental flag; queues set_mesh_sync() to apply the settings.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2189
/* Completion hook for mesh_send_sync(): on failure complete the tx
 * entry (emitting the completion event), otherwise schedule the
 * mesh_send_done work after the packet's estimated air time.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* 25 ms budget per requested transmission */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2210
/* cmd_sync callback: transmit a mesh packet by installing it as a
 * dedicated advertising instance (one past the controller's normal
 * set count) and scheduling that instance.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* No free advertising slot for the mesh instance.
	 * NOTE(review): this returns a positive MGMT status while the
	 * other exits return 0 or a negative errno — confirm callers
	 * (mesh_send_start_complete() via mgmt_status()) handle both.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2264
2265static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2266{
2267 struct mgmt_rp_mesh_read_features *rp = data;
2268
2269 if (rp->used_handles >= rp->max_handles)
2270 return;
2271
2272 rp->handles[rp->used_handles++] = mesh_tx->handle;
2273}
2274
/* MGMT_OP_MESH_READ_FEATURES handler: report the controller index, the
 * maximum number of simultaneous mesh TX handles, and the handles
 * currently in use by the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* max_handles stays 0 (i.e. feature unavailable) while LE is off */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply so only used_handles entries of the handle
	 * array are sent back.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2301
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND_CANCEL: cancel either
 * every outstanding mesh transmission owned by the requesting socket
 * (handle 0) or just the single matching handle, then complete the
 * command and free it.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Drain all of this socket's transmissions */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a handle */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2328
/* MGMT_OP_MESH_SEND_CANCEL handler: validate feature/LE preconditions
 * and queue send_cancel() on the cmd_sync workqueue.  Uses
 * mgmt_pending_new() (not _add) so the command is not visible to
 * pending_find().
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		/* Queueing failed, so send_cancel() will never free cmd */
		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2362
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission.
 * The advertising payload (len minus the fixed header) must be 1-31
 * bytes.  If no transmission is in flight, mesh_send_sync() is queued
 * to start one; otherwise the packet simply waits in the queue.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's in-flight handles (via the Read Features
	 * reply structure) to enforce the MESH_HANDLES_MAX limit.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			/* Only remove the queued entry when another
			 * transmission was already running; otherwise
			 * mesh_send_start_complete() handles cleanup.
			 */
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Hand the one-byte TX handle back to userspace */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2423
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support on the
 * adapter, either by just flipping flags (adapter down or value
 * already in effect) or by queueing set_le_sync() for real HCI work.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* When powered off, or when the requested value already matches
	 * the host feature, only flags need to change - no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay enabled when LE goes off */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another LE/advertising state change is pending */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2512
2513/* This is a helper function to test for pending mgmt commands that can
2514 * cause CoD or EIR HCI commands. We can only allow one such pending
2515 * mgmt command at a time since otherwise we cannot easily track what
2516 * the current values are, will be, and based on that calculate if a new
2517 * HCI command needs to be sent and if yes with what value.
2518 */
2519static bool pending_eir_or_class(struct hci_dev *hdev)
2520{
2521 struct mgmt_pending_cmd *cmd;
2522
2523 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2524 switch (cmd->opcode) {
2525 case MGMT_OP_ADD_UUID:
2526 case MGMT_OP_REMOVE_UUID:
2527 case MGMT_OP_SET_DEV_CLASS:
2528 case MGMT_OP_SET_POWERED:
2529 return true;
2530 }
2531 }
2532
2533 return false;
2534}
2535
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; get_uuid_size() compares against the low
 * 12 bytes to detect 16/32-bit short-form UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2540
2541static u8 get_uuid_size(const u8 *uuid)
2542{
2543 u32 val;
2544
2545 if (memcmp(uuid, bluetooth_base_uuid, 12))
2546 return 128;
2547
2548 val = get_unaligned_le32(&uuid[12]);
2549 if (val > 0xffff)
2550 return 32;
2551
2552 return 16;
2553}
2554
/* Shared completion handler for the UUID/device-class commands:
 * replies with the (possibly updated) 3-byte Class of Device and
 * releases the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2566
/* hci_cmd_sync callback for MGMT_OP_ADD_UUID: refresh the Class of
 * Device first, then the EIR data (EIR only when the class update
 * succeeded).
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2577
/* MGMT_OP_ADD_UUID handler: record a new service UUID on the adapter
 * and schedule a Class of Device / EIR refresh.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-modifying command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2627
2628static bool enable_service_cache(struct hci_dev *hdev)
2629{
2630 if (!hdev_is_powered(hdev))
2631 return false;
2632
2633 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2634 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2635 CACHE_TIMEOUT);
2636 return true;
2637 }
2638
2639 return false;
2640}
2641
/* hci_cmd_sync callback for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device first, then the EIR data (EIR only when the class update
 * succeeded).
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2652
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID - or all of
 * them when the all-zero UUID is given - and schedule a Class of
 * Device / EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-modifying command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* An all-zero UUID means "remove every UUID" */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* On a powered adapter, defer the controller update via
		 * the service cache and answer immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2723
2724static int set_class_sync(struct hci_dev *hdev, void *data)
2725{
2726 int err = 0;
2727
2728 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2729 cancel_delayed_work_sync(&hdev->service_cache);
2730 err = hci_update_eir_sync(hdev);
2731 }
2732
2733 if (err)
2734 return err;
2735
2736 return hci_update_class_sync(hdev);
2737}
2738
/* MGMT_OP_SET_DEV_CLASS handler: store the major/minor device class
 * and, when the adapter is powered, push the resulting Class of
 * Device to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class-modifying command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low 2 bits of minor and high 3 bits of major must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just remember the values and reply */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2793
/* MGMT_OP_LOAD_LINK_KEYS handler: atomically replace all stored BR/EDR
 * link keys with the supplied list.  Blocked and debug-combination
 * keys are skipped; the debug_keys parameter controls the
 * HCI_KEEP_DEBUG_KEYS setting for future keys.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the full payload still
	 * fits in a u16 length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2883
2884static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2885 u8 addr_type, struct sock *skip_sk)
2886{
2887 struct mgmt_ev_device_unpaired ev;
2888
2889 bacpy(&ev.addr.bdaddr, bdaddr);
2890 ev.addr.type = addr_type;
2891
2892 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2893 skip_sk);
2894}
2895
2896static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2897{
2898 struct mgmt_pending_cmd *cmd = data;
2899 struct mgmt_cp_unpair_device *cp = cmd->param;
2900
2901 if (!err)
2902 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2903
2904 cmd->cmd_complete(cmd, err);
2905 mgmt_pending_free(cmd);
2906}
2907
2908static int unpair_device_sync(struct hci_dev *hdev, void *data)
2909{
2910 struct mgmt_pending_cmd *cmd = data;
2911 struct mgmt_cp_unpair_device *cp = cmd->param;
2912 struct hci_conn *conn;
2913
2914 if (cp->addr.type == BDADDR_BREDR)
2915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2916 &cp->addr.bdaddr);
2917 else
2918 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2919 le_addr_type(cp->addr.type));
2920
2921 if (!conn)
2922 return 0;
2923
2924 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2925}
2926
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored keys for a device
 * and optionally terminate an existing connection to it.  For BR/EDR
 * only the link key is removed; for LE any ongoing SMP pairing is
 * aborted and the associated keys removed.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Queue the link termination; the command completes from
	 * unpair_device_complete() once the link is gone.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3055
/* MGMT_OP_DISCONNECT handler: terminate an ACL or LE link on user
 * request.  The reply is deferred via the pending DISCONNECT command
 * until the controller confirms link termination.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3121
3122static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3123{
3124 switch (link_type) {
3125 case ISO_LINK:
3126 case LE_LINK:
3127 switch (addr_type) {
3128 case ADDR_LE_DEV_PUBLIC:
3129 return BDADDR_LE_PUBLIC;
3130
3131 default:
3132 /* Fallback to LE Random address type */
3133 return BDADDR_LE_RANDOM;
3134 }
3135
3136 default:
3137 /* Fallback to BR/EDR type */
3138 return BDADDR_BREDR;
3139 }
3140}
3141
/* MGMT_OP_GET_CONNECTIONS handler: return the addresses of all
 * mgmt-visible connections.  SCO/eSCO entries are not reported; the
 * reply length is recomputed after filtering them.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses; a SCO/eSCO entry is written
	 * into slot i but not counted, so it gets overwritten or trimmed.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3195
/* Helper: submit an HCI PIN Code Negative Reply on behalf of @sk and
 * track it as a pending mgmt command until the controller responds.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3216
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN to the
 * controller.  When the connection demands high security but the PIN
 * is not 16 bytes, the request is converted into a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte (secure) PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3278
3279static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3280 u16 len)
3281{
3282 struct mgmt_cp_set_io_capability *cp = data;
3283
3284 bt_dev_dbg(hdev, "sock %p", sk);
3285
3286 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3288 MGMT_STATUS_INVALID_PARAMS);
3289
3290 hci_dev_lock(hdev);
3291
3292 hdev->io_capability = cp->io_capability;
3293
3294 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3295
3296 hci_dev_unlock(hdev);
3297
3298 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3299 NULL, 0);
3300}
3301
3302static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3303{
3304 struct hci_dev *hdev = conn->hdev;
3305 struct mgmt_pending_cmd *cmd;
3306
3307 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3308 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3309 continue;
3310
3311 if (cmd->user_data != conn)
3312 continue;
3313
3314 return cmd;
3315 }
3316
3317 return NULL;
3318}
3319
/* Finish a MGMT_OP_PAIR_DEVICE command: report the result to
 * userspace, detach all pairing callbacks from the connection and
 * release the references taken while the command was pending.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3348
3349void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3350{
3351 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3352 struct mgmt_pending_cmd *cmd;
3353
3354 cmd = find_pairing(conn);
3355 if (cmd) {
3356 cmd->cmd_complete(cmd, status);
3357 mgmt_pending_remove(cmd);
3358 }
3359}
3360
3361static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3362{
3363 struct mgmt_pending_cmd *cmd;
3364
3365 BT_DBG("status %u", status);
3366
3367 cmd = find_pairing(conn);
3368 if (!cmd) {
3369 BT_DBG("Unable to find a pending command");
3370 return;
3371 }
3372
3373 cmd->cmd_complete(cmd, mgmt_status(status));
3374 mgmt_pending_remove(cmd);
3375}
3376
3377static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3378{
3379 struct mgmt_pending_cmd *cmd;
3380
3381 BT_DBG("status %u", status);
3382
3383 if (!status)
3384 return;
3385
3386 cmd = find_pairing(conn);
3387 if (!cmd) {
3388 BT_DBG("Unable to find a pending command");
3389 return;
3390 }
3391
3392 cmd->cmd_complete(cmd, mgmt_status(status));
3393 mgmt_pending_remove(cmd);
3394}
3395
3396static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3397 u16 len)
3398{
3399 struct mgmt_cp_pair_device *cp = data;
3400 struct mgmt_rp_pair_device rp;
3401 struct mgmt_pending_cmd *cmd;
3402 u8 sec_level, auth_type;
3403 struct hci_conn *conn;
3404 int err;
3405
3406 bt_dev_dbg(hdev, "sock %p", sk);
3407
3408 memset(&rp, 0, sizeof(rp));
3409 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3410 rp.addr.type = cp->addr.type;
3411
3412 if (!bdaddr_type_is_valid(cp->addr.type))
3413 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3414 MGMT_STATUS_INVALID_PARAMS,
3415 &rp, sizeof(rp));
3416
3417 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3418 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3419 MGMT_STATUS_INVALID_PARAMS,
3420 &rp, sizeof(rp));
3421
3422 hci_dev_lock(hdev);
3423
3424 if (!hdev_is_powered(hdev)) {
3425 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3426 MGMT_STATUS_NOT_POWERED, &rp,
3427 sizeof(rp));
3428 goto unlock;
3429 }
3430
3431 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3432 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3433 MGMT_STATUS_ALREADY_PAIRED, &rp,
3434 sizeof(rp));
3435 goto unlock;
3436 }
3437
3438 sec_level = BT_SECURITY_MEDIUM;
3439 auth_type = HCI_AT_DEDICATED_BONDING;
3440
3441 if (cp->addr.type == BDADDR_BREDR) {
3442 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3443 auth_type, CONN_REASON_PAIR_DEVICE,
3444 HCI_ACL_CONN_TIMEOUT);
3445 } else {
3446 u8 addr_type = le_addr_type(cp->addr.type);
3447 struct hci_conn_params *p;
3448
3449 /* When pairing a new device, it is expected to remember
3450 * this device for future connections. Adding the connection
3451 * parameter information ahead of time allows tracking
3452 * of the peripheral preferred values and will speed up any
3453 * further connection establishment.
3454 *
3455 * If connection parameters already exist, then they
3456 * will be kept and this function does nothing.
3457 */
3458 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3459
3460 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3461 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3462
3463 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3464 sec_level, HCI_LE_CONN_TIMEOUT,
3465 CONN_REASON_PAIR_DEVICE);
3466 }
3467
3468 if (IS_ERR(conn)) {
3469 int status;
3470
3471 if (PTR_ERR(conn) == -EBUSY)
3472 status = MGMT_STATUS_BUSY;
3473 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3474 status = MGMT_STATUS_NOT_SUPPORTED;
3475 else if (PTR_ERR(conn) == -ECONNREFUSED)
3476 status = MGMT_STATUS_REJECTED;
3477 else
3478 status = MGMT_STATUS_CONNECT_FAILED;
3479
3480 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 status, &rp, sizeof(rp));
3482 goto unlock;
3483 }
3484
3485 if (conn->connect_cfm_cb) {
3486 hci_conn_drop(conn);
3487 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3488 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3489 goto unlock;
3490 }
3491
3492 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3493 if (!cmd) {
3494 err = -ENOMEM;
3495 hci_conn_drop(conn);
3496 goto unlock;
3497 }
3498
3499 cmd->cmd_complete = pairing_complete;
3500
3501 /* For LE, just connecting isn't a proof that the pairing finished */
3502 if (cp->addr.type == BDADDR_BREDR) {
3503 conn->connect_cfm_cb = pairing_complete_cb;
3504 conn->security_cfm_cb = pairing_complete_cb;
3505 conn->disconn_cfm_cb = pairing_complete_cb;
3506 } else {
3507 conn->connect_cfm_cb = le_pairing_complete_cb;
3508 conn->security_cfm_cb = le_pairing_complete_cb;
3509 conn->disconn_cfm_cb = le_pairing_complete_cb;
3510 }
3511
3512 conn->io_capability = cp->io_cap;
3513 cmd->user_data = hci_conn_get(conn);
3514
3515 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3516 hci_conn_security(conn, sec_level, auth_type, true)) {
3517 cmd->cmd_complete(cmd, 0);
3518 mgmt_pending_remove(cmd);
3519 }
3520
3521 err = 0;
3522
3523unlock:
3524 hci_dev_unlock(hdev);
3525 return err;
3526}
3527
/* Cancel Pair Device (MGMT_OP_CANCEL_PAIR_DEVICE) handler.
 *
 * Aborts an in-flight Pair Device command matching @addr: completes the
 * pending command with MGMT_STATUS_CANCELLED, removes any stored link
 * key / SMP pairing state for the peer, and tears down the link if it
 * was created solely for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a Pair Device command in flight to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the hci_conn reference taken in pair_device() */
	conn = cmd->user_data;

	/* NOTE(review): only the bdaddr is compared, not the address type -
	 * confirm whether a type mismatch should also be rejected here.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3584
/* Common helper for the PIN code / user confirm / passkey (negative)
 * reply commands.
 *
 * For LE links the response is handed to SMP and the mgmt command is
 * completed immediately.  For BR/EDR a pending command is queued and
 * the matching HCI reply command (@hci_op) is sent to the controller;
 * @passkey is only meaningful for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection the pairing dialog belongs to */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3655
3656static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3657 void *data, u16 len)
3658{
3659 struct mgmt_cp_pin_code_neg_reply *cp = data;
3660
3661 bt_dev_dbg(hdev, "sock %p", sk);
3662
3663 return user_pairing_resp(sk, hdev, &cp->addr,
3664 MGMT_OP_PIN_CODE_NEG_REPLY,
3665 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3666}
3667
3668static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3669 u16 len)
3670{
3671 struct mgmt_cp_user_confirm_reply *cp = data;
3672
3673 bt_dev_dbg(hdev, "sock %p", sk);
3674
3675 if (len != sizeof(*cp))
3676 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3677 MGMT_STATUS_INVALID_PARAMS);
3678
3679 return user_pairing_resp(sk, hdev, &cp->addr,
3680 MGMT_OP_USER_CONFIRM_REPLY,
3681 HCI_OP_USER_CONFIRM_REPLY, 0);
3682}
3683
3684static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3685 void *data, u16 len)
3686{
3687 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3688
3689 bt_dev_dbg(hdev, "sock %p", sk);
3690
3691 return user_pairing_resp(sk, hdev, &cp->addr,
3692 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3693 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3694}
3695
3696static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3697 u16 len)
3698{
3699 struct mgmt_cp_user_passkey_reply *cp = data;
3700
3701 bt_dev_dbg(hdev, "sock %p", sk);
3702
3703 return user_pairing_resp(sk, hdev, &cp->addr,
3704 MGMT_OP_USER_PASSKEY_REPLY,
3705 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3706}
3707
3708static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3709 void *data, u16 len)
3710{
3711 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3712
3713 bt_dev_dbg(hdev, "sock %p", sk);
3714
3715 return user_pairing_resp(sk, hdev, &cp->addr,
3716 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3717 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3718}
3719
3720static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3721{
3722 struct adv_info *adv_instance;
3723
3724 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3725 if (!adv_instance)
3726 return 0;
3727
3728 /* stop if current instance doesn't need to be changed */
3729 if (!(adv_instance->flags & flags))
3730 return 0;
3731
3732 cancel_adv_timeout(hdev);
3733
3734 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3735 if (!adv_instance)
3736 return 0;
3737
3738 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3739
3740 return 0;
3741}
3742
3743static int name_changed_sync(struct hci_dev *hdev, void *data)
3744{
3745 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3746}
3747
/* Completion callback for set_name_sync().
 *
 * Replies to the pending Set Local Name command and, on success while
 * LE advertising is active, queues name_changed_sync() so advertising
 * carrying the local name gets refreshed.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Ignore stale completions for a command no longer pending */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3772
3773static int set_name_sync(struct hci_dev *hdev, void *data)
3774{
3775 if (lmp_bredr_capable(hdev)) {
3776 hci_update_name_sync(hdev);
3777 hci_update_eir_sync(hdev);
3778 }
3779
3780 /* The name is stored in the scan response data and so
3781 * no need to update the advertising data here.
3782 */
3783 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3784 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3785
3786 return 0;
3787}
3788
/* Set Local Name (MGMT_OP_SET_LOCAL_NAME) handler.
 *
 * Note the commit ordering: short_name is stored immediately, but
 * dev_name is only stored once the sync work was queued successfully
 * (or right away when the controller is powered off and no HCI
 * traffic is needed).  The "failed" label is also the normal exit
 * path - it only unlocks and returns.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: nothing to send to the controller, just store the
	 * name and notify listeners directly.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Commit dev_name only after the sync work was queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3851
3852static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3853{
3854 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3855}
3856
3857static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3858 u16 len)
3859{
3860 struct mgmt_cp_set_appearance *cp = data;
3861 u16 appearance;
3862 int err;
3863
3864 bt_dev_dbg(hdev, "sock %p", sk);
3865
3866 if (!lmp_le_capable(hdev))
3867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3868 MGMT_STATUS_NOT_SUPPORTED);
3869
3870 appearance = le16_to_cpu(cp->appearance);
3871
3872 hci_dev_lock(hdev);
3873
3874 if (hdev->appearance != appearance) {
3875 hdev->appearance = appearance;
3876
3877 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3878 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3879 NULL);
3880
3881 ext_info_changed(hdev, sk);
3882 }
3883
3884 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3885 0);
3886
3887 hci_dev_unlock(hdev);
3888
3889 return err;
3890}
3891
3892static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3893 void *data, u16 len)
3894{
3895 struct mgmt_rp_get_phy_configuration rp;
3896
3897 bt_dev_dbg(hdev, "sock %p", sk);
3898
3899 hci_dev_lock(hdev);
3900
3901 memset(&rp, 0, sizeof(rp));
3902
3903 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3904 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3905 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3906
3907 hci_dev_unlock(hdev);
3908
3909 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3910 &rp, sizeof(rp));
3911}
3912
3913int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3914{
3915 struct mgmt_ev_phy_configuration_changed ev;
3916
3917 memset(&ev, 0, sizeof(ev));
3918
3919 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3920
3921 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3922 sizeof(ev), skip);
3923}
3924
/* Completion callback for set_default_phy_sync().
 *
 * cmd->skb holds the controller's response (or an ERR_PTR / NULL on
 * failure) and is consumed here.  The first response byte carries the
 * HCI status when a valid skb is present.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Ignore stale completions for a command no longer pending */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* Derive the final status from the response skb if err was 0 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify other mgmt sockets of the new selection */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3961
3962static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3963{
3964 struct mgmt_pending_cmd *cmd = data;
3965 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3966 struct hci_cp_le_set_default_phy cp_phy;
3967 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3968
3969 memset(&cp_phy, 0, sizeof(cp_phy));
3970
3971 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3972 cp_phy.all_phys |= 0x01;
3973
3974 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3975 cp_phy.all_phys |= 0x02;
3976
3977 if (selected_phys & MGMT_PHY_LE_1M_TX)
3978 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3979
3980 if (selected_phys & MGMT_PHY_LE_2M_TX)
3981 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3982
3983 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3984 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3985
3986 if (selected_phys & MGMT_PHY_LE_1M_RX)
3987 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3988
3989 if (selected_phys & MGMT_PHY_LE_2M_RX)
3990 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3991
3992 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3993 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3994
3995 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3996 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3997
3998 return 0;
3999}
4000
/* Set PHY Configuration (MGMT_OP_SET_PHY_CONFIGURATION) handler.
 *
 * Applies the BR/EDR part directly by rewriting hdev->pkt_type and
 * queues set_default_phy_sync() for the LE part only when the LE
 * selection actually changed.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside what the controller supports */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to change: reply success right away */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Build the BR/EDR packet-type mask.  Note the inverted sense of
	 * the EDR (2M/3M) bits compared to the basic-rate ones: setting
	 * an EDR packet-type bit in pkt_type *excludes* that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* LE selection unchanged: only the BR/EDR part needed updating */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4129
/* Set Blocked Keys (MGMT_OP_SET_BLOCKED_KEYS) handler.
 *
 * Replaces the device's blocked-key list with the supplied entries.
 * Note that @err holds MGMT_STATUS_* values (not negative errnos) and
 * is passed as the command's status.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count that cannot overflow the u16 payload length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match key_count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the old one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* NOTE(review): on allocation failure the loop stops but
		 * the keys added so far stay in place - confirm this
		 * partial-apply behavior is intended.
		 */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
4178
/* Set Wideband Speech (MGMT_OP_SET_WIDEBAND_SPEECH) handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED flag.  Changing the value
 * while the controller is powered is rejected; only a no-op request
 * is accepted in the powered state.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only available if the driver declared the quirk */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered, only requests that match the current state pass */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4227
/* Read Controller Capabilities (MGMT_OP_READ_CONTROLLER_CAP) handler.
 *
 * Builds a TLV list (EIR-style entries) of security capabilities in a
 * stack buffer.  buf must stay large enough for the response header
 * plus every entry appended below - presumably at most 20 bytes total;
 * re-verify if more capability entries are added.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4294
/* Experimental feature UUIDs as used over the mgmt interface.  Each
 * table stores the UUID bytes in reversed (little-endian) order
 * relative to the canonical string given in the comment above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4338
/* Read Experimental Features Information handler.
 *
 * Builds the list of experimental features applicable to @hdev (or to
 * the non-controller index when @hdev is NULL) with their current
 * enabled state.  Each feature entry is 20 bytes: a 16-byte UUID plus
 * a 32-bit flags word where BIT(0) means "enabled".
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature only exists on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) flags the feature as supported by the controller */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* 20 bytes per entry: 16-byte UUID + __le32 flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4443
/* Notify subscribed sockets that the LL privacy experimental feature
 * changed, and keep hdev->conn_flags in sync: DEVICE_PRIVACY is only
 * advertised as a supported connection flag while the feature is on.
 * BIT(1) in the event flags marks the feature as controller-supported.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	// Do we need to be atomic with the conn_flags?
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4464
4465static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4466 bool enabled, struct sock *skip)
4467{
4468 struct mgmt_ev_exp_feature_changed ev;
4469
4470 memset(&ev, 0, sizeof(ev));
4471 memcpy(ev.uuid, uuid, 16);
4472 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4473
4474 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4475 &ev, sizeof(ev),
4476 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4477}
4478
/* Initializer for an experimental-features table entry: pairs a
 * feature UUID with its set-handler function.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4484
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Reply with the all-zero UUID and cleared flags */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Non-controller index: turn global debug off */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Controller index: disable LL privacy (only while powered off) */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4521
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set-handler for the debug feature UUID: toggles the global bt_dbg
 * state.  Only valid on the non-controller index and with a single
 * boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribers only when the state actually flipped */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4568
4569static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4570 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4571{
4572 struct mgmt_rp_set_exp_feature rp;
4573 bool val, changed;
4574 int err;
4575
4576 /* Command requires to use the controller index */
4577 if (!hdev)
4578 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4579 MGMT_OP_SET_EXP_FEATURE,
4580 MGMT_STATUS_INVALID_INDEX);
4581
4582 /* Parameters are limited to a single octet */
4583 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4584 return mgmt_cmd_status(sk, hdev->id,
4585 MGMT_OP_SET_EXP_FEATURE,
4586 MGMT_STATUS_INVALID_PARAMS);
4587
4588 /* Only boolean on/off is supported */
4589 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4590 return mgmt_cmd_status(sk, hdev->id,
4591 MGMT_OP_SET_EXP_FEATURE,
4592 MGMT_STATUS_INVALID_PARAMS);
4593
4594 val = !!cp->param[0];
4595
4596 if (val) {
4597 changed = !hci_dev_test_and_set_flag(hdev,
4598 HCI_MESH_EXPERIMENTAL);
4599 } else {
4600 hci_dev_clear_flag(hdev, HCI_MESH);
4601 changed = hci_dev_test_and_clear_flag(hdev,
4602 HCI_MESH_EXPERIMENTAL);
4603 }
4604
4605 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4606 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4607
4608 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4609
4610 err = mgmt_cmd_complete(sk, hdev->id,
4611 MGMT_OP_SET_EXP_FEATURE, 0,
4612 &rp, sizeof(rp));
4613
4614 if (changed)
4615 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4616
4617 return err;
4618}
4619
/* Toggle the LL privacy (RPA resolution) experimental feature.
 * Only allowed while the controller is powered off.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	/* Reply flags: BIT(0) = feature enabled,
	 * BIT(1) = supported settings changed.
	 */
	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* LL privacy is incompatible with legacy advertising */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4684
/* Toggle the quality report experimental feature, preferring the driver
 * hook (hdev->set_quality_report) over the AOSP vendor extension.
 * Runs under hci_req_sync_lock() since enabling may issue HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Need either a driver hook or AOSP vendor support */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		/* The flag is only updated if the controller accepted the
		 * change; on failure the old state stays in place.
		 */
		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4758
4759static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4760 struct mgmt_cp_set_exp_feature *cp,
4761 u16 data_len)
4762{
4763 bool val, changed;
4764 int err;
4765 struct mgmt_rp_set_exp_feature rp;
4766
4767 /* Command requires to use a valid controller index */
4768 if (!hdev)
4769 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4770 MGMT_OP_SET_EXP_FEATURE,
4771 MGMT_STATUS_INVALID_INDEX);
4772
4773 /* Parameters are limited to a single octet */
4774 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4775 return mgmt_cmd_status(sk, hdev->id,
4776 MGMT_OP_SET_EXP_FEATURE,
4777 MGMT_STATUS_INVALID_PARAMS);
4778
4779 /* Only boolean on/off is supported */
4780 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4781 return mgmt_cmd_status(sk, hdev->id,
4782 MGMT_OP_SET_EXP_FEATURE,
4783 MGMT_STATUS_INVALID_PARAMS);
4784
4785 val = !!cp->param[0];
4786 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4787
4788 if (!hdev->get_data_path_id) {
4789 return mgmt_cmd_status(sk, hdev->id,
4790 MGMT_OP_SET_EXP_FEATURE,
4791 MGMT_STATUS_NOT_SUPPORTED);
4792 }
4793
4794 if (changed) {
4795 if (val)
4796 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4797 else
4798 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4799 }
4800
4801 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4802 val, changed);
4803
4804 memcpy(rp.uuid, offload_codecs_uuid, 16);
4805 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4806 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4807 err = mgmt_cmd_complete(sk, hdev->id,
4808 MGMT_OP_SET_EXP_FEATURE, 0,
4809 &rp, sizeof(rp));
4810
4811 if (changed)
4812 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4813
4814 return err;
4815}
4816
4817static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4818 struct mgmt_cp_set_exp_feature *cp,
4819 u16 data_len)
4820{
4821 bool val, changed;
4822 int err;
4823 struct mgmt_rp_set_exp_feature rp;
4824
4825 /* Command requires to use a valid controller index */
4826 if (!hdev)
4827 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4828 MGMT_OP_SET_EXP_FEATURE,
4829 MGMT_STATUS_INVALID_INDEX);
4830
4831 /* Parameters are limited to a single octet */
4832 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4833 return mgmt_cmd_status(sk, hdev->id,
4834 MGMT_OP_SET_EXP_FEATURE,
4835 MGMT_STATUS_INVALID_PARAMS);
4836
4837 /* Only boolean on/off is supported */
4838 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4839 return mgmt_cmd_status(sk, hdev->id,
4840 MGMT_OP_SET_EXP_FEATURE,
4841 MGMT_STATUS_INVALID_PARAMS);
4842
4843 val = !!cp->param[0];
4844 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4845
4846 if (!hci_dev_le_state_simultaneous(hdev)) {
4847 return mgmt_cmd_status(sk, hdev->id,
4848 MGMT_OP_SET_EXP_FEATURE,
4849 MGMT_STATUS_NOT_SUPPORTED);
4850 }
4851
4852 if (changed) {
4853 if (val)
4854 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4855 else
4856 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4857 }
4858
4859 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4860 val, changed);
4861
4862 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4863 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4864 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4865 err = mgmt_cmd_complete(sk, hdev->id,
4866 MGMT_OP_SET_EXP_FEATURE, 0,
4867 &rp, sizeof(rp));
4868
4869 if (changed)
4870 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4871
4872 return err;
4873}
4874
#ifdef CONFIG_BT_LE
/* Toggle the experimental ISO socket support (global, not per controller). */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed = false;
	int err;

	/* This is a global feature; a controller index is rejected */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* That octet must encode a boolean */
	if (cp->param[0] > 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	/* Register or unregister the ISO socket protocol accordingly */
	err = enable ? iso_init() : iso_exit();
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event goes out on the global index */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, enable, sk);

	return err;
}
#endif
4925
/* Dispatch table mapping experimental-feature UUIDs to their handlers.
 * Scanned linearly by set_exp_feature(); terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4947
4948static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4949 void *data, u16 data_len)
4950{
4951 struct mgmt_cp_set_exp_feature *cp = data;
4952 size_t i = 0;
4953
4954 bt_dev_dbg(hdev, "sock %p", sk);
4955
4956 for (i = 0; exp_features[i].uuid; i++) {
4957 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4958 return exp_features[i].set_func(sk, hdev, cp, data_len);
4959 }
4960
4961 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4962 MGMT_OP_SET_EXP_FEATURE,
4963 MGMT_STATUS_NOT_SUPPORTED);
4964}
4965
/* Return the connection flags supported for a given LE connection
 * parameter entry, masking out flags the device cannot actually use.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enabled, otherwise they cannot be marked
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4981
/* MGMT_OP_GET_DEVICE_FLAGS handler: report the supported and currently-set
 * connection flags for a device on the accept list (BR/EDR) or in the LE
 * connection parameters.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		/* Unknown device: reply stays INVALID_PARAMS */
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE entries may support fewer flags than the controller */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5033
5034static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5035 bdaddr_t *bdaddr, u8 bdaddr_type,
5036 u32 supported_flags, u32 current_flags)
5037{
5038 struct mgmt_ev_device_flags_changed ev;
5039
5040 bacpy(&ev.addr.bdaddr, bdaddr);
5041 ev.addr.type = bdaddr_type;
5042 ev.supported_flags = cpu_to_le32(supported_flags);
5043 ev.current_flags = cpu_to_le32(current_flags);
5044
5045 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5046}
5047
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the connection flags of a
 * BR/EDR accept-list entry or an LE connection-parameter entry, then
 * notify other listeners on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hci_dev_lock() should probably be taken before this
	 * read — conn_flags can change concurrently; confirm before relying
	 * on this pre-lock check.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the supported set */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the per-device LE flag set */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	/* WRITE_ONCE pairs with lockless readers of params->flags */
	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5124
5125static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5126 u16 handle)
5127{
5128 struct mgmt_ev_adv_monitor_added ev;
5129
5130 ev.monitor_handle = cpu_to_le16(handle);
5131
5132 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5133}
5134
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for @handle. If a Remove Adv Monitor
 * command targeting a specific (non-zero) handle is pending, its issuer is
 * skipped — that socket is informed via its command reply instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Zero means "remove all"; only skip for a targeted remove */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5154
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features plus the handles of all registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries (enforced at registration
	 * time) — TODO confirm, otherwise this stack array could overflow
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered handles under the lock */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Reply is variable-length: header plus one u16 per handle */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5203
/* hci_cmd_sync completion for Add Adv Patterns Monitor: on success mark
 * the monitor registered, notify other listeners and refresh passive
 * scanning; always reply to the issuer and drop the pending command.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5231
5232static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5233{
5234 struct mgmt_pending_cmd *cmd = data;
5235 struct adv_monitor *monitor = cmd->user_data;
5236
5237 return hci_add_adv_monitor(hdev, monitor);
5238}
5239
5240static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5241 struct adv_monitor *m, u8 status,
5242 void *data, u16 len, u16 op)
5243{
5244 struct mgmt_pending_cmd *cmd;
5245 int err;
5246
5247 hci_dev_lock(hdev);
5248
5249 if (status)
5250 goto unlock;
5251
5252 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5253 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5254 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5255 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5256 status = MGMT_STATUS_BUSY;
5257 goto unlock;
5258 }
5259
5260 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5261 if (!cmd) {
5262 status = MGMT_STATUS_NO_RESOURCES;
5263 goto unlock;
5264 }
5265
5266 cmd->user_data = m;
5267 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5268 mgmt_add_adv_patterns_monitor_complete);
5269 if (err) {
5270 if (err == -ENOMEM)
5271 status = MGMT_STATUS_NO_RESOURCES;
5272 else
5273 status = MGMT_STATUS_FAILED;
5274
5275 goto unlock;
5276 }
5277
5278 hci_dev_unlock(hdev);
5279
5280 return 0;
5281
5282unlock:
5283 hci_free_adv_monitor(hdev, m);
5284 hci_dev_unlock(hdev);
5285 return mgmt_cmd_status(sk, hdev->id, op, status);
5286}
5287
5288static void parse_adv_monitor_rssi(struct adv_monitor *m,
5289 struct mgmt_adv_rssi_thresholds *rssi)
5290{
5291 if (rssi) {
5292 m->rssi.low_threshold = rssi->low_threshold;
5293 m->rssi.low_threshold_timeout =
5294 __le16_to_cpu(rssi->low_threshold_timeout);
5295 m->rssi.high_threshold = rssi->high_threshold;
5296 m->rssi.high_threshold_timeout =
5297 __le16_to_cpu(rssi->high_threshold_timeout);
5298 m->rssi.sampling_period = rssi->sampling_period;
5299 } else {
5300 /* Default values. These numbers are the least constricting
5301 * parameters for MSFT API to work, so it behaves as if there
5302 * are no rssi parameter to consider. May need to be changed
5303 * if other API are to be supported.
5304 */
5305 m->rssi.low_threshold = -127;
5306 m->rssi.low_threshold_timeout = 60;
5307 m->rssi.high_threshold = -127;
5308 m->rssi.high_threshold_timeout = 0;
5309 m->rssi.sampling_period = 0;
5310 }
5311}
5312
5313static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5314 struct mgmt_adv_pattern *patterns)
5315{
5316 u8 offset = 0, length = 0;
5317 struct adv_pattern *p = NULL;
5318 int i;
5319
5320 for (i = 0; i < pattern_count; i++) {
5321 offset = patterns[i].offset;
5322 length = patterns[i].length;
5323 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5324 length > HCI_MAX_EXT_AD_LENGTH ||
5325 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5326 return MGMT_STATUS_INVALID_PARAMS;
5327
5328 p = kmalloc(sizeof(*p), GFP_KERNEL);
5329 if (!p)
5330 return MGMT_STATUS_NO_RESOURCES;
5331
5332 p->ad_type = patterns[i].ad_type;
5333 p->offset = patterns[i].offset;
5334 p->length = patterns[i].length;
5335 memcpy(p->value, patterns[i].value, p->length);
5336
5337 INIT_LIST_HEAD(&p->list);
5338 list_add(&p->list, &m->patterns);
5339 }
5340
5341 return MGMT_STATUS_SUCCESS;
5342}
5343
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI block): allocate a
 * monitor with default RSSI thresholds and hand it, with any parse status,
 * to __add_adv_patterns_monitor() which owns it from then on.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL rssi selects the permissive defaults */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* m may be NULL here; __add_adv_patterns_monitor handles that */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5380
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor() but takes RSSI thresholds from the command.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* m may be NULL here; __add_adv_patterns_monitor handles that */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5417
/* hci_cmd_sync completion for Remove Adv Monitor: refresh passive scan on
 * success, reply to the issuer and drop the pending command.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo back the handle from the request (still little-endian) */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5440
5441static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5442{
5443 struct mgmt_pending_cmd *cmd = data;
5444 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5445 u16 handle = __le16_to_cpu(cp->monitor_handle);
5446
5447 if (!handle)
5448 return hci_remove_all_adv_monitor(hdev);
5449
5450 return hci_remove_single_adv_monitor(hdev, handle);
5451}
5452
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue monitor removal via
 * hci_cmd_sync, refusing while another monitor/LE operation is pending.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE state) operation may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion will never run; drop the pending command here */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5498
/* hci_cmd_sync completion for Read Local OOB Data: translate the HCI reply
 * (legacy P-192-only or Secure Connections extended form) into the MGMT
 * response, trimming the 256-bit fields when SC is not enabled.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* cmd->skb may be missing, an ERR_PTR, or carry an HCI status byte */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short HCI reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Without SC only the 192-bit half of the reply is sent */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short HCI reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5565
5566static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5567{
5568 struct mgmt_pending_cmd *cmd = data;
5569
5570 if (bredr_sc_enabled(hdev))
5571 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5572 else
5573 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5574
5575 if (IS_ERR(cmd->skb))
5576 return PTR_ERR(cmd->skb);
5577 else
5578 return 0;
5579}
5580
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue the HCI read via hci_cmd_sync.
 * Requires a powered, SSP-capable controller.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new (not _add): completion frees it explicitly */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5622
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two command layouts are accepted:
 * the legacy form carrying only the P-192 hash/randomizer, and the
 * extended form carrying both P-192 and P-256 values. An all-zero
 * hash/randomizer pair disables OOB data for that key size.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (short) form is only valid for BR/EDR. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5730
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler. A BDADDR_ANY address clears
 * all stored remote OOB data; any other address removes a single entry.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only BR/EDR addresses are accepted here. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	/* err is (re)assigned here so the mgmt response result wins. */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5767
5768void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5769{
5770 struct mgmt_pending_cmd *cmd;
5771
5772 bt_dev_dbg(hdev, "status %u", status);
5773
5774 hci_dev_lock(hdev);
5775
5776 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5777 if (!cmd)
5778 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5779
5780 if (!cmd)
5781 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5782
5783 if (cmd) {
5784 cmd->cmd_complete(cmd, mgmt_status(status));
5785 mgmt_pending_remove(cmd);
5786 }
5787
5788 hci_dev_unlock(hdev);
5789}
5790
5791static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5792 uint8_t *mgmt_status)
5793{
5794 switch (type) {
5795 case DISCOV_TYPE_LE:
5796 *mgmt_status = mgmt_le_support(hdev);
5797 if (*mgmt_status)
5798 return false;
5799 break;
5800 case DISCOV_TYPE_INTERLEAVED:
5801 *mgmt_status = mgmt_le_support(hdev);
5802 if (*mgmt_status)
5803 return false;
5804 fallthrough;
5805 case DISCOV_TYPE_BREDR:
5806 *mgmt_status = mgmt_bredr_support(hdev);
5807 if (*mgmt_status)
5808 return false;
5809 break;
5810 default:
5811 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5812 return false;
5813 }
5814
5815 return true;
5816}
5817
/* Completion callback for the queued start-discovery work: respond to
 * user space and transition the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only proceed if cmd is still on the pending list for one of the
	 * discovery variants; otherwise it was presumably removed already
	 * and must not be touched.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the first parameter byte (the discovery type). */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5836
/* hci_cmd_sync work function for the start-discovery commands. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5841
/* Common implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state and discovery type,
 * then queue the actual discovery start on the cmd_sync workqueue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if a discovery is already running or periodic inquiry
	 * is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5912
/* MGMT_OP_START_DISCOVERY handler. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5919
/* MGMT_OP_START_LIMITED_DISCOVERY handler. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5927
/* MGMT_OP_START_SERVICE_DISCOVERY handler: start discovery with result
 * filtering by RSSI threshold and/or a list of 128-bit service UUIDs
 * appended to the fixed-size command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count so that expected_len below cannot
	 * overflow the u16 arithmetic.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The variable-length tail must match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6039
6040void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6041{
6042 struct mgmt_pending_cmd *cmd;
6043
6044 bt_dev_dbg(hdev, "status %u", status);
6045
6046 hci_dev_lock(hdev);
6047
6048 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6049 if (cmd) {
6050 cmd->cmd_complete(cmd, mgmt_status(status));
6051 mgmt_pending_remove(cmd);
6052 }
6053
6054 hci_dev_unlock(hdev);
6055}
6056
/* Completion callback for the queued stop-discovery work. */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only proceed if cmd is still on the pending list; otherwise it
	 * was presumably removed already and must not be touched.
	 */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the first parameter byte (the discovery type). */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6073
/* hci_cmd_sync work function for MGMT_OP_STOP_DISCOVERY. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6078
/* MGMT_OP_STOP_DISCOVERY handler: validate that a discovery of the
 * requested type is active, then queue stopping it.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the type of the running discovery. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6123
/* MGMT_OP_CONFIRM_NAME handler: update the inquiry-cache name state for
 * a discovered device, influencing whether a remote name request will
 * be made for it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is active. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed, drop the entry
		 * from the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6165
6166static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6167 u16 len)
6168{
6169 struct mgmt_cp_block_device *cp = data;
6170 u8 status;
6171 int err;
6172
6173 bt_dev_dbg(hdev, "sock %p", sk);
6174
6175 if (!bdaddr_type_is_valid(cp->addr.type))
6176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6177 MGMT_STATUS_INVALID_PARAMS,
6178 &cp->addr, sizeof(cp->addr));
6179
6180 hci_dev_lock(hdev);
6181
6182 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6183 cp->addr.type);
6184 if (err < 0) {
6185 status = MGMT_STATUS_FAILED;
6186 goto done;
6187 }
6188
6189 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6190 sk);
6191 status = MGMT_STATUS_SUCCESS;
6192
6193done:
6194 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6195 &cp->addr, sizeof(cp->addr));
6196
6197 hci_dev_unlock(hdev);
6198
6199 return err;
6200}
6201
6202static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6203 u16 len)
6204{
6205 struct mgmt_cp_unblock_device *cp = data;
6206 u8 status;
6207 int err;
6208
6209 bt_dev_dbg(hdev, "sock %p", sk);
6210
6211 if (!bdaddr_type_is_valid(cp->addr.type))
6212 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6213 MGMT_STATUS_INVALID_PARAMS,
6214 &cp->addr, sizeof(cp->addr));
6215
6216 hci_dev_lock(hdev);
6217
6218 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6219 cp->addr.type);
6220 if (err < 0) {
6221 status = MGMT_STATUS_INVALID_PARAMS;
6222 goto done;
6223 }
6224
6225 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6226 sk);
6227 status = MGMT_STATUS_SUCCESS;
6228
6229done:
6230 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6231 &cp->addr, sizeof(cp->addr));
6232
6233 hci_dev_unlock(hdev);
6234
6235 return err;
6236}
6237
/* hci_cmd_sync work function: refresh the EIR data so it reflects the
 * updated Device ID record.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6242
/* MGMT_OP_SET_DEVICE_ID handler: store the DI record values and queue
 * an EIR update to advertise them.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* The EIR update is best-effort; its result is not reported. */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6274
/* Report the outcome of re-enabling instance advertising: failures are
 * logged as errors, success only at debug level.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6282
/* Completion callback for MGMT_OP_SET_ADVERTISING: sync the mgmt
 * HCI_ADVERTISING flag with the controller state, respond to all
 * pending commands, and re-enable instance advertising if needed.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending SET_ADVERTISING command with the
		 * same status.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the actual controller advertising state in the flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured
		 * one.
		 */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6330
/* hci_cmd_sync work function for MGMT_OP_SET_ADVERTISING: apply the
 * requested advertising mode (0x00 off, 0x01 on, 0x02 connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6364
/* MGMT_OP_SET_ADVERTISING handler. val may be 0x00 (off), 0x01 (on) or
 * 0x02 (on and connectable). Either toggles flags directly and responds,
 * or queues the HCI work when controller interaction is required.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast new settings only if a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another advertising or LE toggle is in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6449
6450static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6451 void *data, u16 len)
6452{
6453 struct mgmt_cp_set_static_address *cp = data;
6454 int err;
6455
6456 bt_dev_dbg(hdev, "sock %p", sk);
6457
6458 if (!lmp_le_capable(hdev))
6459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6460 MGMT_STATUS_NOT_SUPPORTED);
6461
6462 if (hdev_is_powered(hdev))
6463 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6464 MGMT_STATUS_REJECTED);
6465
6466 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6467 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6468 return mgmt_cmd_status(sk, hdev->id,
6469 MGMT_OP_SET_STATIC_ADDRESS,
6470 MGMT_STATUS_INVALID_PARAMS);
6471
6472 /* Two most significant bits shall be set */
6473 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6474 return mgmt_cmd_status(sk, hdev->id,
6475 MGMT_OP_SET_STATIC_ADDRESS,
6476 MGMT_STATUS_INVALID_PARAMS);
6477 }
6478
6479 hci_dev_lock(hdev);
6480
6481 bacpy(&hdev->static_addr, &cp->bdaddr);
6482
6483 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6484 if (err < 0)
6485 goto unlock;
6486
6487 err = new_settings(hdev, sk);
6488
6489unlock:
6490 hci_dev_unlock(hdev);
6491 return err;
6492}
6493
6494static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6495 void *data, u16 len)
6496{
6497 struct mgmt_cp_set_scan_params *cp = data;
6498 __u16 interval, window;
6499 int err;
6500
6501 bt_dev_dbg(hdev, "sock %p", sk);
6502
6503 if (!lmp_le_capable(hdev))
6504 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6505 MGMT_STATUS_NOT_SUPPORTED);
6506
6507 interval = __le16_to_cpu(cp->interval);
6508
6509 if (interval < 0x0004 || interval > 0x4000)
6510 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6511 MGMT_STATUS_INVALID_PARAMS);
6512
6513 window = __le16_to_cpu(cp->window);
6514
6515 if (window < 0x0004 || window > 0x4000)
6516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6517 MGMT_STATUS_INVALID_PARAMS);
6518
6519 if (window > interval)
6520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6521 MGMT_STATUS_INVALID_PARAMS);
6522
6523 hci_dev_lock(hdev);
6524
6525 hdev->le_scan_interval = interval;
6526 hdev->le_scan_window = window;
6527
6528 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6529 NULL, 0);
6530
6531 /* If background scan is running, restart it so new parameters are
6532 * loaded.
6533 */
6534 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6535 hdev->discovery.state == DISCOVERY_STOPPED)
6536 hci_update_passive_scan(hdev);
6537
6538 hci_dev_unlock(hdev);
6539
6540 return err;
6541}
6542
6543static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6544{
6545 struct mgmt_pending_cmd *cmd = data;
6546
6547 bt_dev_dbg(hdev, "err %d", err);
6548
6549 if (err) {
6550 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6551 mgmt_status(err));
6552 } else {
6553 struct mgmt_mode *cp = cmd->param;
6554
6555 if (cp->val)
6556 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6557 else
6558 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6559
6560 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6561 new_settings(hdev, cmd->sk);
6562 }
6563
6564 mgmt_pending_free(cmd);
6565}
6566
/* hci_cmd_sync work function for MGMT_OP_SET_FAST_CONNECTABLE. */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6574
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page
 * scan parameters, requiring BR/EDR and at least Bluetooth 1.2.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: respond with current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is flipped; the controller is
	 * configured on next power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6630
/* Completion callback for MGMT_OP_SET_BREDR. The HCI_BREDR_ENABLED
 * flag was set optimistically before queuing the HCI work, so it must
 * be rolled back on failure.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6653
6654static int set_bredr_sync(struct hci_dev *hdev, void *data)
6655{
6656 int status;
6657
6658 status = hci_write_fast_connectable_sync(hdev, false);
6659
6660 if (!status)
6661 status = hci_update_scan_sync(hdev);
6662
6663 /* Since only the advertising data flags will change, there
6664 * is no need to update the scan response data.
6665 */
6666 if (!status)
6667 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6668
6669 return status;
6670}
6671
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR on a dual-mode
 * controller. Disabling while powered on is rejected, and re-enabling
 * is rejected when a static address or secure connections would make
 * the configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: respond with current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only flags are updated; disabling BR/EDR also
	 * clears the BR/EDR-specific settings.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6771
6772static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6773{
6774 struct mgmt_pending_cmd *cmd = data;
6775 struct mgmt_mode *cp;
6776
6777 bt_dev_dbg(hdev, "err %d", err);
6778
6779 if (err) {
6780 u8 mgmt_err = mgmt_status(err);
6781
6782 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6783 goto done;
6784 }
6785
6786 cp = cmd->param;
6787
6788 switch (cp->val) {
6789 case 0x00:
6790 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6791 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6792 break;
6793 case 0x01:
6794 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6795 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6796 break;
6797 case 0x02:
6798 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6799 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6800 break;
6801 }
6802
6803 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6804 new_settings(hdev, cmd->sk);
6805
6806done:
6807 mgmt_pending_free(cmd);
6808}
6809
6810static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6811{
6812 struct mgmt_pending_cmd *cmd = data;
6813 struct mgmt_mode *cp = cmd->param;
6814 u8 val = !!cp->val;
6815
6816 /* Force write of val */
6817 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6818
6819 return hci_write_sc_support_sync(hdev, val);
6820}
6821
/* Handle the Set Secure Connections mgmt command: validate the
 * request, update the HCI_SC_ENABLED/HCI_SC_ONLY flags and, when the
 * controller is powered and SC-capable, queue the HCI write to apply
 * the new mode. Returns 0 or a negative errno.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC requires either an SC-capable controller (BR/EDR) or LE
	 * being enabled, where it is a host-side feature.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	/* Valid values: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* If the controller is powered off, not SC-capable, or BR/EDR
	 * is disabled, only the flags need updating - no HCI command
	 * has to be sent.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		/* Broadcast New Settings only if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both SC and SC-only state already match */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6902
/* Handle the Set Debug Keys mgmt command. Value 0x00 disables keeping
 * debug keys, 0x01 keeps them, 0x02 additionally tells an SSP-enabled
 * controller to generate debug keys itself. Returns 0 or a negative
 * errno.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* HCI_KEEP_DEBUG_KEYS tracks whether stored debug keys are
	 * retained (any non-zero value).
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* HCI_USE_DEBUG_KEYS tracks whether the controller should use
	 * debug keys itself (only for value 0x02).
	 */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the new SSP debug mode to a powered, SSP-enabled
	 * controller when the use-flag actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6949
/* Handle the Set Privacy mgmt command: store the IRK and toggle the
 * HCI_PRIVACY/HCI_LIMITED_PRIVACY flags. Only allowed while the
 * controller is powered off. Returns 0 or a negative errno.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* 0x00 = off, 0x01 = privacy, 0x02 = limited privacy */
	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be reconfigured while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7006
7007static bool irk_is_valid(struct mgmt_irk_info *irk)
7008{
7009 switch (irk->addr.type) {
7010 case BDADDR_LE_PUBLIC:
7011 return true;
7012
7013 case BDADDR_LE_RANDOM:
7014 /* Two most significant bits shall be set */
7015 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7016 return false;
7017 return true;
7018 }
7019
7020 return false;
7021}
7022
/* Handle the Load IRKs mgmt command: validate the variable-length key
 * list, replace the current SMP IRK store with it and enable RPA
 * resolving. Returns 0 or a negative errno.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry up front so the store is only cleared
	 * for a fully valid request.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7098
7099static bool ltk_is_valid(struct mgmt_ltk_info *key)
7100{
7101 if (key->initiator != 0x00 && key->initiator != 0x01)
7102 return false;
7103
7104 switch (key->addr.type) {
7105 case BDADDR_LE_PUBLIC:
7106 return true;
7107
7108 case BDADDR_LE_RANDOM:
7109 /* Two most significant bits shall be set */
7110 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7111 return false;
7112 return true;
7113 }
7114
7115 return false;
7116}
7117
/* Handle the Load Long Term Keys mgmt command: validate the key list,
 * replace the current SMP LTK store with it. Blocked and debug keys
 * are silently skipped. Returns 0 or a negative errno.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry up front so the store is only cleared
	 * for a fully valid request.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type and the
		 * authenticated (MITM-protected) attribute.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not stored: fall
			 * through to the default and skip the entry.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7218
7219static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7220{
7221 struct mgmt_pending_cmd *cmd = data;
7222 struct hci_conn *conn = cmd->user_data;
7223 struct mgmt_cp_get_conn_info *cp = cmd->param;
7224 struct mgmt_rp_get_conn_info rp;
7225 u8 status;
7226
7227 bt_dev_dbg(hdev, "err %d", err);
7228
7229 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7230
7231 status = mgmt_status(err);
7232 if (status == MGMT_STATUS_SUCCESS) {
7233 rp.rssi = conn->rssi;
7234 rp.tx_power = conn->tx_power;
7235 rp.max_tx_power = conn->max_tx_power;
7236 } else {
7237 rp.rssi = HCI_RSSI_INVALID;
7238 rp.tx_power = HCI_TX_POWER_INVALID;
7239 rp.max_tx_power = HCI_TX_POWER_INVALID;
7240 }
7241
7242 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7243 &rp, sizeof(rp));
7244
7245 mgmt_pending_free(cmd);
7246}
7247
/* Sync-queue callback for Get Connection Information: refresh the
 * cached RSSI and TX power values by querying the controller.
 * Returns 0 on success, a mgmt status or negative errno otherwise.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7285
/* Handle the Get Connection Information mgmt command: answer from the
 * cached values when they are fresh enough, otherwise queue a sync
 * refresh against the controller. Returns 0 or a negative errno.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7376
/* Completion callback for Get Clock Information: reply with the local
 * clock and, when a connection was involved, the piconet clock and its
 * accuracy, then release the pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On error only the (zeroed) address is reported back */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7407
/* Sync-queue callback for Get Clock Information: read the local clock
 * and, if the target connection still exists, the piconet clock too.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed parameters (handle 0, which 0x00) read the local
	 * clock; its result is ignored here and picked up from
	 * hdev->clock in the completion callback.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7429
/* Handle the Get Clock Information mgmt command (BR/EDR only):
 * validate the request and queue a sync read of the local and,
 * optionally, the piconet clock. Returns 0 or a negative errno.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist on BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-BDADDR_ANY address selects a specific connection whose
	 * piconet clock should also be reported.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7493
7494static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7495{
7496 struct hci_conn *conn;
7497
7498 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7499 if (!conn)
7500 return false;
7501
7502 if (conn->dst_type != type)
7503 return false;
7504
7505 if (conn->state != BT_CONNECTED)
7506 return false;
7507
7508 return true;
7509}
7510
/* This function requires the caller holds hdev->lock */
/* Create (or look up) the connection parameters for the given address
 * and move them onto the pending-connection or pending-report list
 * matching the requested auto-connect policy. Returns 0 on success or
 * -EIO when the parameters cannot be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* No list changes needed when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending list the params are on before
	 * re-inserting them according to the new policy.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7555
7556static void device_added(struct sock *sk, struct hci_dev *hdev,
7557 bdaddr_t *bdaddr, u8 type, u8 action)
7558{
7559 struct mgmt_ev_device_added ev;
7560
7561 bacpy(&ev.addr.bdaddr, bdaddr);
7562 ev.addr.type = type;
7563 ev.action = action;
7564
7565 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7566}
7567
/* Sync-queue callback: re-program passive scanning so a newly added
 * device is taken into account.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7572
/* Handle the Add Device mgmt command. For BR/EDR the device is put on
 * the accept list (incoming connections only); for LE the connection
 * parameters are created with the requested auto-connect policy and
 * passive scanning is re-programmed. Returns 0 or a negative errno.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan, 0x01 = allow incoming / direct,
	 * 0x02 = auto-connect.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Re-program passive scanning to pick up the new device */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7674
7675static void device_removed(struct sock *sk, struct hci_dev *hdev,
7676 bdaddr_t *bdaddr, u8 type)
7677{
7678 struct mgmt_ev_device_removed ev;
7679
7680 bacpy(&ev.addr.bdaddr, bdaddr);
7681 ev.addr.type = type;
7682
7683 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7684}
7685
/* Sync-queue callback: re-program passive scanning after one or more
 * devices have been removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7690
/* Handle the Remove Device mgmt command. A specific address removes a
 * single BR/EDR accept-list entry or LE connection parameters; the
 * wildcard BDADDR_ANY (with type 0) clears the whole accept list and
 * all removable LE connection parameters. Returns 0 or a negative
 * errno.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not added via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* The wildcard address requires type 0 (BR/EDR) */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Remove all LE connection parameters except disabled
		 * entries; keep explicit-connect entries alive by
		 * downgrading them instead of freeing.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7814
7815static int conn_update_sync(struct hci_dev *hdev, void *data)
7816{
7817 struct hci_conn_params *params = data;
7818 struct hci_conn *conn;
7819
7820 conn = hci_conn_hash_lookup_le(hdev, ¶ms->addr, params->addr_type);
7821 if (!conn)
7822 return -ECANCELED;
7823
7824 return hci_le_conn_update_sync(hdev, conn, params);
7825}
7826
/* Handle the Load Connection Parameters mgmt command: validate the
 * variable-length list and store each valid entry. When a single
 * entry updates an existing parameter set, a connection update is
 * triggered for a matching active connection where we are central.
 * Invalid entries are skipped, not failed. Returns 0 or a negative
 * errno.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* A multi-entry load replaces the disabled entries up front */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7945
/* Handle the Set External Configuration mgmt command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag based on cp->config and, when the
 * overall configured state changed as a result, migrates the controller
 * between the configured and unconfigured index lists.
 *
 * Returns 0 or a negative error from the reply helpers; precondition
 * failures are reported via mgmt command status replies.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* External configuration may only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* The config parameter is a strict boolean. */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only controllers declaring the external-config quirk qualify. */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Flip the flag atomically and remember whether anything changed. */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* When the HCI_UNCONFIGURED flag no longer matches the actual
	 * configured state, remove the index from its current list, flip
	 * the flag and re-register: if the device just became configured,
	 * schedule power-on via HCI_CONFIG; otherwise mark it raw and
	 * announce the (unconfigured) index again.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8001
/* Handle the Set Public Address mgmt command.
 *
 * Stores the requested address in hdev->public_addr for the driver's
 * set_bdaddr callback to program later. Only allowed while powered off
 * and only on drivers that provide set_bdaddr. If setting the address
 * completes configuration, the controller is moved to the configured
 * index list and power-on is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY (all zeroes) is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver callback the address can never be applied. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Remember whether this actually changes the stored address. */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Address was the missing piece: finish configuration and kick
	 * off the power-on sequence.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8053
/* Completion handler for the HCI local OOB data request issued by
 * read_local_ssp_oob_req().
 *
 * Parses the controller reply in cmd->skb, assembles the EIR-encoded
 * hash/randomizer payload, replies to the requesting socket and, on
 * success, broadcasts a Local OOB Data Updated event to other sockets
 * subscribed to OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this pending command was already superseded. */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* No transport error: derive the status from the reply itself. */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm
		 * intent before changing it.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the C192/R192 pair is available. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5-byte class-of-dev field + two 18-byte fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: C256/R256 always, plus C192/R192
		 * unless the controller runs in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* Failure paths reply with an empty EIR payload. */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to future OOB updates. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8176
8177static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8178 struct mgmt_cp_read_local_oob_ext_data *cp)
8179{
8180 struct mgmt_pending_cmd *cmd;
8181 int err;
8182
8183 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8184 cp, sizeof(*cp));
8185 if (!cmd)
8186 return -ENOMEM;
8187
8188 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8189 read_local_oob_ext_data_complete);
8190
8191 if (err < 0) {
8192 mgmt_pending_remove(cmd);
8193 return err;
8194 }
8195
8196 return 0;
8197}
8198
/* Handle the Read Local OOB Extended Data mgmt command.
 *
 * For BR/EDR with SSP enabled the OOB data must be fetched from the
 * controller asynchronously (read_local_ssp_oob_req()); for LE it is
 * assembled here from the local address, role, SC confirm/random
 * values and advertising flags. Failure cases reply synchronously with
 * an empty EIR payload and a non-zero status.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Determine the status and worst-case EIR length up front so the
	 * reply buffer can be sized before taking the device lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Rebuild eir_len from scratch while filling in the payload. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Data must come from the controller: hand off to
			 * the async request; its completion handler sends
			 * the reply.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address to report: the static address (trailing
		 * marker byte 0x01) when forced, when no public address is
		 * set, or when LE-only with a static address configured;
		 * otherwise the public address (marker 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role byte depends on whether advertising is enabled. */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to future OOB updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8359
8360static u32 get_supported_adv_flags(struct hci_dev *hdev)
8361{
8362 u32 flags = 0;
8363
8364 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8365 flags |= MGMT_ADV_FLAG_DISCOV;
8366 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8367 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8368 flags |= MGMT_ADV_FLAG_APPEARANCE;
8369 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8370 flags |= MGMT_ADV_PARAM_DURATION;
8371 flags |= MGMT_ADV_PARAM_TIMEOUT;
8372 flags |= MGMT_ADV_PARAM_INTERVALS;
8373 flags |= MGMT_ADV_PARAM_TX_POWER;
8374 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8375
8376 /* In extended adv TX_POWER returned from Set Adv Param
8377 * will be always valid.
8378 */
8379 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8380 flags |= MGMT_ADV_FLAG_TX_POWER;
8381
8382 if (ext_adv_capable(hdev)) {
8383 flags |= MGMT_ADV_FLAG_SEC_1M;
8384 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8385 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8386
8387 if (le_2m_capable(hdev))
8388 flags |= MGMT_ADV_FLAG_SEC_2M;
8389
8390 if (le_coded_capable(hdev))
8391 flags |= MGMT_ADV_FLAG_SEC_CODED;
8392 }
8393
8394 return flags;
8395}
8396
/* Handle the Read Advertising Features mgmt command.
 *
 * Reports the supported advertising flags, data-size limits, maximum
 * instance count and the identifiers of the registered instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comparison below uses adv_instance_cnt,
		 * not le_num_of_adv_sets as the comment above says —
		 * confirm which limit is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly. */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8451
8452static u8 calculate_name_len(struct hci_dev *hdev)
8453{
8454 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8455
8456 return eir_append_local_name(hdev, buf, 0);
8457}
8458
8459static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8460 bool is_adv_data)
8461{
8462 u8 max_len = max_adv_len(hdev);
8463
8464 if (is_adv_data) {
8465 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8466 MGMT_ADV_FLAG_LIMITED_DISCOV |
8467 MGMT_ADV_FLAG_MANAGED_FLAGS))
8468 max_len -= 3;
8469
8470 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8471 max_len -= 3;
8472 } else {
8473 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8474 max_len -= calculate_name_len(hdev);
8475
8476 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8477 max_len -= 4;
8478 }
8479
8480 return max_len;
8481}
8482
8483static bool flags_managed(u32 adv_flags)
8484{
8485 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8486 MGMT_ADV_FLAG_LIMITED_DISCOV |
8487 MGMT_ADV_FLAG_MANAGED_FLAGS);
8488}
8489
8490static bool tx_power_managed(u32 adv_flags)
8491{
8492 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8493}
8494
8495static bool name_managed(u32 adv_flags)
8496{
8497 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8498}
8499
8500static bool appearance_managed(u32 adv_flags)
8501{
8502 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8503}
8504
8505static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8506 u8 len, bool is_adv_data)
8507{
8508 int i, cur_len;
8509 u8 max_len;
8510
8511 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8512
8513 if (len > max_len)
8514 return false;
8515
8516 /* Make sure that the data is correctly formatted. */
8517 for (i = 0; i < len; i += (cur_len + 1)) {
8518 cur_len = data[i];
8519
8520 if (!cur_len)
8521 continue;
8522
8523 if (data[i + 1] == EIR_FLAGS &&
8524 (!is_adv_data || flags_managed(adv_flags)))
8525 return false;
8526
8527 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8528 return false;
8529
8530 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8531 return false;
8532
8533 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8534 return false;
8535
8536 if (data[i + 1] == EIR_APPEARANCE &&
8537 appearance_managed(adv_flags))
8538 return false;
8539
8540 /* If the current field length would exceed the total data
8541 * length, then it's invalid.
8542 */
8543 if (i + cur_len >= len)
8544 return false;
8545 }
8546
8547 return true;
8548}
8549
8550static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8551{
8552 u32 supported_flags, phy_flags;
8553
8554 /* The current implementation only supports a subset of the specified
8555 * flags. Also need to check mutual exclusiveness of sec flags.
8556 */
8557 supported_flags = get_supported_adv_flags(hdev);
8558 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8559 if (adv_flags & ~supported_flags ||
8560 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8561 return false;
8562
8563 return true;
8564}
8565
8566static bool adv_busy(struct hci_dev *hdev)
8567{
8568 return pending_find(MGMT_OP_SET_LE, hdev);
8569}
8570
8571static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8572 int err)
8573{
8574 struct adv_info *adv, *n;
8575
8576 bt_dev_dbg(hdev, "err %d", err);
8577
8578 hci_dev_lock(hdev);
8579
8580 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8581 u8 instance;
8582
8583 if (!adv->pending)
8584 continue;
8585
8586 if (!err) {
8587 adv->pending = false;
8588 continue;
8589 }
8590
8591 instance = adv->instance;
8592
8593 if (hdev->cur_adv_instance == instance)
8594 cancel_adv_timeout(hdev);
8595
8596 hci_remove_adv_instance(hdev, instance);
8597 mgmt_advertising_removed(sk, hdev, instance);
8598 }
8599
8600 hci_dev_unlock(hdev);
8601}
8602
8603static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8604{
8605 struct mgmt_pending_cmd *cmd = data;
8606 struct mgmt_cp_add_advertising *cp = cmd->param;
8607 struct mgmt_rp_add_advertising rp;
8608
8609 memset(&rp, 0, sizeof(rp));
8610
8611 rp.instance = cp->instance;
8612
8613 if (err)
8614 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8615 mgmt_status(err));
8616 else
8617 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8618 mgmt_status(err), &rp, sizeof(rp));
8619
8620 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8621
8622 mgmt_pending_free(cmd);
8623}
8624
8625static int add_advertising_sync(struct hci_dev *hdev, void *data)
8626{
8627 struct mgmt_pending_cmd *cmd = data;
8628 struct mgmt_cp_add_advertising *cp = cmd->param;
8629
8630 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8631}
8632
/* Handle the Add Advertising mgmt command.
 *
 * Validates the requested flags and the TLV-encoded advertising/scan
 * response data, registers (or replaces) the advertising instance and,
 * when HCI communication is needed, queues the work that programs and
 * starts advertising; the reply is then sent by
 * add_advertising_complete(), otherwise synchronously here.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance identifiers are 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared lengths. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* adv data and scan response share cp->data: adv data first. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Rewrite the instance so add_advertising_sync() schedules the
	 * right one (it may differ from the instance just added).
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8767
/* Completion handler for add_ext_adv_params_sync().
 *
 * Replies to the Add Extended Advertising Parameters command with the
 * selected TX power and the space left for advertising and scan
 * response data; on failure the instance is removed again. If the
 * instance vanished in the meantime, the pending command is freed
 * without a reply.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8817
8818static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8819{
8820 struct mgmt_pending_cmd *cmd = data;
8821 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8822
8823 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8824}
8825
/* Handle the Add Extended Advertising Parameters mgmt command.
 *
 * First half of the two-call extended advertising registration:
 * validates flags, creates an advertising instance without data and,
 * on ext-adv capable controllers, queues the HCI work to program the
 * parameters (replied to from add_ext_adv_params_complete()); legacy
 * controllers get a synchronous reply with default values.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance identifiers are 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, reply with
		 * default values right away.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8941
8942static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8943{
8944 struct mgmt_pending_cmd *cmd = data;
8945 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8946 struct mgmt_rp_add_advertising rp;
8947
8948 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8949
8950 memset(&rp, 0, sizeof(rp));
8951
8952 rp.instance = cp->instance;
8953
8954 if (err)
8955 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8956 mgmt_status(err));
8957 else
8958 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8959 mgmt_status(err), &rp, sizeof(rp));
8960
8961 mgmt_pending_free(cmd);
8962}
8963
8964static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8965{
8966 struct mgmt_pending_cmd *cmd = data;
8967 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8968 int err;
8969
8970 if (ext_adv_capable(hdev)) {
8971 err = hci_update_adv_data_sync(hdev, cp->instance);
8972 if (err)
8973 return err;
8974
8975 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8976 if (err)
8977 return err;
8978
8979 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8980 }
8981
8982 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8983}
8984
/* Handle the Add Extended Advertising Data mgmt command.
 *
 * Second half of the two-call registration: validates and stores the
 * TLV-encoded advertising/scan response data for an instance created
 * by add_ext_adv_params() and queues the HCI work to push it to the
 * controller. On any failure the instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by add_ext_adv_params(). */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9103
9104static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9105 int err)
9106{
9107 struct mgmt_pending_cmd *cmd = data;
9108 struct mgmt_cp_remove_advertising *cp = cmd->param;
9109 struct mgmt_rp_remove_advertising rp;
9110
9111 bt_dev_dbg(hdev, "err %d", err);
9112
9113 memset(&rp, 0, sizeof(rp));
9114 rp.instance = cp->instance;
9115
9116 if (err)
9117 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9118 mgmt_status(err));
9119 else
9120 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9121 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9122
9123 mgmt_pending_free(cmd);
9124}
9125
9126static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9127{
9128 struct mgmt_pending_cmd *cmd = data;
9129 struct mgmt_cp_remove_advertising *cp = cmd->param;
9130 int err;
9131
9132 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9133 if (err)
9134 return err;
9135
9136 if (list_empty(&hdev->adv_instances))
9137 err = hci_disable_advertising_sync(hdev);
9138
9139 return err;
9140}
9141
/* Handle MGMT_OP_REMOVE_ADVERTISING: validate the request under the
 * device lock and queue the actual removal to the cmd_sync context.
 * The response is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to a registered advertising
	 * instance; instance 0 means "remove all instances".
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight SET_LE command, which may toggle
	 * the advertising state underneath us.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On queueing failure the pending command is ours to free;
	 * otherwise the completion callback releases it.
	 */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9189
9190static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9191 void *data, u16 data_len)
9192{
9193 struct mgmt_cp_get_adv_size_info *cp = data;
9194 struct mgmt_rp_get_adv_size_info rp;
9195 u32 flags, supported_flags;
9196
9197 bt_dev_dbg(hdev, "sock %p", sk);
9198
9199 if (!lmp_le_capable(hdev))
9200 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9201 MGMT_STATUS_REJECTED);
9202
9203 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9204 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9205 MGMT_STATUS_INVALID_PARAMS);
9206
9207 flags = __le32_to_cpu(cp->flags);
9208
9209 /* The current implementation only supports a subset of the specified
9210 * flags.
9211 */
9212 supported_flags = get_supported_adv_flags(hdev);
9213 if (flags & ~supported_flags)
9214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9215 MGMT_STATUS_INVALID_PARAMS);
9216
9217 rp.instance = cp->instance;
9218 rp.flags = cp->flags;
9219 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9220 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9221
9222 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9223 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9224}
9225
/* Dispatch table for mgmt commands, indexed by opcode. Each entry
 * carries the handler, the expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and flags describing whether a controller
 * index is required (HCI_MGMT_NO_HDEV / HCI_MGMT_HDEV_OPTIONAL),
 * whether untrusted sockets may issue the command
 * (HCI_MGMT_UNTRUSTED) and whether it is valid on unconfigured
 * controllers (HCI_MGMT_UNCONFIGURED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9359
9360void mgmt_index_added(struct hci_dev *hdev)
9361{
9362 struct mgmt_ev_ext_index ev;
9363
9364 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9365 return;
9366
9367 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9368 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9369 HCI_MGMT_UNCONF_INDEX_EVENTS);
9370 ev.type = 0x01;
9371 } else {
9372 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9373 HCI_MGMT_INDEX_EVENTS);
9374 ev.type = 0x00;
9375 }
9376
9377 ev.bus = hdev->bus;
9378
9379 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9380 HCI_MGMT_EXT_INDEX_EVENTS);
9381}
9382
/* Announce removal of a controller index. All still-pending commands
 * on the index are failed with INVALID_INDEX before the events go
 * out, and mgmt-related delayed work is cancelled afterwards.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are never exposed via the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	/* Opcode 0 matches every pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9415
/* Called when a power-on attempt has finished. On success, stored LE
 * connection actions are restarted and passive scanning refreshed
 * before the pending SET_POWERED commands are answered.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp answers each pending command and records one
	 * socket in match.sk (with a held reference).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* Broadcast the new settings, skipping the command's own socket
	 * which already received a command response.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9438
/* Finalize mgmt state for a powered-off controller: answer pending
 * commands, clear the advertised class of device and broadcast the
 * new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9472
9473void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9474{
9475 struct mgmt_pending_cmd *cmd;
9476 u8 status;
9477
9478 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9479 if (!cmd)
9480 return;
9481
9482 if (err == -ERFKILL)
9483 status = MGMT_STATUS_RFKILLED;
9484 else
9485 status = MGMT_STATUS_FAILED;
9486
9487 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9488
9489 mgmt_pending_remove(cmd);
9490}
9491
9492void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9493 bool persistent)
9494{
9495 struct mgmt_ev_new_link_key ev;
9496
9497 memset(&ev, 0, sizeof(ev));
9498
9499 ev.store_hint = persistent;
9500 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9501 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9502 ev.key.type = key->type;
9503 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9504 ev.key.pin_len = key->pin_len;
9505
9506 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9507}
9508
9509static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9510{
9511 switch (ltk->type) {
9512 case SMP_LTK:
9513 case SMP_LTK_RESPONDER:
9514 if (ltk->authenticated)
9515 return MGMT_LTK_AUTHENTICATED;
9516 return MGMT_LTK_UNAUTHENTICATED;
9517 case SMP_LTK_P256:
9518 if (ltk->authenticated)
9519 return MGMT_LTK_P256_AUTH;
9520 return MGMT_LTK_P256_UNAUTH;
9521 case SMP_LTK_P256_DEBUG:
9522 return MGMT_LTK_P256_DEBUG;
9523 }
9524
9525 return MGMT_LTK_UNAUTHENTICATED;
9526}
9527
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new SMP long term key. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key created as pairing initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9570
9571void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9572{
9573 struct mgmt_ev_new_irk ev;
9574
9575 memset(&ev, 0, sizeof(ev));
9576
9577 ev.store_hint = persistent;
9578
9579 bacpy(&ev.rpa, &irk->rpa);
9580 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9581 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9582 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9583
9584 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9585}
9586
9587void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9588 bool persistent)
9589{
9590 struct mgmt_ev_new_csrk ev;
9591
9592 memset(&ev, 0, sizeof(ev));
9593
9594 /* Devices using resolvable or non-resolvable random addresses
9595 * without providing an identity resolving key don't require
9596 * to store signature resolving keys. Their addresses will change
9597 * the next time around.
9598 *
9599 * Only when a remote device provides an identity address
9600 * make sure the signature resolving key is stored. So allow
9601 * static random and public addresses here.
9602 */
9603 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9604 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9605 ev.store_hint = 0x00;
9606 else
9607 ev.store_hint = persistent;
9608
9609 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9610 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9611 ev.key.type = csrk->type;
9612 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9613
9614 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9615}
9616
9617void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9618 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9619 u16 max_interval, u16 latency, u16 timeout)
9620{
9621 struct mgmt_ev_new_conn_param ev;
9622
9623 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9624 return;
9625
9626 memset(&ev, 0, sizeof(ev));
9627 bacpy(&ev.addr.bdaddr, bdaddr);
9628 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9629 ev.store_hint = store_hint;
9630 ev.min_interval = cpu_to_le16(min_interval);
9631 ev.max_interval = cpu_to_le16(max_interval);
9632 ev.latency = cpu_to_le16(latency);
9633 ev.timeout = cpu_to_le16(timeout);
9634
9635 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9636}
9637
9638void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9639 u8 *name, u8 name_len)
9640{
9641 struct sk_buff *skb;
9642 struct mgmt_ev_device_connected *ev;
9643 u16 eir_len = 0;
9644 u32 flags = 0;
9645
9646 if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9647 return;
9648
9649 /* allocate buff for LE or BR/EDR adv */
9650 if (conn->le_adv_data_len > 0)
9651 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9652 sizeof(*ev) + conn->le_adv_data_len);
9653 else
9654 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9655 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9656 eir_precalc_len(sizeof(conn->dev_class)));
9657
9658 ev = skb_put(skb, sizeof(*ev));
9659 bacpy(&ev->addr.bdaddr, &conn->dst);
9660 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9661
9662 if (conn->out)
9663 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9664
9665 ev->flags = __cpu_to_le32(flags);
9666
9667 /* We must ensure that the EIR Data fields are ordered and
9668 * unique. Keep it simple for now and avoid the problem by not
9669 * adding any BR/EDR data to the LE adv.
9670 */
9671 if (conn->le_adv_data_len > 0) {
9672 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9673 eir_len = conn->le_adv_data_len;
9674 } else {
9675 if (name)
9676 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9677
9678 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9679 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9680 conn->dev_class, sizeof(conn->dev_class));
9681 }
9682
9683 ev->eir_len = cpu_to_le16(eir_len);
9684
9685 mgmt_event_skb(skb, NULL);
9686}
9687
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command and hand its socket (with a held reference) back to the
 * caller via *data. The caller must sock_put() it.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Take a reference before the command (and its sk ref) goes. */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
9699
/* mgmt_pending_foreach() callback: emit the DEVICE_UNPAIRED event and
 * complete a pending UNPAIR_DEVICE command. data is the hci_dev.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9710
9711bool mgmt_powering_down(struct hci_dev *hdev)
9712{
9713 struct mgmt_pending_cmd *cmd;
9714 struct mgmt_mode *cp;
9715
9716 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9717 return true;
9718
9719 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9720 if (!cmd)
9721 return false;
9722
9723 cp = cmd->param;
9724 if (!cp->val)
9725 return true;
9726
9727 return false;
9728}
9729
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending
 * DISCONNECT/UNPAIR_DEVICE commands for the connection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only report connections that were announced via
	 * MGMT_EV_DEVICE_CONNECTED in the first place.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back the command's socket with a held
	 * reference so the event can skip it below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9761
9762void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9763 u8 link_type, u8 addr_type, u8 status)
9764{
9765 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9766 struct mgmt_cp_disconnect *cp;
9767 struct mgmt_pending_cmd *cmd;
9768
9769 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9770 hdev);
9771
9772 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9773 if (!cmd)
9774 return;
9775
9776 cp = cmd->param;
9777
9778 if (bacmp(bdaddr, &cp->addr.bdaddr))
9779 return;
9780
9781 if (cp->addr.type != bdaddr_type)
9782 return;
9783
9784 cmd->cmd_complete(cmd, mgmt_status(status));
9785 mgmt_pending_remove(cmd);
9786}
9787
9788void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9789 u8 addr_type, u8 status)
9790{
9791 struct mgmt_ev_connect_failed ev;
9792
9793 bacpy(&ev.addr.bdaddr, bdaddr);
9794 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9795 ev.status = mgmt_status(status);
9796
9797 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9798}
9799
9800void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9801{
9802 struct mgmt_ev_pin_code_request ev;
9803
9804 bacpy(&ev.addr.bdaddr, bdaddr);
9805 ev.addr.type = BDADDR_BREDR;
9806 ev.secure = secure;
9807
9808 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9809}
9810
9811void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9812 u8 status)
9813{
9814 struct mgmt_pending_cmd *cmd;
9815
9816 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9817 if (!cmd)
9818 return;
9819
9820 cmd->cmd_complete(cmd, mgmt_status(status));
9821 mgmt_pending_remove(cmd);
9822}
9823
9824void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9825 u8 status)
9826{
9827 struct mgmt_pending_cmd *cmd;
9828
9829 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9830 if (!cmd)
9831 return;
9832
9833 cmd->cmd_complete(cmd, mgmt_status(status));
9834 mgmt_pending_remove(cmd);
9835}
9836
9837int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9838 u8 link_type, u8 addr_type, u32 value,
9839 u8 confirm_hint)
9840{
9841 struct mgmt_ev_user_confirm_request ev;
9842
9843 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9844
9845 bacpy(&ev.addr.bdaddr, bdaddr);
9846 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9847 ev.confirm_hint = confirm_hint;
9848 ev.value = cpu_to_le32(value);
9849
9850 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9851 NULL);
9852}
9853
9854int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9855 u8 link_type, u8 addr_type)
9856{
9857 struct mgmt_ev_user_passkey_request ev;
9858
9859 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9860
9861 bacpy(&ev.addr.bdaddr, bdaddr);
9862 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9863
9864 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9865 NULL);
9866}
9867
9868static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9869 u8 link_type, u8 addr_type, u8 status,
9870 u8 opcode)
9871{
9872 struct mgmt_pending_cmd *cmd;
9873
9874 cmd = pending_find(opcode, hdev);
9875 if (!cmd)
9876 return -ENOENT;
9877
9878 cmd->cmd_complete(cmd, mgmt_status(status));
9879 mgmt_pending_remove(cmd);
9880
9881 return 0;
9882}
9883
/* Completion hook for USER_CONFIRM_REPLY; delegates to the shared
 * user-pairing response handler.
 */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9890
/* Completion hook for USER_CONFIRM_NEG_REPLY; delegates to the shared
 * user-pairing response handler.
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9898
/* Completion hook for USER_PASSKEY_REPLY; delegates to the shared
 * user-pairing response handler.
 */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9905
/* Completion hook for USER_PASSKEY_NEG_REPLY; delegates to the shared
 * user-pairing response handler.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9913
9914int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9915 u8 link_type, u8 addr_type, u32 passkey,
9916 u8 entered)
9917{
9918 struct mgmt_ev_passkey_notify ev;
9919
9920 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9921
9922 bacpy(&ev.addr.bdaddr, bdaddr);
9923 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9924 ev.passkey = __cpu_to_le32(passkey);
9925 ev.entered = entered;
9926
9927 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9928}
9929
/* Emit MGMT_EV_AUTH_FAILED for a failed authentication and, if a
 * pairing command is pending on this connection, complete it with the
 * same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the command's own socket; it gets a command response. */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9950
/* Completion of an HCI authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller state, answer pending
 * SET_LINK_SECURITY commands and broadcast new settings if the flag
 * actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test-and-set/clear so 'changed' reflects an actual flip. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9977
9978static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9979{
9980 struct cmd_lookup *match = data;
9981
9982 if (match->sk == NULL) {
9983 match->sk = cmd->sk;
9984 sock_hold(match->sk);
9985 }
9986}
9987
/* Completion of a class-of-device update: broadcast the new class (on
 * success) while skipping the socket of whichever command triggered
 * the change (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup records the first pending command's socket. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10006
/* Completion of a local-name update: broadcast LOCAL_NAME_CHANGED,
 * skipping the socket of a pending SET_LOCAL_NAME command if one
 * triggered the change.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command: the change came from the
		 * controller side, so record the name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering the
		 * adapter on or off, don't send any mgmt signals.
		 * (Checked via HCI_POWERING_DOWN and a pending
		 * SET_POWERED command.)
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10037
10038static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10039{
10040 int i;
10041
10042 for (i = 0; i < uuid_count; i++) {
10043 if (!memcmp(uuid, uuids[i], 16))
10044 return true;
10045 }
10046
10047 return false;
10048}
10049
/* Walk the TLV-encoded EIR data and return true if any contained
 * 16/32/128-bit service UUID matches the uuids filter list. 16 and
 * 32-bit UUIDs are expanded to full 128-bit form using the Bluetooth
 * base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + payload */
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs at offset 2. */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs at offset 2. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 16-byte UUIDs at offset 2. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10104
10105static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10106 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10107{
10108 /* If a RSSI threshold has been specified, and
10109 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10110 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10111 * is set, let it through for further processing, as we might need to
10112 * restart the scan.
10113 *
10114 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10115 * the results are also dropped.
10116 */
10117 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10118 (rssi == HCI_RSSI_INVALID ||
10119 (rssi < hdev->discovery.rssi &&
10120 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10121 return false;
10122
10123 if (hdev->discovery.uuid_count != 0) {
10124 /* If a list of UUIDs is provided in filter, results with no
10125 * matching UUID should be dropped.
10126 */
10127 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10128 hdev->discovery.uuids) &&
10129 !eir_has_uuids(scan_rsp, scan_rsp_len,
10130 hdev->discovery.uuid_count,
10131 hdev->discovery.uuids))
10132 return false;
10133 }
10134
10135 /* If duplicate filtering does not report RSSI changes, then restart
10136 * scanning to ensure updated result with updated RSSI values.
10137 */
10138 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10139 /* Validate RSSI value against the RSSI threshold once more. */
10140 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10141 rssi < hdev->discovery.rssi)
10142 return false;
10143 }
10144
10145 return true;
10146}
10147
10148void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10149 bdaddr_t *bdaddr, u8 addr_type)
10150{
10151 struct mgmt_ev_adv_monitor_device_lost ev;
10152
10153 ev.monitor_handle = cpu_to_le16(handle);
10154 bacpy(&ev.addr.bdaddr, bdaddr);
10155 ev.addr.type = addr_type;
10156
10157 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10158 NULL);
10159}
10160
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_FOUND for a matched monitor.
 *
 * @skb:     fully built DEVICE_FOUND event payload; it is only read
 *           here (copied into a new skb), the caller keeps ownership
 * @skip_sk: socket to exclude from delivery, may be NULL
 * @handle:  handle of the matched monitor (0 = no specific monitor)
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of the new event = DEVICE_FOUND payload plus the extra
	 * fields ADV_MONITOR_DEVICE_FOUND adds in front of it.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	/* mgmt_event_skb() consumes advmon_skb. */
	mgmt_event_skb(advmon_skb, skip_sk);
}
10190
/* Route an advertisement report to DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND events, depending on why it was received.
 *
 * @report_device: true when the report should also be delivered as a
 *                 plain MGMT_EV_DEVICE_FOUND
 * @skb:           pre-built DEVICE_FOUND payload; CONSUMED on every
 *                 path (handed to mgmt_event_skb() or freed here)
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor is waiting for a notification. */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while walking the monitored-device list. */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify at most once per monitored device. */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Any still-unnotified device keeps the pending flag set. */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either deliver the DEVICE_FOUND event (consumes skb) or drop it. */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10254
/* Emit MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement, but only
 * when the advertisement (or its scan response) carries at least one
 * of the AD types the mesh layer registered interest in.
 *
 * @instant: timestamp of the report, copied into the event
 *
 * An empty hdev->mesh_ad_types table (first slot zero) means "accept
 * everything".
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No AD-type filter configured: accept every report. */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Outer loop steps field by field: eir[i] is the field
		 * length, eir[i + 1] the AD type.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* Table is packed; first zero ends it. */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	/* Same scan over the scan response data. */
	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: silently drop the report. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	/* Single length field covers both blobs, as per the mgmt API. */
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10320
/* Process a discovered device (inquiry result or advertising report)
 * and emit the appropriate mgmt events.
 *
 * @dev_class: BR/EDR Class of Device (3 bytes) or NULL for LE
 * @eir:       EIR or advertising data, @eir_len bytes
 * @scan_rsp:  scan response data, @scan_rsp_len bytes
 * @instant:   report timestamp, forwarded to the mesh path
 *
 * Applies discovery filtering (RSSI/UUID/limited) before building the
 * DEVICE_FOUND payload; final delivery is delegated to
 * mgmt_adv_monitor_device_found(), which takes ownership of the skb.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh gets its own copy of every LE report, independent of the
	 * regular discovery flow below.
	 */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* Bit 5 of the minor class octet = limited mode. */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when the report did not
	 * already carry one (uses the 5 reserved bytes from above).
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off skb ownership; delivery or free happens there. */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10412
10413void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10414 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10415{
10416 struct sk_buff *skb;
10417 struct mgmt_ev_device_found *ev;
10418 u16 eir_len = 0;
10419 u32 flags = 0;
10420
10421 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10422 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10423
10424 ev = skb_put(skb, sizeof(*ev));
10425 bacpy(&ev->addr.bdaddr, bdaddr);
10426 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10427 ev->rssi = rssi;
10428
10429 if (name)
10430 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10431 else
10432 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10433
10434 ev->eir_len = cpu_to_le16(eir_len);
10435 ev->flags = cpu_to_le32(flags);
10436
10437 mgmt_event_skb(skb, NULL);
10438}
10439
10440void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10441{
10442 struct mgmt_ev_discovering ev;
10443
10444 bt_dev_dbg(hdev, "discovering %u", discovering);
10445
10446 memset(&ev, 0, sizeof(ev));
10447 ev.type = hdev->discovery.type;
10448 ev.discovering = discovering;
10449
10450 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10451}
10452
10453void mgmt_suspending(struct hci_dev *hdev, u8 state)
10454{
10455 struct mgmt_ev_controller_suspend ev;
10456
10457 ev.suspend_state = state;
10458 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10459}
10460
10461void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10462 u8 addr_type)
10463{
10464 struct mgmt_ev_controller_resume ev;
10465
10466 ev.wake_reason = reason;
10467 if (bdaddr) {
10468 bacpy(&ev.addr.bdaddr, bdaddr);
10469 ev.addr.type = addr_type;
10470 } else {
10471 memset(&ev.addr, 0, sizeof(ev.addr));
10472 }
10473
10474 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10475}
10476
/* Control channel descriptor: routes commands received on
 * HCI_CHANNEL_CONTROL sockets to the mgmt_handlers table and runs
 * mgmt_init_hdev() for each new controller.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10483
/* Register the mgmt control channel; returns 0 or a negative errno
 * from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10488
/* Unregister the mgmt control channel on module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10493
10494void mgmt_cleanup(struct sock *sk)
10495{
10496 struct mgmt_mesh_tx *mesh_tx;
10497 struct hci_dev *hdev;
10498
10499 read_lock(&hci_dev_list_lock);
10500
10501 list_for_each_entry(hdev, &hci_dev_list, list) {
10502 do {
10503 mesh_tx = mgmt_mesh_next(hdev, sk);
10504
10505 if (mesh_tx)
10506 mesh_send_complete(hdev, mesh_tx, true);
10507 } while (mesh_tx);
10508 }
10509
10510 read_unlock(&hci_dev_list_lock);
10511}